The following 46 code examples, extracted from open-source Python projects, illustrate how to use `fabric.api.env.user`.
def bootstrap(branch='master'):
    """Provision a fresh server end-to-end and start the bot.

    Installs Postgres/Python/nginx, deploys the given git branch, wires up
    uwsgi + nginx (+ letsencrypt), opens the firewall, seeds the app and
    prints the service status.

    :param branch: git branch to deploy (default 'master')
    """
    env.sudo_password = getpass('Initial value for env.sudo_password: ')
    env.domain_name = prompt('Enter your domain name:', default='meetup_facebook_bot')
    create_permanent_folder()
    create_log_folder()
    install_postgres()
    # the ssh user doubles as both the DB role name and the database name
    database_url = setup_postgres(username=env.user, database_name=env.user)
    renew_ini_file(database_url)
    install_python()
    fetch_sources_from_repo(branch, PROJECT_FOLDER)
    reinstall_venv()
    install_modules()
    install_nginx()
    configure_letsencrypt_if_necessary()
    add_nginx_reload_crontab_job()
    configure_nginx_if_necessary()
    setup_ufw()
    start_systemctl_service(UWSGI_SERVICE_NAME)
    start_systemctl_service('nginx')
    run_setup_scripts()
    status()
def configure_env():
    '''Configure the fabric env from the project config.

    Stage-level settings override the global config; the target host comes
    from the stage config only. Verbose logging is enabled when requested.
    '''
    config = get_config()
    stage_config = get_stage_config(get_stage())

    def pick(key):
        # stage-level value wins; fall back to the global config
        return stage_config.get(key) or config[key]

    env.user = pick('user')
    env.port = pick('port')
    env.cwd = pick('app_dir')
    env.key_filename = pick('key_filename')
    env.hosts = [stage_config['host']]

    ssh_forward_agent = pick('ssh_forward_agent')
    # only the literal (case-insensitive) string 'true' enables forwarding
    env.forward_agent = (
        ssh_forward_agent and str(ssh_forward_agent).lower() == 'true'
    )

    # If verbose logging is turned on show verbose logs.
    if str(pick('verbose_logging')).lower() == 'true':
        set_verbose_logging()
def vagrant():
    """Point the fabric env at the local vagrant VM.

    Parses `vagrant ssh-config` for host, port, ssh key and user.
    """
    env.srvr = 'vagrant'
    env.path = os.path.join('/', env.srvr)

    # this is necessary because ssh will fail when known hosts keys vary
    # every time vagrant is destroyed, a new key will be generated
    env.disable_known_hosts = True

    env.within_virtualenv = 'source {}'.format(
        os.path.join('~', 'venv', 'bin', 'activate'))

    ssh_config = local('vagrant ssh-config', capture=True)
    # Split each "Key value" line at the FIRST whitespace only: the
    # original split() broke dict() construction whenever a value
    # contained spaces (e.g. an IdentityFile path) or a line was blank.
    result = dict(
        line.strip().split(None, 1)
        for line in ssh_config.splitlines()
        if line.strip()
    )
    env.hosts = ['%s:%s' % (result['HostName'], result['Port'])]
    env.key_filename = result['IdentityFile']
    env.user = result['User']
    print(env.key_filename, env.hosts, env.user)
def parse_args():
    """Parse command-line options for the ceph-client setup tool.

    :return: argparse.Namespace with node, ceph, user, password and the
        deploy/app/conf/temp directory options.
    """
    parser = argparse.ArgumentParser("Setup ceph client to remote host")
    # target node (required positional) and the ceph admin node
    parser.add_argument('node', help="Name or IP address of node to install to")
    parser.add_argument('-C', '--ceph', help="Name of IP address of ceph admin node")
    # ssh credentials
    parser.add_argument('-u', '--user', default='root', help="Username")
    parser.add_argument('-p', '--password', help="Password")
    # locations on the remote host
    parser.add_argument('-d', '--deploy-dir', default='/var/opt/deploy',
                        help="Directory to put deploy script to")
    parser.add_argument('-D', '--deploy-script', default='ceph_install.sh',
                        help="Deploy script")
    parser.add_argument('-w', '--app-dir', default='/var/opt/kuberdock',
                        help="Directory of web-application")
    parser.add_argument('-c', '--conf-dir', default='/etc/ceph',
                        help="Directory of ceph-configs")
    parser.add_argument('-T', '--temp-dir', default=TMPDIR, help="Temp directory")
    return parser.parse_args()
def setup_supervisor():
    """Install supervisord and register Crestify as a managed service.

    We use supervisord to keep Crestify running in the background,
    recover from crashes, and start automatically on bootup.
    Also, using more than 1 gunicorn worker resulted in socket not being
    released, so only 1 worker will be used.
    """
    sudo('apt-get -y install supervisor')
    sudo('mkdir /var/log/crestify/')
    # honcho exports the Procfile as a supervisord config under conf.d
    sudo(
        'cd /home/crestify/crestify && ../crestifyenv/bin/honcho export -s /bin/sh -a crestify supervisord /etc/supervisor/conf.d')
    # pull the generated config back locally so we can patch it in place
    fd = StringIO()
    get('/etc/supervisor/conf.d/crestify.conf', fd)
    content = fd.getvalue().splitlines()
    for n, i in enumerate(content):
        if i.startswith("environment="):
            # honcho's export omits PATH; append it so venv binaries resolve
            content[n] = i + ",PATH=/home/crestify/crestifyenv/bin:%(ENV_PATH)s"
        if i.startswith("user="):
            # run as the unprivileged crestify user
            content[n] = "user=crestify"
        if i.startswith("stopsignal="):
            # Both Gunicorn and Celery use SIGTERM for graceful shutdown
            content[n] = "stopsignal=TERM"
    content = StringIO("\n".join(content))
    put(content, "/etc/supervisor/conf.d/crestify.conf", use_sudo=True)
    sudo('supervisorctl reread')
    sudo('supervisorctl update')
def _make_circus():
    """Write circus.ini into the project folder, defining three circus
    watchers: daphne (ASGI server), the channels worker, and redis."""
    circus_conf = '''
[watcher:daphne]
cmd = daphne -b 0.0.0.0 -p 8001 coding_night_live.asgi:channel_layer
working_dir = %s/
copy_env = True
user = %s

[watcher:worker]
cmd = python3 manage.py runworker
working_dir = %s/
copy_env = True
user = www-data

[watcher:redis]
cmd = redis-server
copy_env = True
user = %s
''' % (project_folder, REMOTE_USER, project_folder, REMOTE_USER)
    # NOTE(review): replace(' ', '') strips EVERY space, which would mangle
    # values such as "cmd = daphne -b ..." — presumably it was meant to
    # strip only the template's indentation; confirm against the circus.ini
    # actually deployed.
    f = open(project_folder + '/circus.ini', 'w')
    f.write(circus_conf.replace(' ', ''))
    f.close()
def install(host_config):
    """Install redis on the host described by host_config.

    Reads version/port/data-directory from the host's 'redis' software
    config (with defaults), applies the kernel tweaks redis needs, then
    runs the uploaded redis.sh installer remotely.

    :param host_config: per-host config dict understood by `helper`
    """
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    software_config = helper.get_software_config(host_config, 'redis')
    redis_version = software_config.get('version', '3.2.6')
    redis_port = software_config.get('port', '6379')
    redis_data_dir = software_config.get('data-directory', '/var/lib/redis')
    # kernel settings recommended for redis: no THP, overcommit_memory=1
    machine.disable_transparent_huge_pages(env.host_string)
    machine.set_overcommit_memory(env.host_string, 1)
    put('{}/software/scripts/redis.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x redis.sh")
    sudo(". ~/redis.sh {} {} {}".format(redis_version, redis_port, redis_data_dir))
def install(host_config):
    """Install zookeeper on the host described by host_config.

    Installs Java 8 first, then runs the uploaded zookeeper.sh installer
    with the port, this server's id, and the comma-joined node list.

    :param host_config: per-host config dict understood by `helper`
    """
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    software_config = helper.get_software_config(host_config, 'zookeeper')
    # zookeeper requires a JVM
    java.v8_install(host_config)
    port = software_config.get('port', '2181')
    zk_server_id = software_config.get('id', '0')
    # ensemble members, passed to the script as a comma-separated list
    zk_nodes = ",".join(software_config.get('nodes'))
    put('{}/software/scripts/zookeeper.sh'.format(os.getcwd()), '~/', use_sudo=True)
    sudo("chmod +x zookeeper.sh")
    sudo(". ~/zookeeper.sh {} {} {}".format(port, zk_server_id, zk_nodes))
def mount_ebs_volumes(host_config):
    """Format (xfs) and mount every EBS volume listed in host_config.

    :param host_config: dict with ssh connection info and an 'ec2-mounts'
        list of {'device': ..., 'mount': ...} entries.
    """
    env.host_string = helper.get_env_host_string(host_config)
    env.user = helper.get_env_user(host_config)
    env.key_filename = helper.get_env_key_filename(host_config)
    sudo("apt-get -y install xfsprogs")
    # Reset fstab ONCE, before the loop.  The original code moved
    # /etc/fstab aside on EVERY iteration, discarding the entries just
    # appended for earlier devices in the same run.
    sudo("mv /etc/fstab /etc/fstab.old")
    sudo("touch /etc/fstab")
    for ebs in host_config['ec2-mounts']:
        device = ebs['device']
        mount = ebs['mount']
        sudo("mkdir -p {}".format(mount))
        # warn_only lets us inspect the outcome instead of aborting.  Check
        # .succeeded explicitly: the original truth-tested the result
        # object, which is a (non-empty) output string even on failure.
        if sudo('mkfs.xfs -f {0}'.format(device), warn_only=True).succeeded:
            run("echo '{0}\t{1}\txfs\tdefaults\t0\t0' | sudo tee -a /etc/fstab".format(device, mount))
        sudo('mount -a')  # original ran 'sudo mount -a' under sudo — redundant
        logger.info("EBS volume {} : {} mounted.".format(device, mount))
def create_deploy_user():
    """Create the deploy user with its home, group and ~/.ssh directory.

    Interactive: `passwd` prompts for the new user's password.
    """
    sudo('useradd -m -s /bin/bash -g {remote_group} {remote_user}'.format(
        remote_user=env.deploy_user, remote_group=env.deploy_group))
    # prompts on the terminal for the new password
    sudo('passwd {remote_user}'.format(remote_user=env.deploy_user))
    # also add the deploy group as a supplementary group
    sudo('usermod -a -G {remote_group} {remote_user}'.format(
        remote_group=env.deploy_group, remote_user=env.deploy_user))
    sudo('mkdir /home/{}/.ssh'.format(env.deploy_user))
    sudo('chown -R {remote_user} /home/{remote_user}/.ssh'.format(
        remote_user=env.deploy_user))
    sudo('chgrp -R {remote_group} /home/{remote_user}/.ssh'.format(
        remote_group=env.deploy_group, remote_user=env.deploy_user))
def connect_to_instance_in_ssh(address, keypair_path, user='root'):
    """
    Run the command `ls -la /root` on a given instance and log the output.

    :param address: ip or dns name of a machine
    :type address: str
    :param keypair_path: keypair path
    :type keypair_path: str
    :param user: ssh user name (default 'root')
    :type user: str
    """
    env.host_string = address
    env.user = user
    env.parallel = False
    env.key_filename = keypair_path
    # fresh cloud instances present new host keys every time
    env.disable_known_hosts = True
    env.connection_attempts = 10
    env.timeout = 120
    ocb.log(run('ls -la /root'), level='INFO')
def install_localdb():
    """Install the local DB stack: leveldb + plyvel, then rabbitmq + pika.

    Runs everything with warn_only so a single failing apt/pip step does
    not abort the whole provisioning run.
    """
    # leveldb & plyvel install
    with settings(warn_only=True):
        user_group = env.user  # same name used as both owner and group
        sudo(" echo 'leveldb & plyvel install' ")
        sudo("mkdir -p /localdb/{bigchain,votes,header}")
        sudo("chown -R " + user_group + ':' + user_group + ' /localdb')
        sudo('pip3 install leveldb==0.194')
        sudo('apt-get install libleveldb1 libleveldb-dev libsnappy1 libsnappy-dev')
        # fix any broken/missing dependencies left by the previous step
        sudo('apt-get -y -f install')
        sudo('pip3 install plyvel==0.9')
        # ramq & pika install
        sudo(" echo 'ramq & pika install' ")
        sudo('apt-get -y install rabbitmq-server')
        sudo('pip3 install pika==0.10.0')
        #sudo('rabbitmq-server restart')
        # Install RethinkDB
def generate_ssh(self, server, args, configure):
    """
    Generate an ssh key pair on the server if one does not exist yet.
    (Original docstring was mojibake; intent reconstructed from the code.)

    :param server: key into `configure` for the target server
    :param args: unused here — kept for the caller's uniform interface
    :param configure: per-server configuration mapping
    :return:
    """
    self.reset_server_env(server, configure)
    # chmod project root owner
    sudo('chown {user}:{user} -R {path}'.format(
        user=configure[server]['user'],
        path=bigdata_conf.project_root
    ))
    # generate ssh key if not exists
    if not exists('~/.ssh/id_rsa.pub'):
        # -P "" creates the key without a passphrase
        run('ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa')
def virtualbox_host():
    '''Install a VirtualBox host system.

    More Infos:
     * overview: https://wiki.ubuntuusers.de/VirtualBox/
     * installation: https://wiki.ubuntuusers.de/VirtualBox/Installation/
    '''
    # optionally remove the dkms package first (it is re-installed below)
    if query_yes_no(question='Uninstall virtualbox-dkms?', default='yes'):
        run('sudo apt-get remove virtualbox-dkms')
    install_packages([
        'virtualbox',
        'virtualbox-qt',
        'virtualbox-dkms',
        'virtualbox-guest-dkms',
        'virtualbox-guest-additions-iso',
    ])
    # allow the current ssh user to use VirtualBox devices
    users = [env.user]
    for username in users:
        # flo() interpolates {username} from the local scope
        run(flo('sudo adduser {username} vboxusers'))
    #run('newgrp - vboxusers')
def samba():
    '''Install smb server samba and create a share (common read-write-access).

    More infos:
     * https://wiki.ubuntuusers.de/Samba%20Server/
    '''
    username = env.user
    install_packages(['samba'])
    # set the smb password for the current user (interactive prompt)
    run(flo('sudo smbpasswd -a {username}'))
    path = '$HOME/shared'
    sharename = 'shared'
    comment = '"smb share; everyone has full access (read/write)"'
    acl = flo('Everyone:F,{username}:F guest_ok=y')
    with warn_only():
        # mkdir fails harmlessly when the directory already exists
        run(flo('mkdir {path}'))
    run(flo('sudo net usershare add {sharename} {path} {comment} {acl}'))
    run(flo('sudo net usershare info {sharename}'))
def run_download_db(filename=None):
    """
    Downloads the database from the server into your local machine.

    In order to import the downloaded database, run ``fab import_db``

    Usage::

        fab prod run_download_db
        fab prod run_download_db:filename=foobar.dump
    """
    dump_name = filename or settings.DB_DUMP_FILENAME
    # with an ssh key we rely on the ssh-config alias; otherwise user@host
    if env.key_filename:
        ssh_target = settings.PROJECT_NAME
    else:
        ssh_target = '{0}@{1}'.format(env.user, env.host_string)
    remote_path = '{0}{1}'.format(
        settings.FAB_SETTING('SERVER_DB_BACKUP_DIR'), dump_name)
    local('scp {0}:{1} .'.format(ssh_target, remote_path))
def run_download_media(filename=None):
    """
    Downloads the media dump from the server into your local machine.

    In order to import the downloaded media dump, run ``fab import_media``

    Usage::

        fab prod run_download_media
        fab prod run_download_media:filename=foobar.tar.gz
    """
    dump_name = filename or settings.MEDIA_DUMP_FILENAME
    # with an ssh key we rely on the ssh-config alias; otherwise user@host
    if env.key_filename:
        ssh_target = settings.PROJECT_NAME
    else:
        ssh_target = '{0}@{1}'.format(env.user, env.host_string)
    remote_path = '{0}{1}'.format(
        settings.FAB_SETTING('SERVER_MEDIA_BACKUP_DIR'), dump_name)
    local('scp {0}:{1} .'.format(ssh_target, remote_path))
def run_upload_db(filename=None):
    """
    Uploads your local database to the server.

    You can create a local dump with ``fab export_db`` first.

    In order to import the database on the server you still need to SSH
    into the server.

    Usage::

        fab prod run_upload_db
        fab prod run_upload_db:filename=foobar.dump
    """
    if not filename:
        filename = settings.DB_DUMP_FILENAME
    if env.key_filename:
        ssh = settings.PROJECT_NAME
    else:
        ssh = '{0}@{1}'.format(env.user, env.host_string)
    # BUG FIX: the original format string used index {3} while only three
    # arguments (indexes 0-2) were supplied, raising IndexError on every
    # call; the backup-dir argument is index {2}.
    local('scp {0} {1}:{2}'.format(
        filename, ssh, settings.FAB_SETTING('SERVER_DB_BACKUP_DIR')))
def create_app_dir(): """Create the application directory and setup a virtualenv.""" # create app dir if exists(remote_app_dir) is False: sudo('mkdir -p ' + remote_app_dir) # create virtual env with cd(remote_app_dir): if exists(remote_app_dir + '/env') is False: sudo('virtualenv env') # Change permissions sudo('chown {}:{} {} -R'.format(env.user, env.user, remote_app_dir)) # Create log dir if exists(remote_log_dir) is False: sudo('mkdir {}'.format(remote_log_dir))
def main(argv):
    """Load the test list and VM map, then run the sampling executor.

    :param argv: unused — the duration is read from sys.argv[1] directly
    """
    global APPS
    global DURATION
    # NOTE(review): VMLIST is declared global but never assigned; the loop
    # below populates VMMAP — confirm which global was intended.
    global VMLIST
    env.user = 'ubuntu'
    env.connection_attempts = 10
    DURATION = int(sys.argv[1])
    with open('./tests') as f:
        APPS = f.read().splitlines()
    with open('./vmlist') as f:
        for line in f:
            # vmlist line format: <vm-name> <field1> <field2>
            vals = line.split()
            VMMAP[vals[0]] = (vals[1], vals[2])
    # NOTE(review): under Python 3 this is float division — confirm the
    # executor does not require an integer sample count.
    sample_count = DURATION / INTERVAL
    #image_dir='data'
    #files = os.listdir(image_dir)
    image_file = ''
    executor(sample_count, image_file)
def main(argv):
    """Load the test list and VM map, then run the sampling executor.

    :param argv: unused — the duration is read from sys.argv[1] directly
    """
    global APPS
    global DURATION
    # NOTE(review): VMLIST is declared global but never assigned; the loop
    # below populates VMMAP — confirm which global was intended.
    global VMLIST
    env.user = 'ubuntu'
    env.connection_attempts = 100
    DURATION = int(sys.argv[1])
    with open('./tests') as f:
        APPS = f.read().splitlines()
    with open('./vmlist') as f:
        for line in f:
            # vmlist line format: <vm-name> <field1> <field2>
            vals = line.split()
            VMMAP[vals[0]] = (vals[1], vals[2])
    # NOTE(review): under Python 3 this is float division — confirm the
    # executor does not require an integer sample count.
    sample_count = DURATION / INTERVAL
    executor(sample_count)
def create_service_file():
    """Render the systemd unit template for uwsgi and upload it."""
    service_file_config = {
        'user': env.user,  # the unit runs as the deploying ssh user
        'work_dir': PROJECT_FOLDER,
        'env_bin_dir': VENV_BIN_DIRECTORY,
        'uwsgi_path': os.path.join(VENV_BIN_DIRECTORY, 'uwsgi'),
        'app_ini_path': INI_FILE_PATH
    }
    upload_template(
        filename='deploy_configs/meetup-facebook-bot.service',
        destination=os.path.join('/etc/systemd/system/', UWSGI_SERVICE_NAME),
        context=service_file_config,
        use_sudo=True
    )
def reset_db():
    """Stop the app, wipe and re-seed its database, then restart."""
    env.sudo_password = getpass('Initial value for env.sudo_password: ')
    sudo('systemctl stop %s' % UWSGI_SERVICE_NAME)
    # the database is named after the ssh user (matches the bootstrap setup)
    empty_database(database_name=env.user)
    fill_database_with_example_data()
    sudo('systemctl start %s' % UWSGI_SERVICE_NAME)
def localhost():
    """ local server — point the fabric env at this machine """
    env.srvr = 'local'
    # project path = directory containing this fabfile
    env.path = os.path.dirname(os.path.realpath(__file__))
    env.within_virtualenv = 'workon dprr'  # virtualenvwrapper env 'dprr'
    env.hosts = [gethostname()]
    env.user = getuser()
def put_systemd_services():
    """Render the jinja2 systemd unit templates and install them remotely."""
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.abspath('systemd')),
    )
    services = (
        'combine-uploader.service.template',
        'combine-irc.service.template',
        'watch-ip.service.template',
    )
    template_variables = {
        'VENV': '/venvs/combine',
        'COMBINE_CONFIG_FILE': '/home/%s/combine/config.yml' % env.user,
        'IP_FILE': '/var/run/watch-ip/ip',
    }
    for name in services:
        result = environment.get_template(name).render(template_variables)
        # install under the unit name with the '.template' suffix stripped
        put(
            StringIO(result),
            '/etc/systemd/system/%s' % name[:-len('.template')],
            use_sudo=True,
        )
    # the timer unit is static — uploaded as-is, no templating
    put(
        'systemd/watch-ip.timer',
        '/etc/systemd/system/watch-ip.timer',
        use_sudo=True,
    )
    sudo('systemctl daemon-reload')
def mkdir(path):
    # Create `path` (and parents) remotely and hand ownership to env.user.
    # {path!r} shell-quotes via Python repr — NOTE(review): repr is not full
    # shell escaping; confirm paths never contain shell metacharacters.
    sudo('mkdir -p {path!r} && chown {user} {path!r}'.format(
        path=path,
        user=env.user,
    ))
def update():
    """Rsync the working tree to the server, install deps, (re)start services."""
    rsync_project(
        '/home/%s/combine' % env.user,
        '.',
        exclude=(
            '.git',
            'data',
            'config.yml',  # the live config on the server is kept
        ),
    )
    with cd('combine'), venv('combine'):
        # promote the uploaded production config into place
        run('mv config.yml{.prd,}')
        run('pip install -r etc/requirements.txt')
        run('pip install -e .')
        put_systemd_services()
        mkdir('/var/run/gunicorn')
        systemctl_start('combine-uploader')
        systemctl_start('combine-irc')
        mkdir('/var/run/watch-ip')
        systemctl_start('watch-ip.timer')
        systemctl_start('watch-ip.service')
        # verify both daemons actually came up
        run('systemctl is-active combine-uploader')
        run('systemctl is-active combine-irc')
    restart_nginx()
def deploy():
    """Deploy the site for env.host: fetch sources, then update settings,
    virtualenv, static files and the database."""
    site_folder = f'/home/{env.user}/sites/{env.host}/source'
    source_folder = site_folder
    _get_latest_source(source_folder)
    _create_directory_structure_if_necessary(site_folder)
    # the Django project itself lives one level down from the checkout
    source_folder = source_folder + '/eloworld'
    _update_settings(source_folder, env.host)
    _update_virtualenv(source_folder)
    _update_static_files(source_folder)
    _update_database(source_folder)
def create_deployer_group():
    """
    Create a user group for all project developers and grant it full sudo.
    """
    run('groupadd {}'.format(env.user_group))
    # NOTE(review): rewriting /etc/sudoers without `visudo -c` validation
    # risks locking everyone out if the write produces a bad file — confirm
    # this is acceptable for this provisioning flow.
    run('mv /etc/sudoers /etc/sudoers-backup')
    run('(cat /etc/sudoers-backup; echo "%' + env.user_group
        + ' ALL=(ALL) ALL") > /etc/sudoers')
    # sudoers must be mode 0440 or sudo refuses to run
    run('chmod 440 /etc/sudoers')
def create_deployer_user(): """ Create a user for the user group """ # TODO: use useradd instead of adduser so password and other details can # be added with just one command. run('adduser {}'.format(env.user_name)) run('usermod -a -G {} {}'.format(env.user_group, env.user_name)) run('mkdir /home/{}/.ssh'.format(env.user_name)) run('chown -R {} /home/{}/.ssh'.format(env.user_name, env.user_name)) run('chgrp -R {} /home/{}/.ssh'.format( env.user_group, env.user_name))
def upgrade_server():
    """
    Upgrade the server as a root user, then reboot it.
    """
    run('apt-get update && apt-get -y upgrade')
    # because ubuntu 16.04 no longer has python2.7
    run('sudo apt-get -y install python-simplejson')
    # NOTE(review): rebooting drops the ssh connection, so any task after
    # this one in the same run will fail — confirm this always runs last.
    run('sudo reboot')
def remote_server():
    """Target localhost over SSH, prompting interactively for credentials."""
    env.hosts = ['127.0.0.1']
    env.user = prompt('Enter user name: ')
    # getpass keeps the password off the terminal echo
    env.password = getpass('Enter password: ')
def remote_server():
    """Collect system and MySQL credentials for the target host."""
    env.hosts = ['127.0.0.1']
    env.user = prompt('Enter your system username: ')
    env.password = getpass('Enter your system user password: ')
    env.mysqlhost = 'localhost'
    env.mysqluser = prompt('Enter your db username: ')
    env.mysqlpassword = getpass('Enter your db user password: ')
    env.db_name = ''  # left empty: the create_db task prompts for a name
def create_db():
    """Create a MySQL DB for App version"""
    # fall back to an interactive prompt when env.db_name is unset/empty
    if not env.db_name:
        db_name = prompt("Enter the DB name:")
    else:
        db_name = env.db_name
    # SECURITY(review): db_name and the credentials are interpolated
    # directly into a shell + SQL string; a hostile value could inject
    # SQL or shell commands — acceptable only with trusted operator input.
    run('echo "CREATE DATABASE %s default character set utf8 collate utf8_unicode_ci;"|mysql --batch --user=%s --password=%s --host=%s'\
        % (db_name, env.mysqluser, env.mysqlpassword, env.mysqlhost),
        pty=True)
def setup_fabric():
    """Configure fabric globals for root-over-SSH upgrade runs."""
    env.user = 'root'
    env.abort_exception = UpgradeError  # raise instead of exiting on abort
    env.key_filename = settings.SSH_KEY_FILENAME
    env.warn_only = True   # failed commands return results instead of aborting
    output.stdout = False  # silence remote command output
    output.aborts = False  # silence abort messages (handled via the exception)
def pre_start_hook(app): from ..nodes.models import Node # env.warn_only = True env.user = 'root' env.key_filename = SSH_KEY_FILENAME output.stdout = False output.running = False PLUGIN_DIR = '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/' with app.app_context(): for node in Node.query.all(): env.host_string = node.hostname put('./node_network_plugin.sh', PLUGIN_DIR + 'kuberdock') put('./node_network_plugin.py', PLUGIN_DIR + 'kuberdock.py') run('systemctl restart kuberdock-watcher') print 'Kuberdock node parts are updated'
def __init__(self):
    """Prepare the fabric env for root access to nodes and reset caches."""
    env.user = 'root'
    env.skip_bad_hosts = True  # keep going when a node is unreachable
    env.key_filename = SSH_KEY_FILENAME
    # lazily-populated caches; None means "not fetched yet"
    self._cached_drives = None
    self._cached_node_ip = None
def get_by_user(self, user, device_id=None):
    """
    Return the user's persistent drives, or a single drive by id.

    :param user: object -> user object got from SQLAlchemy
    :param device_id: optional drive id to select a single drive
    :return: list of dicts when device_id is None; otherwise the matching
        drive dict, or None when no drive has that id
    """
    all_drives = self.get_drives(user_id=user.id)
    if device_id is None:
        return all_drives
    # first drive whose id matches, or None when absent
    return next((d for d in all_drives if d['id'] == device_id), None)
def get_user_unmapped_drives(self, user):
    """
    Returns unmapped drives of a user
    :return: list -> list of dicts of unmapped drives of a user
    """
    unmapped = []
    for drive in self.get_drives(user.id):
        # a drive is unmapped when its 'in_use' flag is falsy
        if not drive['in_use']:
            unmapped.append(drive)
    return unmapped
def delete_by_id(self, drive_id):
    """
    Deletes a user drive
    :param drive_id: string -> drive id
    :return: int -> 0 on success; 1 when the drive id is unknown; otherwise
        whatever status `_delete_pd` returned
    Raises DriveIsLockedError if drive is locked by another operation
    at the moment.
    """
    with drive_lock(drive_id=drive_id):
        pd = PersistentDisk.query.filter(
            PersistentDisk.id == drive_id
        ).first()
        if not pd:
            current_app.logger.warning(
                'Unable to delete drive. '
                'Unknown drive id: %s', drive_id
            )
            return 1
        # self.end_stat(pd.name, pd.owner_id)
        rv = self._delete_pd(pd)
        # keep the in-memory cache consistent with the deletion
        if rv == 0 and self._cached_drives:
            self._cached_drives = [
                d for d in self._cached_drives
                if d['id'] != drive_id
            ]
        return rv
def start_stat(size, name=None, user_id=None):
    """
    Start counting usage statistics.

    You need to provide `name` and `user` or `sys_drive_name`
    :param size: int -> size in GB
    :param name: string -> user's drive name
    :param user_id: id of the drive owner
    """
    # delegates record-keeping to the PersistentDiskState model
    PersistentDiskState.start(user_id, name, size)
def end_stat(name=None, user_id=None):
    """
    Finish counting usage statistics.

    You need to provide `name` and `user` or `sys_drive_name`
    :param name: string -> user's drive name
    :param user_id: int -> id of the drive owner
    """
    # closes the record opened by start_stat for the same (user_id, name)
    PersistentDiskState.end(user_id, name)
def __init__(self, user="sympy", repo="sympy", api_url="https://api.github.com", authorize_url="https://api.github.com/authorizations", uploads_url='https://uploads.github.com', main_url='https://github.com'): """Generates all URLs and templates""" self.user = user self.repo = repo self.api_url = api_url self.authorize_url = authorize_url self.uploads_url = uploads_url self.main_url = main_url self.pull_list_url = api_url + "/repos" + "/" + user + "/" + repo + "/pulls" self.issue_list_url = api_url + "/repos/" + user + "/" + repo + "/issues" self.releases_url = api_url + "/repos/" + user + "/" + repo + "/releases" self.single_issue_template = self.issue_list_url + "/%d" self.single_pull_template = self.pull_list_url + "/%d" self.user_info_template = api_url + "/users/%s" self.user_repos_template = api_url + "/users/%s/repos" self.issue_comment_template = (api_url + "/repos" + "/" + user + "/" + repo + "/issues/%d" + "/comments") self.release_uploads_url = (uploads_url + "/repos/" + user + "/" + repo + "/releases/%d" + "/assets") self.release_download_url = (main_url + "/" + user + "/" + repo + "/releases/download/%s/%s")
def vagrant():
    """
    Run commands using vagrant
    """
    vc = get_vagrant_config()
    # change from the default user to 'vagrant'
    env.user = vc['User']
    # connect to the port-forwarded ssh
    env.hosts = ['%s:%s' % (vc['HostName'], vc['Port'])]
    # use vagrant ssh key; the path may come back quoted, so strip quotes
    env.key_filename = vc['IdentityFile'].strip('"')
    # Forward the agent if specified:
    env.forward_agent = vc.get('ForwardAgent', 'no') == 'yes'
def dev():
    """ chooses development environment """
    env.environment = "dev"
    # NOTE(review): the "dev" task targets PRODUCTION_HOST/PRODUCTION_USER;
    # the banner suggests local development — confirm this aliasing is
    # intentional (e.g. remote dev on the production box).
    env.hosts = [PRODUCTION_HOST]
    env.user = PRODUCTION_USER
    print("LOCAL DEVELOPMENT ENVIRONMENT\n")
def staging():
    """ chooses testing environment — targets the staging host """
    env.environment = "staging"
    env.hosts = ["staging.myproject.com"]
    env.user = "myproject"
    print("STAGING WEBSITE\n")