我们从Python开源项目中,提取了以下48个代码示例,用于说明如何使用fabric.api.settings()。
def deploy(target='dev', sha1=None):
    """Deploy the pyconkr-2016 site for *target* ('dev' or production).

    When *sha1* is omitted, the locally checked-out HEAD is used. The server
    checkout is hard-reset to that commit, dependencies are installed, the
    Django management commands are run, and the worker is reloaded through
    its control fifo.
    """
    if sha1 is None:
        # Default to the commit currently checked out locally.
        sha1 = local('git rev-parse HEAD', capture=True)

    home_dir = '/home/pyconkr/{target}.pycon.kr/pyconkr-2016'.format(target=target)
    if target == 'dev':
        python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016-dev'
    else:
        python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016'

    with settings(cd(home_dir),
                  shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')):
        # All server-side steps run as the pyconkr user, in order.
        for step in ('git fetch --all -p',
                     'git reset --hard ' + sha1,
                     'bower install',
                     '%s/bin/pip install -r requirements.txt' % python_env,
                     '%s/bin/python manage.py compilemessages' % python_env,
                     '%s/bin/python manage.py migrate' % python_env,
                     '%s/bin/python manage.py collectstatic --noinput' % python_env):
            sudo(step, user='pyconkr')

    # Ask the running worker to reload gracefully.
    run('echo r > /var/run/pyconkr-2016-%s.fifo' % target)
def flatpages_mig(direction='www'):
    """Copy Django flatpages between the dev and www deployments.

    ``direction='www'`` dumps from dev and loads into www; any other value
    migrates in the opposite direction.
    """
    dev_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016-dev/bin/python'
    www_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016/bin/python'
    dev_dir = '/home/pyconkr/dev.pycon.kr/pyconkr-2016'
    www_dir = '/home/pyconkr/www.pycon.kr/pyconkr-2016'

    if direction == 'www':
        from_env, to_env = dev_env, www_env
        from_dir, to_dir = dev_dir, www_dir
    else:
        from_env, to_env = www_env, dev_env
        from_dir, to_dir = www_dir, dev_dir

    # Dump straight into the destination checkout's fixtures directory.
    fixture_path = os.path.join(to_dir, 'pyconkr', 'fixtures', 'flatpages.json')

    with settings(cd(from_dir),
                  shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')):
        sudo('{python} manage.py dumpdata --indent 2 flatpages -o {fixture_to}'
             .format(fixture_to=fixture_path, python=from_env))
    with settings(cd(to_dir),
                  shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')):
        sudo('{python} manage.py loaddata flatpages'.format(python=to_env))
def __init__(self):
    """Initialize the storage backend.

    AWS credentials and placement are pulled from the project settings
    module when it is importable; otherwise every connection parameter is
    left as None so the instance runs unconfigured.
    """
    self._conn = None
    region = zone = key_id = secret = None
    try:
        from ..settings import (
            AVAILABILITY_ZONE, REGION,
            AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        region, zone = REGION, AVAILABILITY_ZONE
        key_id, secret = AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
    except ImportError:
        # No settings module available: keep all parameters unset.
        pass
    self._region = region
    self._availability_zone = zone
    self._aws_access_key_id = key_id
    self._aws_secret_access_key = secret
    super(AmazonStorage, self).__init__()
def setup():
    """
    Setup servers for deployment.

    This does not setup services or push to S3. Run deploy() next.
    """
    require('settings', provided_by=['production', 'staging'])
    require('branch', provided_by=['stable', 'master', 'branch'])

    if not app_config.DEPLOY_TO_SERVERS:
        logger.error('You must set DEPLOY_TO_SERVERS = True in your app_config.py before setting up the servers.')
        return

    # Provision in dependency order.
    for step in (create_directories, create_virtualenv, clone_repo,
                 checkout_latest, install_requirements, setup_logs,
                 generate_secret_key):
        step()
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=['production', 'staging'])

    # The directory may already exist; warn_only keeps mkdir non-fatal.
    with settings(warn_only=True):
        local('mkdir confs/rendered')

    # Copy the app_config so that when we load the secrets they don't
    # get exposed to other management commands
    context = copy.copy(app_config.__dict__)
    context.update(app_config.get_secrets())

    for service, remote_path, extension in app_config.SERVER_SERVICES:
        template_path = _get_template_conf_path(service, extension)
        rendered_path = _get_rendered_conf_path(service, extension)

        with open(template_path, 'r') as read_template:
            with open(rendered_path, 'w') as write_template:
                write_template.write(
                    Template(read_template.read()).render(**context))
def nuke_confs():
    """
    DESTROYS rendered server configurations from the specified server.
    This will reload nginx and stop the uwsgi config.
    """
    require('settings', provided_by=['production', 'staging'])

    for service, remote_path, extension in app_config.SERVER_SERVICES:
        with settings(warn_only=True):
            sudo('rm -f %s' % _get_installed_conf_path(service, remote_path, extension))

            if service == 'nginx':
                # nginx keeps running; just drop the removed vhost.
                sudo('service nginx reload')
            elif service == 'uwsgi':
                sudo('service %s stop' % _get_installed_service_name(service))
                sudo('initctl reload-configuration')
            elif service == 'app':
                sudo('rm %s' % app_config.UWSGI_SOCKET_PATH)
def vulture(): """ try to find dead code paths """ with api.quiet(): if not api.local('which vulture').succeeded: print 'vulture not found, installing it' api.local('pip install vulture') ignore_functions_grep = 'egrep -v "{0}"'.format( '|'.join(VULTURE_IGNORE_FUNCTIONS)) excluded = ",".join(VULTURE_EXCLUDE_PATHS) excluded_paths = (' --exclude ' + excluded) if excluded else '' vulture_cmd = '\n vulture {pkg_name}{exclude}{pipes}' vulture_cmd = vulture_cmd.format( pkg_name=PKG_NAME, exclude=excluded_paths, pipes='|'.join(['', ignore_functions_grep])) changedir = api.lcd(os.path.dirname(__file__)) warn_only = api.settings(warn_only=True) be_quit = api.hide('warnings') with contextlib.nested(changedir, warn_only, be_quit): result = api.local(vulture_cmd, capture=True) exit_code = result.return_code print result.strip() raise SystemExit(exit_code)
def tcp_port(self):
    """
    The tcp port used for the game server.
    Looked up remotely on first access and cached in self._tcp_port.
    """
    if not self._tcp_port:
        cmd = '''grep 'name="port" type="int"' conf.xml |awk -F[\<\>] '{print $3}' '''
        with settings(host_string=self.int_ip), cd('/app/{}/backend/apps'.format(self.name)):
            lines = run(cmd).splitlines()
        if len(lines) != 1:
            # Zero or multiple matches: the config layout is unexpected.
            raise Exception("Can't get tcp port using cmd: {}".format(cmd))
        self._tcp_port = int(lines[0])
    return self._tcp_port
def dns(self):
    """
    The dns for the game server.
    Resolved remotely on first access and cached in self._dns.
    """
    def get_dns():
        cmd = '''grep server_name %s.conf | awk '{print $2}' | tr -d ";" ''' % self.name
        # FIX: the vhost path is a fixed string; the original called
        # .format(self.name) on it although it has no placeholders —
        # a no-op left over from copy-paste. Dropped.
        with settings(host_string=self.int_ip), cd('/app/nginx/conf/vhost'):
            result = run(cmd)
        lines = result.splitlines()
        if len(lines) == 1:
            return lines[0]
        raise Exception("Can't get dns using cmd: {}".format(cmd))

    if not self._dns:
        self._dns = get_dns()
    return self._dns
def sync(game, region):
    """Mirror the production mobile-www document root onto the test root.

    NOTE(review): the original docstrings were mojibake ('????'); intent
    reconstructed from the code: rsync prod -> test, excluding a couple of
    runtime files.
    """
    conf = ConfigReader(game, region)
    ip = conf.get("mobile_www_ip")
    if conf.has_option("mobile_www_port"):
        # A non-default ssh port may be configured; Fabric accepts it as
        # part of the host string ("host:port").
        port = conf.getint("mobile_www_port")
        if port:
            ip = '{}:{}'.format(ip, port)

    root_dir_prod = conf.get("mobile_www_root")
    root_dir_test = conf.get("mobile_www_root_test")
    exclude_files = ['proxy.lua', 'lyServers']
    exclude_param = ' '.join('--exclude={}'.format(name) for name in exclude_files)

    with settings(host_string=ip):
        run('''rsync -aqP --delete {exclude} {root_dir_prod}/ {root_dir_test}/'''.format(
            exclude=exclude_param,
            root_dir_prod=root_dir_prod,
            root_dir_test=root_dir_test))
def web_www_incremental_update(game, region, version):
    """Run an incremental frontend release for game/region at *version*.

    NOTE(review): original docstrings were mojibake ('????'); reconstructed
    intent: regions with a non-empty 'gateway' option use the 'gchw'
    variant of the release script. When the 'gateway' option is absent,
    nothing is run (matches original behavior).
    """
    check_incremental_version(version)
    conf = ConfigReader(game, region)
    if conf.has_option("gateway"):
        script = ('/app/opbin/rundeck/online.frontend_gchw'
                  if conf.get('gateway') != ""
                  else '/app/opbin/rundeck/online.frontend')
        local('{} -g {} -t {}'.format(script, game, version))
def build(name, ask=True, **kwargs):
    """
    Build the malicious mote to its target hardware.

    :param name: experiment name (or absolute path to experiment)
    :param ask: ask confirmation
    :param path: expanded path of the experiment (dynamically filled in
     through 'command' decorator with 'expand')
    :param kwargs: simulation keyword arguments (see the documentation for
     more information)
    """
    def is_device_present():
        # Poll for the mote's character device.
        with settings(hide(*HIDDEN_ALL), warn_only=True):
            return local("if [ -c /dev/ttyUSB0 ]; then echo 'ok'; else echo 'nok'; fi",
                         capture=True) == 'ok'

    console = kwargs.get('console')
    counter, interval = 0.0, 0.5
    while not is_device_present():
        sleep(interval)
        counter += interval
        # BUGFIX: the timeout test used to sit in an 'elif' after the
        # modulo test, so it could never fire at multiples of 5 seconds
        # (120 included), and even when it fired the loop kept spinning
        # forever. Check the timeout first and leave the wait loop.
        if counter >= 120:
            logger.error("Something failed with the mote ; check that it mounts to /dev/ttyUSB0")
            break
        if counter % 5 == 0:
            logger.warning("Waiting for mote to be detected...")
    # NOTE(review): a trailing `return "Mote built on /dev/ttyUSB0"` in the
    # original was unreachable (dead code after this return) and was removed.
    return remake(name, build=True, **kwargs) if console is None \
        else console.do_remake(name, build=True, **kwargs)
def __init__(self, name, cwd=None):
    """Create a detached tmux session and cd it into *cwd*.

    Args:
        name (str): name of the new session
        cwd (str): initial directory of the session; defaults to the
            current working directory

    Raises:
        TmuxSessionExists: if a session with this name already exists.
    """
    self.name = name
    with settings(hide('warnings'), warn_only=True):
        # -d: do not attach to the new session; -s: name the session.
        outcome = local("tmux new -d -s {}".format(name))
        if outcome.failed:
            raise TmuxSessionExists()
    if cwd is None:
        cwd = os.getcwd()
    # Move the fresh session into the requested directory.
    self.run("cd {}".format(cwd))
def hosts(self):
    """Build Fabric host strings from `vagrant ssh-config` output.

    Side effect: rebinds fab.env.key_filename to a fresh list and fills it
    with each VM's IdentityFile. When guest_network_interface is set, the
    HostName:Port pair is replaced by the IP discovered on that interface.
    """
    keys = fab.env.key_filename = []
    hosts = []
    raw = fab.local('vagrant ssh-config', capture=True)
    # One dict of ssh-config keys per "Host" section.
    ssh_configs = [
        dict(row.lstrip().split(' ', 1)
             for row in section.strip().splitlines())
        for section in re.split('(?ms).(?=Host )', raw)
    ]
    for ssh_config in ssh_configs:
        keys.append(ssh_config['IdentityFile'])
        host_string = '{User}@{HostName}:{Port}'.format(**ssh_config)
        if self.guest_network_interface is not None:
            with fab.settings(
                host_string=host_string,
                # see https://github.com/fabric/fabric/issues/1522
                # disable_known_hosts=True,
            ):
                ip = self._get_ip()
            host_string = '{User}@{ip}'.format(ip=ip, **ssh_config)
        fab.puts('Added host: ' + host_string)
        hosts.append(host_string)
    return hosts
def _command(
    fabric_method,
    command,
    ignore_errors=False,
    quiet=True,
    hide=('running', 'aborts'),
    show=(),
    abort_exception=RuntimeError,
    **kwargs
):
    """Run *command* through the given Fabric operation with uniform
    logging, output hiding and error policy applied.

    Returns whatever the underlying fabric_method returns.
    """
    if quiet:
        # Quiet mode also silences command output and warnings.
        hide = hide + ('output', 'warnings')
    log('{method}: {command}'.format(
        method=fabric_method.__name__,
        command=command,
    ))
    context = fab.settings(
        fab.hide(*hide),
        fab.show(*show),
        abort_exception=abort_exception,
        warn_only=ignore_errors,
    )
    with context:
        return fabric_method(command, **kwargs)
def swarm_init():
    """
    enable Docker swarm mode
    """
    def _swarm_init():
        # The first host to run becomes the manager and caches the join
        # command on the function object; later hosts just run the join.
        if swarm_init.worker_join_command is not None:
            fabricio.run(swarm_init.worker_join_command, ignore_errors=True)
            return
        fabricio.run(
            'docker swarm init --advertise-addr {0}'.format(fab.env.host),
            ignore_errors=True,
        )
        join_token = fabricio.run(
            'docker swarm join-token --quiet manager',
            ignore_errors=True,
        )
        swarm_init.worker_join_command = (
            'docker swarm join --token {join_token} {host}:2377'
        ).format(join_token=join_token, host=fab.env.host)

    with fab.settings(hosts=hosts):
        fab.execute(_swarm_init)
def del_node(tag):
    """Destroy the server registered under *tag* and clear its config entry.

    :param tag: node tag as stored in the cluster config
    :return: True when the API confirmed deletion, False otherwise
    """
    settings, headers = get_cluster_config()
    # Guard clause: the tag must exist and carry a provider SUBID.
    if tag not in settings or "SUBID" not in settings[tag]:
        click.echo("\n--> Load %s improperly configured!!" % tag)
        return False

    payload = {'SUBID': settings[tag]["SUBID"]}
    req = requests.post(API_ENDPOINT + DESTROY_SERVER, data=payload, headers=headers)
    if req.status_code == 200:
        # Wipe the local record once the provider confirms.
        settings[tag] = {}
        save_on_config(tag, settings[tag])
        click.echo("\n--> Server %s deleted!!" % tag)
        return True
    # BUGFIX: the failure message said "create" although this path
    # deletes a server.
    click.echo("\n--> Couldn't delete server!!")
    return False
def create_db():
    """(Re)create the project database.

    When a remote settings target is active, uwsgi is stopped around the
    drop and restarted afterwards; when running locally the database
    superuser is (re)created as well.
    """
    with settings(warn_only=True), hide('output', 'running'):
        if env.get('settings'):
            execute('servers.stop_service', 'uwsgi')

        with shell_env(**app_config.database):
            local('dropdb --if-exists %s' % app_config.database['PGDATABASE'])

        if not env.get('settings'):
            # Local run: recreate the DB role from scratch.
            local('psql -c "DROP USER IF EXISTS %s;"' % app_config.database['PGUSER'])
            local('psql -c "CREATE USER %s WITH SUPERUSER PASSWORD \'%s\';"' % (
                app_config.database['PGUSER'],
                app_config.database['PGPASSWORD']))

        with shell_env(**app_config.database):
            local('createdb %s' % app_config.database['PGDATABASE'])

        if env.get('settings'):
            execute('servers.start_service', 'uwsgi')
def setup():
    """
    Setup servers for deployment.

    This does not setup services or push to S3. Run deploy() next.
    """
    require('settings', provided_by=['production', 'staging'])
    require('branch', provided_by=['stable', 'master', 'branch'])

    if not app_config.DEPLOY_TO_SERVERS:
        logger.error('You must set DEPLOY_TO_SERVERS = True in your app_config.py before setting up the servers.')
        return

    # Provision in dependency order.
    for step in (create_directories, create_virtualenv, clone_repo,
                 checkout_latest, install_requirements, setup_logs):
        step()
def deploy(target='dev', sha1=None):
    """Deploy the pyconkr-2017 site for *target* at the given commit.

    When *sha1* is omitted, the locally checked-out HEAD is used. The
    server checkout is hard-reset, dependencies installed, Django
    management commands run, and the worker reloaded via its fifo.
    """
    if sha1 is None:
        # get current working git sha1
        sha1 = local('git rev-parse HEAD', capture=True)

    home_dir = '/home/pyconkr/{target}.pycon.kr/pyconkr-2017'.format(target=target)
    suffix = '-dev' if target == 'dev' else ''
    python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2017' + suffix

    with settings(cd(home_dir),
                  shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')):
        for step in ('git fetch --all -p',
                     'git reset --hard ' + sha1,
                     'bower install',
                     '%s/bin/pip install -r requirements.txt' % python_env,
                     '%s/bin/python manage.py compilemessages' % python_env,
                     '%s/bin/python manage.py migrate' % python_env,
                     '%s/bin/python manage.py collectstatic --noinput' % python_env):
            run(step)

    # worker reload
    run('echo r > /var/run/pyconkr-2017-%s.fifo' % target)
def install_package():
    """Install the Hadroid Python package."""
    with tempfile.NamedTemporaryFile() as src_files:
        # Restrict the rsync to files tracked by git.
        local('git ls-files --exclude-standard > {}'.format(src_files.name))
        rsync_project(
            remote_dir=env.code_path,
            local_dir='./',
            extra_opts=('--rsync-path="sudo -u {} rsync" --files-from={}'
                        .format(env.app_user, src_files.name)),
            delete=True,
            default_opts='-thrvz')

    with sudosu(user=env.app_user), python.virtualenv(env.venv_path), \
            cd(env.code_path):
        with settings(warn_only=True):
            # The uninstall may fail on a first deploy; that's fine.
            sudo('pip uninstall -y hadroid')
        sudo('pip install -e .')
def update_project_file(pname):
    "detects if a box exists for project, updates project file"
    # Pairs of (dotted path inside the project file, new value).
    updates = [
        ('%s.vagrant.box' % pname, box_name(pname)),
        ('%s.vagrant.box-url' % pname, box_metadata_url(pname))
    ]
    if pname == 'basebox':
        # special handling when updating the basebox
        # leave the actual basebox project's box and box_url settings as-is
        updates = [
            ('defaults.vagrant.box', box_name(pname)),
            ('defaults.vagrant.box-url', box_metadata_url(pname))
        ]
    # NOTE(review): 'asdf' looks like a leftover placeholder for the real
    # project-file path -- confirm before relying on this function.
    project_file = 'asdf'
    project_data = core_utils.ordered_load(open(project_file, 'r'))
    for path, new_val in updates:
        # project.update_project_file returns the updated mapping.
        project_data = project.update_project_file(path, new_val, project_data)
    project.write_project_file(project_data)
    # Python 2 print statement.
    print 'wrote', project_file
    return project_data
def deploy():
    """Deploy stregsystem: stop apache, pull + migrate as the app user,
    then bring apache back up."""
    with cd("/data/stregsystem"):
        sudo("systemctl stop apache2.service")
        with settings(sudo_user='stregsystem'):
            sudo("git pull --ff-only")
            with prefix("source /data/stregsystem/env/bin/activate"):
                # Dependency install and Django maintenance, in order.
                for step in ("pip install -rrequirements.txt",
                             "python manage.py collectstatic --noinput",
                             "python manage.py migrate"):
                    sudo(step)
        sudo("systemctl start apache2.service")
def create_permanent_folder():
    """Create the permanent project folder; a failure (e.g. it already
    exists) only warns."""
    command = 'mkdir %s' % PERMANENT_PROJECT_FOLDER
    with settings(warn_only=True):
        sudo(command)
def stop_mysql(self):
    """Stop the local MySQL service.

    :return: (success flag, action name) tuple
    """
    with settings(hide('running', 'stdout')):
        outcome = local('service mysql stop')
    return outcome.return_code == 0, "stop_mysql"
def start_mysql(self):
    """Start the local MySQL service without starting replication.

    :return: (success flag, action name) tuple
    """
    with settings(hide('running', 'stdout')):
        outcome = local('service mysql start --skip-slave-start')
    return outcome.return_code == 0, "start_mysql"
def failover(self, *args, **kwargs):
    """Promote kwargs['master_host'] to master via mysqlmaster.py.

    :return: (success flag, detail message) tuple
    """
    cred_file = self.config.get('failover_creds', '/etc/mysql/failover.cnf')
    master = kwargs.get('master_host')
    if not master:
        return False, "No master_host given"
    cmd = ("/usr/bin/mysqlmaster.py switch --new-master {} "
           "--defaults-extra-file={} "
           "--dead-master --assume-yes").format(master, cred_file)
    with settings(hide('running')):
        succeeded = local(cmd).return_code == 0
    return succeeded, ""
def restart_nginx():
    """Kill any running nginx (best-effort) and start it with the combine
    config rooted at the current directory."""
    with settings(warn_only=True):
        # pkill may fail when nginx isn't running; that's fine.
        sudo('pkill nginx')
        sudo('nginx -c combine/etc/nginx.conf -p `pwd`')
def ensure_venv(name):
    """Create /venvs/<name> with python3.6 if it does not exist yet."""
    sudo('mkdir -p /venvs')
    sudo('chmod 777 /venvs')
    # warn_only: the existence test legitimately fails when absent.
    with settings(warn_only=True):
        already_there = run('test -d /venvs/%s' % name).succeeded
    if not already_there:
        run('python3.6 -m venv /venvs/%s' % name)
def run_remote_command(host_string, command, timeout=NODE_COMMAND_TIMEOUT,
                       jsonresult=False, catch_exitcodes=None):
    """Executes command on remote host via fabric run.
    Optionally timeout may be specified.
    If result of execution is expected in json format, then the
    output will be treated as json.
    """
    silence = hide(NODE_STATUSES.running, 'warnings', 'stdout', 'stderr')
    with settings(silence, host_string=host_string, warn_only=True):
        return execute_run(command,
                           timeout=timeout,
                           jsonresult=jsonresult,
                           catch_exitcodes=catch_exitcodes)
def get_monitors(self):
    """
    Parse ceph config and return ceph monitors list

    Resolution order: cached value, project settings MONITORS, then the
    client config's global/mon_host (falling back to 127.0.0.1).

    :return: list -> list of ip addresses (empty when no client config
        could be retrieved)
    :raises APIError: when the client config cannot be parsed
    """
    if self._monitors is not None:
        return self._monitors
    try:
        from ..settings import MONITORS
        self._monitors = [i.strip() for i in MONITORS.split(',')]
    except ImportError:
        # We have no monitor predefined configuration
        conf = self._get_client_config()
        if not conf:
            # BUGFIX: previously fell off with a bare `return` (None)
            # although the contract promises a list.
            return []
        fo = StringIO(conf)
        cp = ConfigParser()
        try:
            cp.readfp(fo)
        except Exception:
            raise APIError("Cannot get CEPH monitors."
                          " Make sure your CEPH cluster is available"
                          " and KuberDock is configured to use CEPH")
        if not cp.has_option('global', 'mon_host'):
            self._monitors = ['127.0.0.1']
        else:
            self._monitors = [
                i.strip() for i in cp.get('global', 'mon_host').split(',')
            ]
    return self._monitors
def get_storage_class():
    """Returns storage class according to current settings
    """
    return CephStorage if CEPH else LocalStorage
def create_directories():
    """
    Create server directories.
    """
    require('settings', provided_by=['production', 'staging'])

    run('mkdir -p %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
    # uwsgi config locations need root.
    sudo('mkdir -p /etc/uwsgi')
    sudo('mkdir -p /etc/uwsgi/sites')
    # run('mkdir -p /var/www/uploads/%(PROJECT_FILENAME)s' % app_config.__dict__)
def create_virtualenv():
    """
    Setup a server virtualenv.
    """
    require('settings', provided_by=['production', 'staging'])

    run('virtualenv -p %(SERVER_PYTHON)s %(SERVER_VIRTUALENV_PATH)s' % app_config.__dict__)
    # NOTE(review): each run() opens a fresh shell, so this 'source' does
    # not persist for later commands -- presumably kept as a sanity check.
    run('source %(SERVER_VIRTUALENV_PATH)s/bin/activate' % app_config.__dict__)
def clone_repo():
    """
    Clone the source repository.
    """
    require('settings', provided_by=['production', 'staging'])

    run('git clone %(REPOSITORY_URL)s %(SERVER_REPOSITORY_PATH)s' % app_config.__dict__)
    # Optional secondary remote (e.g. bitbucket mirror).
    if app_config.REPOSITORY_ALT_URL:
        run('git remote add bitbucket %(REPOSITORY_ALT_URL)s' % app_config.__dict__)
def install_requirements():
    """
    Install the latest requirements.
    """
    require('settings', provided_by=['production', 'staging'])

    pip_cmd = ('%(SERVER_VIRTUALENV_PATH)s/bin/pip install -U '
               '-r %(SERVER_REPOSITORY_PATH)s/requirements.txt') % app_config.__dict__
    run(pip_cmd)
def setup_logs():
    """
    Create log directories.
    """
    require('settings', provided_by=['production', 'staging'])

    log_path = app_config.__dict__
    sudo('mkdir %(SERVER_LOG_PATH)s' % log_path)
    # Hand the log dir to the app user.
    sudo('chown ubuntu:ubuntu %(SERVER_LOG_PATH)s' % log_path)
def install_crontab():
    """
    Install cron jobs script into cron.d.
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('cp %(SERVER_REPOSITORY_PATH)s/cronjobs/* /etc/cron.d' % app_config.__dict__)
def uninstall_crontab():
    """
    Remove a previously install cron jobs script from cron.d
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('rm /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)
def start_service(service):
    """
    Start a service on the server.
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('service %s start' % _get_installed_service_name(service))
def stop_service(service):
    """
    Stop a service on the server.
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('service %s stop' % _get_installed_service_name(service))
def restart_service(service):
    """
    Restart a service on the server.
    """
    # (Docstring previously said "Start"; this command restarts.)
    require('settings', provided_by=['production', 'staging'])
    sudo('service %s restart' % _get_installed_service_name(service))
def fabcast(command):
    """
    Actually run specified commands on the server specified
    by staging() or production().
    """
    require('settings', provided_by=['production', 'staging'])

    if not app_config.DEPLOY_TO_SERVERS:
        logging.error('You must set DEPLOY_TO_SERVERS = True in your app_config.py and setup a server before fabcasting.')
        # BUGFIX: sibling commands (setup) bail out after this error;
        # previously the run() below executed anyway against an
        # unconfigured server.
        return

    run('cd %s && bash run_on_server.sh fab %s $DEPLOYMENT_TARGET %s' % (
        app_config.SERVER_REPOSITORY_PATH, env.branch, command))