The following 49 code examples, extracted from open source Python projects, illustrate how to use fabric.api.run().
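For orientation before the extracted examples, here is a minimal, self-contained sketch of the pattern most of them share (Fabric 1.x). The host string and commands are hypothetical placeholders, not taken from any of the projects below:

from fabric.api import env, run, settings

env.host_string = 'deploy@example.com'  # hypothetical target host

def disk_usage():
    # run() executes the command on the remote host over SSH; the result
    # behaves like a string and also carries .succeeded, .failed and
    # .return_code attributes.
    result = run('df -h /')
    print(result)

    # warn_only prevents Fabric from aborting when the command exits non-zero.
    with settings(warn_only=True):
        check = run('test -d /var/www')
    if check.failed:
        print('/var/www does not exist on the remote host')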
def deploy(target='dev', sha1=None):
    if sha1 is None:
        # get current working git sha1
        sha1 = local('git rev-parse HEAD', capture=True)
    # server code reset to current working sha1
    home_dir = '/home/pyconkr/{target}.pycon.kr/pyconkr-2016'.format(target=target)
    if target == 'dev':
        python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016-dev'
    else:
        python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016'
    with settings(cd(home_dir), shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')):
        sudo('git fetch --all -p', user='pyconkr')
        sudo('git reset --hard ' + sha1, user='pyconkr')
        sudo('bower install', user='pyconkr')
        sudo('%s/bin/pip install -r requirements.txt' % python_env, user='pyconkr')
        sudo('%s/bin/python manage.py compilemessages' % python_env, user='pyconkr')
        sudo('%s/bin/python manage.py migrate' % python_env, user='pyconkr')
        sudo('%s/bin/python manage.py collectstatic --noinput' % python_env, user='pyconkr')
        # worker reload
        run('echo r > /var/run/pyconkr-2016-%s.fifo' % target)
def compose_run(cmd):
    """
    Calls docker compose run using the correct environment.
    :param cmd: run command, including container name.
    """
    opt = ['--rm']
    if service_name is None:
        print("please, provide service name")
        exit()
    with cd(project_dst):
        local_cmd = get_compose_cmd() + ['run']
        local_cmd += opt
        local_cmd += [service_name]
        local_cmd += cmd.split()
        get_fn()(' '.join(local_cmd))
def update(version=None):
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    if version:
        # try specified version first
        to_version = version
    elif not version and env.srvr in ['local', 'vagrant', 'dev']:
        # if local, vagrant or dev deploy to develop branch
        to_version = 'develop'
    else:
        # else deploy to master branch
        to_version = 'master'

    with cd(env.path), prefix(env.within_virtualenv):
        run('git pull')
        run('git checkout {}'.format(to_version))
def setup_system():
    """Setup the system dependencies and repo.
    """
    add_apt('fkrull/deadsnakes')
    apt_install(
        'emacs-nox',
        'python3.6-dev',
        'python3.6-gdbm',
        'python3.6-venv',
        'nginx',
        'nginx-core',
        'screen',
        'gcc',
        'libssl-dev',
    )
    ensure_venv('combine')
    sudo('mkdir -p /tmp/gunicorn_run')
    sudo('chmod 777 /tmp/gunicorn_run')
    restart_nginx()
    sudo('mkdir -p /var/run/watch-ip')
    sudo('chmod 777 /var/run/watch-ip')
def install_jenkins(*args, **kwargs):
    home = run('echo $HOME')
    version = kwargs.get('version', 'latest')
    init = os.path.join(home, 'init')
    jenkins_base_dir = os.path.join(home, 'jenkins')
    jenkins_init = os.path.join(init, 'jenkins')
    port = kwargs.get('port')

    if not exists(jenkins_base_dir):
        run('mkdir ' + jenkins_base_dir)
    if not exists(os.path.join(jenkins_base_dir, 'jenkins.war')):
        with hide('output'):
            run('wget http://mirrors.jenkins-ci.org/war/%s/jenkins.war -O ~/jenkins/jenkins.war' % version)
    if not exists(os.path.join(jenkins_base_dir, 'org.jenkinsci.main.modules.sshd.SSHD.xml')):
        with hide('output'):
            run('wget https://templates.wservices.ch/jenkins/org.jenkinsci.main.modules.sshd.SSHD.xml -O ~/jenkins/org.jenkinsci.main.modules.sshd.SSHD.xml')
    if not exists(init):
        run('mkdir ~/init')
    if not exists(jenkins_init):
        with hide('output'):
            run('wget https://templates.wservices.ch/jenkins/jenkins.init -O ~/init/jenkins')
        run('chmod 750 ~/init/jenkins')
        sed(jenkins_init, 'PORT=HTTP_PORT', 'PORT=%s' % port)
        run('~/init/jenkins start')
    else:
        run('~/init/jenkins restart')
def load_config(conf_file, base_conf=[], spec_conf=[], delimiter=' '):
    if exists(conf_file):
        with hide('output'):
            config_data = run('cat %s' % conf_file)
    else:
        config_data = ''
    confs = base_conf + spec_conf
    for conf in confs:
        param, value = conf.split(delimiter, 1)
        value = re.sub(r'#.*$', "", str(value))  # Delete comments
        match = re.search('^%s[ ]?%s[ ]?(.*)' % (param, delimiter), config_data, re.MULTILINE)
        if match:
            orig_value = match.group(1).strip()
            orig_line = '%s' % match.group(0).strip()
            if orig_value != str(value):
                if config_data and param in spec_conf:
                    continue  # Do not override already existing specific configurations
                print('%s %s change to %s' % (param, orig_value, value))
                sed(conf_file, orig_line, '%s%s%s' % (param, delimiter, value))
            else:
                print('Config OK: %s%s%s' % (param, delimiter, value))
        else:
            print('Add config %s%s%s' % (param, delimiter, value))
            append(conf_file, '%s%s%s' % (param, delimiter, value))
def install_lighttpd(*args, **kwargs):
    home = run('echo $HOME')
    lighttpd_dir = os.path.join(home, 'lighttpd')
    lighttpd_port = kwargs.get('port')
    run('mkdir -p %s' % lighttpd_dir)
    run('wget https://templates.wservices.ch/lighttpd/lighttpd.conf -O %s' % (os.path.join(lighttpd_dir, 'lighttpd.conf')))
    run('wget https://templates.wservices.ch/lighttpd/port.conf -O %s' % (os.path.join(lighttpd_dir, 'port.conf')))
    append(os.path.join(lighttpd_dir, 'port.conf'), 'server.port = %s' % lighttpd_port)
    if not exists(os.path.join(lighttpd_dir, 'django.conf')):
        run('wget https://templates.wservices.ch/lighttpd/django.conf -O %s' % (os.path.join(lighttpd_dir, 'django.conf')))
    run('mkdir -p ~/init')
    if not exists('~/init/lighttpd'):
        run('wget https://templates.wservices.ch/lighttpd/init -O ~/init/lighttpd')
        run('chmod 750 ~/init/lighttpd')
    if exists('~/lighttpd/lighttpd.pid'):
        run('~/init/lighttpd restart')
    else:
        run('~/init/lighttpd start')
def _upgrade_199(upd, with_testing, *args, **kwargs):
    ku = User.get_internal()
    pod = db.session.query(Pod).filter_by(
        name=KUBERDOCK_DNS_POD_NAME, owner=ku).first()
    nodes = Node.query.all()
    if not nodes:
        upd.print_log('No nodes found, exiting')
        return
    for node in nodes:
        k8s_node = node_utils._get_k8s_node_by_host(node.hostname)
        status, _ = node_utils.get_status(node, k8s_node)
        if status == NODE_STATUSES.running:
            if pod:
                pc = PodCollection()
                pc.delete(pod.id, force=True)
            create_dns_pod(node.hostname, ku)
            return
    raise helpers.UpgradeError("Can't find any running node to run dns pod")
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Replacing kubernetes with new kubernetes-node...')
    upd.print_log(
        helpers.remote_install(
            'kubernetes kubernetes-node-0.20.2-0.4.git323fde5.el7.centos.2',
            with_testing, 'swap'))
    upd.print_log('Replacing auth config with new...')
    put('/etc/kubernetes/configfile_for_nodes', '/etc/kubernetes/configfile')
    run("""sed -i '/^KUBELET_ARGS/ {s|--auth_path=/var/lib/kubelet/kubernetes_auth|--kubeconfig=/etc/kubernetes/configfile --register-node=false|}' /etc/kubernetes/kubelet""")
    run("""sed -i '/^KUBE_MASTER/ {s|http://|https://|}' /etc/kubernetes/config""")
    run("""sed -i '/^KUBE_MASTER/ {s|7080|6443|}' /etc/kubernetes/config""")
    run("""sed -i '/^KUBE_PROXY_ARGS/ {s|""|"--kubeconfig=/etc/kubernetes/configfile"|}' /etc/kubernetes/proxy""")
    service, res = helpers.restart_node_kubernetes(with_enable=True)
    if res != 0:
        raise helpers.UpgradeError('Failed to restart {0}. {1}'
                                   .format(service, res))
    else:
        upd.print_log(res)
    print run('rm -f /var/lib/kubelet/kubernetes_auth')
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00101_update.py
    upd.print_log('Update fslimit.py script...')
    upd.print_log(put('/var/opt/kuberdock/fslimit.py',
                      '/var/lib/kuberdock/scripts/fslimit.py',
                      mode=0755))

    # 00102_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh', PLUGIN_DIR + 'kuberdock')
    put('/var/opt/kuberdock/node_network_plugin.py', PLUGIN_DIR + 'kuberdock.py')
    run('systemctl restart kuberdock-watcher')

    # 00103_update.py
    upd.print_log('Enabling restart for ntpd.service')
    run('mkdir -p ' + SERVICE_DIR)
    run('echo -e "' + OVERRIDE_CONF + '" > ' + OVERRIDE_FILE)
    run('systemctl daemon-reload')
    run('systemctl restart ntpd')
def restart_node_kubernetes(with_docker=False, with_enable=False):
    """
    :return: Tuple: service on which restart was error or 0, + fabric res
    """
    res = run('systemctl daemon-reload')
    if res.failed:
        return 'Error_daemon-reload', res
    services = ('kubelet', 'kube-proxy',)
    if with_docker:
        services += ('docker',)
    for i in services:
        res = run('systemctl restart ' + i)
        if res.failed:
            return i, res
        if with_enable:
            res = run('systemctl reenable ' + i)
            if res.failed:
                return i, res
    return 0, 0
def fabric_retry(f, cmd, retry_pause, max_retries, exc_message=None,
                 upd=None, *f_args, **f_kwargs):
    """
    Retries the given function call until it succeeds.

    :param f: a function to retry, e.g. local or run
    :param cmd: command to execute
    :param retry_pause: pause between retries (seconds)
    :param max_retries: max retries num.
    :param exc_message: exception message template
    :param upd: db record of current update script
    :return: result of the cmd's stdout
    """
    for _ in range(max_retries):
        out = f(cmd, *f_args, **f_kwargs)
        if out.succeeded:
            return out
        if upd:
            upd.print_log('Retrying: {cmd}'.format(cmd=cmd))
        time.sleep(retry_pause)
    if exc_message:
        raise UpgradeError(exc_message.format(out=out), code=out.return_code)
def get_node_state(node):
    status = {}
    if node.get('status') == NODE_STATUSES.pending:
        status[NODE_STATUSES.pending] = False
        return status
    hostname = node['hostname']
    status[NODE_STATUSES.running] = node.get('status') == NODE_STATUSES.running
    env.host_string = hostname
    try:
        status['ntp'] = False
        status['services'] = False
        # AC-3105 Fix. Check if master can connect to node via ssh.
        if can_ssh_to_host(hostname):
            rv = run('ntpstat', quiet=True, timeout=SSH_TIMEOUT)
            if rv.succeeded:
                status['ntp'] = True
            status['ssh'] = True
            stopped = get_stopped_services(node_services, local=False)
            status['services'] = stopped if stopped else True
            status['disk'] = check_disk_space(local=False)
        else:
            status['ssh'] = False
    except (NetworkError, CommandTimeout):
        status['ssh'] = False
    return status
def health_check(post_upgrade_check=False):
    if not args.skip_health_check:
        print "Performing cluster health check..."
        msg = check_cluster()
        if msg:
            print >> sys.stderr, "There are some problems with cluster."
            print >> sys.stderr, msg
            if post_upgrade_check:
                print >> sys.stderr, "Some of them could be temporary due " \
                                     "restarts of various KuberDock " \
                                     "services/pods/nodes during upgrade " \
                                     "process and will gone in few minutes.\n" \
                                     "It's strongly recommended to re-run " \
                                     "health check later soon to ensure this " \
                                     "problems are gone and fix them if they " \
                                     "still remains."
            else:
                print >> sys.stderr, "Please, solve problems or use key " \
                                     "--skip-health-check (on your own risk)"
            return False
        print "Health check: OK"
    else:
        print "Skipping health check."
    return True
def execute_run(command, timeout=NODE_COMMAND_TIMEOUT, jsonresult=False,
                catch_exitcodes=None):
    try:
        result = run(command, timeout=timeout)
    except (CommandTimeout, NetworkError):
        raise NodeCommandTimeoutError(
            'Timeout reached while execute remote command'
        )
    if result.return_code != 0:
        if not catch_exitcodes or result.return_code not in catch_exitcodes:
            raise NodeCommandError(
                'Remote command `{0}` execution failed (exit code = {1})'
                .format(command, result.return_code)
            )
        raise NodeCommandWrongExitCode(code=result.return_code)
    if jsonresult:
        try:
            result = json.loads(result)
        except (ValueError, TypeError):
            raise NodeCommandError(
                u'Invalid json output of remote command: {}'.format(result))
    return result
def _create_drive(self, name, size): """ Actually creates a ceph rbd image of a given size. :param name: string -> drive name :param size: int -> drive size in GB :return: int -> return code of 'run' """ mb_size = 1024 * int(size) try: self.run_on_first_node( 'rbd {0} create {1} --size={2} --image-format=2'.format( get_ceph_credentials(), name, mb_size ) ) except NodeCommandError: # FIXME: we use warning for all possible errors. This is wrong, # we should check whether retcode == 17 (already exists case) and # print correct message with appropriate level current_app.logger.warning( u"Failed to create CEPH drive '%s', size = %s. " u"Possibly it's already exists or other error has happened", name, size) return 1 return 0
def _create_drive(self, name, size): """ Actually creates an amazon EBS of a given size. :param name: string -> drive name :param size: int -> drive size in GB :return: int -> return code of 'run' """ raw_drives = self._get_raw_drives() for vol in raw_drives: if vol.tags.get('Name', 'Nameless') == name: return 17 # errno.EEXIST vol = self._conn.create_volume(size, self._availability_zone) if vol: vol.add_tag('Name', name) while vol.status != 'available': time.sleep(1) vol.update() # self.start_stat(size, sys_drive_name=name) return 0
def package_install_requirements(path):
    """Use FPM's setup metadata library to package dependencies."""
    fpm_path = os.path.split(run('gem which fpm'))
    metadata_pkg_path = os.path.join(fpm_path[0], 'fpm/package')
    with cd(path):
        with shell_env(PYTHONPATH=metadata_pkg_path):
            run('python3 setup.py --command-packages=pyfpm get_metadata '
                '--output=package_metadata.json')
            get(remote_path='package_metadata.json',
                local_path='/tmp/package_metadata.json')
            with open('/tmp/package_metadata.json') as metadata_file:
                package_metadata = json.load(metadata_file)
            for dependency in package_metadata['dependencies']:
                if _run_fpm_python('\'%s\'' % (dependency),
                                   warn_only=True).failed:
                    # If this fails, it is likely that this is an Endaga python
                    # package and will be fulfilled from the Endaga repo.
                    print('Ignoring dependency %s' % dependency)
            # We don't want to clobber dependencies built previously.
            run('mv -n *.%s %s' % (env.pkgfmt, PKG_DIR), quiet=True)
            run('rm *.%s' % (env.pkgfmt, ), quiet=True)
            run('rm package_metadata.json')
def gitrepos(branch=None, fork='sympy'):
    """
    Clone the repo

    fab vagrant prepare (namely, checkout_cache()) must be run first. By
    default, the branch checked out is the same one as the one checked out
    locally. The master branch is not allowed--use a release branch (see the
    README). No naming convention is put on the release branch.

    To test the release, create a branch in your fork, and set the fork
    option.
    """
    with cd("/home/vagrant"):
        if not exists("sympy-cache.git"):
            error("Run fab vagrant prepare first")
    if not branch:
        # Use the current branch (of this git repo, not the one in Vagrant)
        branch = local("git rev-parse --abbrev-ref HEAD", capture=True)
    if branch == "master":
        raise Exception("Cannot release from master")
    run("mkdir -p repos")
    with cd("/home/vagrant/repos"):
        run("git clone --reference ../sympy-cache.git https://github.com/{fork}/sympy.git".format(fork=fork))
    with cd("/home/vagrant/repos/sympy"):
        run("git checkout -t origin/%s" % branch)
def test_tarball(release='2'):
    """
    Test that the tarball can be unpacked and installed, and that sympy
    imports in the install.
    """
    if release not in {'2', '3'}:  # TODO: Add win32
        raise ValueError("release must be one of '2', '3', not %s" % release)

    venv = "/home/vagrant/repos/test-{release}-virtualenv".format(release=release)
    tarball_formatter_dict = tarball_formatter()

    with use_venv(release):
        make_virtualenv(venv)
        with virtualenv(venv):
            run("cp /vagrant/release/{source} releasetar.tar".format(**tarball_formatter_dict))
            run("tar xvf releasetar.tar")
            with cd("/home/vagrant/{source-orig-notar}".format(**tarball_formatter_dict)):
                run("python setup.py install")
            run('python -c "import sympy; print(sympy.__version__)"')
def release(branch=None, fork='sympy'):
    """
    Perform all the steps required for the release, except uploading

    In particular, it builds all the release files, and puts them in the
    release/ directory in the same directory as this one. At the end, it
    prints some things that need to be pasted into various places as part of
    the release.

    To test the release, push a branch to your fork on GitHub and set the
    fork option to your username.
    """
    remove_userspace()
    gitrepos(branch, fork)
    # This has to be run locally because it itself uses fabric. I split it out
    # into a separate script so that it can be used without vagrant.
    local("../bin/mailmap_update.py")
    source_tarball()
    build_docs()
    copy_release_files()
    test_tarball('2')
    test_tarball('3')
    compare_tar_against_git()
    print_authors()
def build_docs():
    """
    Build the html and pdf docs
    """
    with cd("/home/vagrant/repos/sympy"):
        run("mkdir -p dist")
        venv = "/home/vagrant/docs-virtualenv"
        make_virtualenv(venv, dependencies=['sphinx==1.1.3', 'numpy'])
        with virtualenv(venv):
            with cd("/home/vagrant/repos/sympy/doc"):
                run("make clean")
                run("make html-errors")
                with cd("/home/vagrant/repos/sympy/doc/_build"):
                    run("mv html {html-nozip}".format(**tarball_formatter()))
                    run("zip -9lr {html} {html-nozip}".format(**tarball_formatter()))
                    run("cp {html} ../../dist/".format(**tarball_formatter()))
                run("make clean")
                run("make latex")
                with cd("/home/vagrant/repos/sympy/doc/_build/latex"):
                    run("make")
                    run("cp {pdf-orig} ../../../dist/{pdf}".format(**tarball_formatter()))
def test_pypi(release='2'):
    """
    Test that the sympy can be pip installed, and that sympy imports in the
    install.
    """
    # This function is similar to test_tarball()
    version = get_sympy_version()

    release = str(release)

    if release not in {'2', '3'}:  # TODO: Add win32
        raise ValueError("release must be one of '2', '3', not %s" % release)

    venv = "/home/vagrant/repos/test-{release}-pip-virtualenv".format(release=release)

    with use_venv(release):
        make_virtualenv(venv)
        with virtualenv(venv):
            run("pip install sympy")
            run('python -c "import sympy; assert sympy.__version__ == \'{version}\'"'.format(version=version))
def deploy():
    test()
    with cd('/home/deploy/webapp'):
        run("git pull")
        run("pip install -r requirements.txt")
        sudo("cp supervisord.conf /etc/supervisor/conf.d/webapp.conf")
        sudo("cp nginx.conf /etc/nginx/sites-available/your_domain")
        sudo("ln -sf /etc/nginx/sites-available/your_domain "
             "/etc/nginx/sites-enabled/your_domain")
        sudo("cp apache.conf /etc/apache2/sites-available/your_domain")
        sudo("ln -sf /etc/apache2/sites-available/your_domain "
             "/etc/apache2/sites-enabled/your_domain")
        sudo("service nginx restart")
        sudo("service apache2 restart")
def run(self):
    from soda.host import nginx  # Import here to avoid wrong Fabric --list

    # Stop nginx first
    execute(nginx.stop)

    user = settings(user='root')
    cwd = cd(self.roledef.get('letsencrypt_dir', '/opt/letsencrypt'))
    warn_only = settings(warn_only=True)

    # Generate the certificate
    with user, cwd, warn_only:
        result = run('./letsencrypt-auto renew --standalone')

    # Display a result message
    if result.succeeded:
        display.success('SSL certificates were successfully renewed!')
    else:
        display.error(
            'Failed to renew SSL certificates.', abort_task=False)

    # Put nginx back up
    execute(nginx.start)
def run(self, revision):
    with hide('everything'), self.user, self.in_app:
        # Get the used remote name
        git_remote = run('git remote')

        # Update the app source code
        display.info(
            'Fetching source code from "{}"...'.format(git_remote))
        print(run('git fetch -p {}'.format(git_remote)))

        # Check working directory
        display.info('Checking app directory...')
        git_status = run('git status --porcelain -uno').strip()
        if git_status:
            print(git_status)
            display.error(
                'App directory is dirty.', abort_task=not env.force)

        # Check out to specified revision
        display.info('Checking out to specified revision...')
        print(run('git checkout -f {}/{}'.format(
            git_remote, revision)))
def setting_trans_env(server_name):
    conf_name = 'download_{}.conf'.format(server_name)
    root_dir = '/app/opbak/download_{}_{}'.format(TIMESTAMP, server_name)
    mk_remote_dir(root_dir)

    try:
        with cd('/app/nginx/conf/vhost'):
            run('''echo -e "server {\\n listen 80;\\n server_name %s;\\n root %s;\\n index Main.html;\\n access_log logs/default.access.log main;\\n location / {\\n expires 0;\\n }\\n\\n error_page 404 500 502 503 504 /404.html;\\n}" >%s''' % (server_name, root_dir, conf_name))
        reload_nginx()
        yield
    finally:
        with cd('/app/nginx/conf/vhost'):
            run('rm -f {}'.format(conf_name))
        reload_nginx()
        run('rm -rf {}'.format(root_dir))
def tcp_port(self):
    """
    The tcp port used for the game server.
    Will try to get only once and save to self._tcp_port for later use.
    """
    def get_tcp_port():
        cmd = '''grep 'name="port" type="int"' conf.xml |awk -F[\<\>] '{print $3}' '''
        with settings(host_string=self.int_ip), cd('/app/{}/backend/apps'.format(self.name)):
            result = run(cmd)
        lines = result.splitlines()
        if len(lines) == 1:
            return int(lines[0])
        else:
            raise Exception("Can't get tcp port using cmd: {}".format(cmd))

    if not self._tcp_port:
        self._tcp_port = get_tcp_port()
    return self._tcp_port
def fetch(prune=True):
    ''' The git fetch command. '''
    run('git fetch' + (' --prune' if prune else ''))
def checkout(branch, force=False):
    ''' The git checkout command. '''
    force_flag = '-f ' if force else ''
    run('git checkout {0}{1}'.format(force_flag, branch))
def pull(branch):
    ''' The git pull command. '''
    run('git pull origin %s' % branch)  # TODO: Custom origin
def sync(branch):
    ''' Sync the current HEAD with the remote(origin)'s branch '''
    run('git reset --hard origin/%s' % branch)  # TODO: Custom origin
def last_commit(remote=True, short=False):
    '''
    Get the last commit of the git repository.

    Note: This assumes the current working directory (on remote or local host)
    to be a git repository. So, make sure current directory is set before
    using this.
    '''
    cmd = 'git rev-parse{}HEAD'.format(' --short ' if short else ' ')
    with hide('everything'):
        result = run(cmd) if remote else local(cmd, capture=True)
    return result.strip()
def show_last_commit():
    ''' Display the last commit. '''
    run('git log -1')
def run(command, remote=True):
    ''' Run a command using fabric. '''
    if remote:
        return _run(command)
    else:
        return _local(command)
def get_fn():
    """
    Returns the correct function call for the environment.
    """
    return run if renv == 'prd' else local
def on_service(name):
    """
    Define service where command should run
    """
    global service_name
    service_name = name
def make_doc():
    with lcd('../docs'):
        local('make html')
        local('cp -rf _build/html/* /var/www/mendelmd_static/docs/')

# def backup():
#     run(' mysqldump -u root -p mendelmd14 | gzip > db_backup/mendelmd151012.sql.gz ')
def create_sample_data():
    # backup all users
    # with cd('/projects/www/mendelmd14'):
    #     run('mysqldump -u root -p mendelmd14 auth_user account_account profiles_profile | gzip > db_backup/users.sql.gz')
    #     # get sample from individuals
    #     run('mysqldump -u root -p --where="individual_id < 16" mendelmd14 individuals_variant | gzip > db_backup/individual_variants_sample.sql.gz')
    #     run('mysqldump -u root -p --where="id < 16" mendelmd14 individuals_individual | gzip > db_backup/individuals_sample.sql.gz')
    get('/projects/www/mendelmd14/db_backup/users.sql.gz', '/home/raony/sites/mendelmd14/db_backup/')
    get('/projects/www/mendelmd14/db_backup/individual_variants_sample.sql.gz', '/home/raony/sites/mendelmd14/db_backup/')
    get('/projects/www/mendelmd14/db_backup/individuals_sample.sql.gz', '/home/raony/sites/mendelmd14/db_backup/')
def loaddata():
    # Load user and sample from individuals
    local('gunzip < db_backup/users.sql.gz | mysql -u root -p mendelmd')
    local('gunzip < db_backup/individual_variants_sample.sql.gz | mysql -u root -p mendelmd')
    local('gunzip < db_backup/individuals_sample.sql.gz | mysql -u root -p mendelmd')
    # run('gunzip < db_backup/mendelmd151012.sql.gz | mysql -u root -p mendelmd14')
    # local("""python manage.py loaddata db_backup/all_without_individuals.json.gz""")
def deploy(message="changes (fabric)"):
    local('git add .; git commit -m "%s"; git push' % (message))
    with cd('/projects/www/mendelmd'):
        # run('git reset --hard HEAD')
        run('git pull')
        # run('source virtualenvwrapper.sh && workon genome_research && python manage.py syncdb --noinput')
        run('sudo /etc/init.d/apache2 restart')
def unlock():
    """OS X servers need to be unlocked"""
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    with cd(env.path):
        run('security unlock-keychain')
def create_virtualenv():
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    with quiet():
        env_vpath = os.path.join(env.envs_path, 'dprr-' + env.srvr)
        if run('ls {}'.format(env_vpath)).succeeded:
            print(
                green('virtual environment at [{}] exists'.format(env_vpath)))
            return

    print(yellow('setting up virtual environment in [{}]'.format(env_vpath)))
    run('virtualenv {}'.format(env_vpath))
def clone_repo():
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    with quiet():
        if run('ls {}'.format(os.path.join(env.path, '.git'))).succeeded:
            print(green(('repository at'
                         ' [{}] exists').format(env.path)))
            return

    print(yellow('cloning repository to [{}]'.format(env.path)))
    run('git clone --recursive {} {}'.format(REPOSITORY, env.path))
def makemigrations(app=None):
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    if env.srvr in ['dev', 'stg', 'liv']:
        print(yellow('Do not run makemigrations on the servers'))
        return

    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py makemigrations {}'.format(app if app else ''))
def update_index():
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py build_solr_schema > schema.xml')
        run('mv schema.xml ../../solr/collection1/conf/')
        sudo('service tomcat7-{} restart'.format(env.srvr))
        run('./manage.py update_index')
def clear_cache():
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py clear_cache')
def collect_static(process=False):
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    if env.srvr in ['local', 'vagrant']:
        print(yellow('Do not run collect_static on local servers'))
        return

    with cd(env.path), prefix(env.within_virtualenv):
        run('./manage.py collectstatic {process} --noinput'.format(
            process=('--no-post-process' if not process else '')))
def install_requirements():
    require('srvr', 'path', 'within_virtualenv', provided_by=env.servers)

    reqs = 'requirements-{}.txt'.format(env.srvr)
    try:
        assert os.path.exists(reqs)
    except AssertionError:
        reqs = 'requirements.txt'

    with cd(env.path), prefix(env.within_virtualenv):
        run('pip install -U -r {}'.format(reqs))