The following 23 code examples, extracted from open source Python projects, illustrate how to use fabric.api.put().
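For orientation, the sketch below shows the two call shapes that recur throughout these examples: uploading a local path and uploading an in-memory file-like object. It is a minimal illustration of Fabric 1.x usage, not taken from any of the projects below; the host name and all paths are hypothetical placeholders.

# Minimal sketch of fabric.api.put() usage (Fabric 1.x, Python 2).
# The host and all paths here are hypothetical placeholders.
from StringIO import StringIO

from fabric.api import env, put

env.host_string = 'root@node1.example.com'  # hypothetical target host

# Upload a local file and set an explicit mode on the remote copy;
# put() returns a list-like result whose .failed attribute collects
# any paths that could not be uploaded.
result = put('deploy.sh', '/usr/local/bin/deploy.sh', mode=0755)

# put() also accepts a file-like object as the source; use_sudo uploads
# to a temporary location first, then moves the file into place as root.
put(StringIO('key=value\n'), '/etc/myapp.conf', use_sudo=True)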
def parse_args():
    args = argparse.ArgumentParser("Setup ceph client to remote host")
    args.add_argument('node',
                      help="Name or IP address of node to install to")
    args.add_argument('-C', '--ceph',
                      help="Name or IP address of ceph admin node")
    args.add_argument('-u', '--user', default='root', help="Username")
    args.add_argument('-p', '--password', help="Password")
    args.add_argument('-d', '--deploy-dir', default='/var/opt/deploy',
                      help="Directory to put deploy script to")
    args.add_argument('-D', '--deploy-script', default='ceph_install.sh',
                      help="Deploy script")
    args.add_argument('-w', '--app-dir', default='/var/opt/kuberdock',
                      help="Directory of web-application")
    args.add_argument('-c', '--conf-dir', default='/etc/ceph',
                      help="Directory of ceph-configs")
    args.add_argument('-T', '--temp-dir', default=TMPDIR,
                      help="Temp directory")
    return args.parse_args()
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00101_update.py
    upd.print_log('Update fslimit.py script...')
    upd.print_log(put('/var/opt/kuberdock/fslimit.py',
                      '/var/lib/kuberdock/scripts/fslimit.py',
                      mode=0755))

    # 00102_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh', PLUGIN_DIR + 'kuberdock')
    put('/var/opt/kuberdock/node_network_plugin.py', PLUGIN_DIR + 'kuberdock.py')
    run('systemctl restart kuberdock-watcher')

    # 00103_update.py
    upd.print_log('Enabling restart for ntpd.service')
    run('mkdir -p ' + SERVICE_DIR)
    run('echo -e "' + OVERRIDE_CONF + '" > ' + OVERRIDE_FILE)
    run('systemctl daemon-reload')
    run('systemctl restart ntpd')
def update_config(self, content, path):
    old_file = six.BytesIO()
    if files.exists(path, use_sudo=self.sudo):
        fab.get(remote_path=path, local_path=old_file, use_sudo=self.sudo)
    old_content = old_file.getvalue()
    need_update = content != old_content
    if need_update:
        fabricio.move_file(
            path_from=path,
            path_to=path + '.backup',
            sudo=self.sudo,
            ignore_errors=True,
        )
        fab.put(six.BytesIO(content), path, use_sudo=self.sudo, mode='0644')
        fabricio.log('{path} updated'.format(path=path))
    else:
        fabricio.log('{path} not changed'.format(path=path))
    return need_update
def _update(self, compose_file, new_settings, force=False):
    if not force:
        settings, digests = self.current_settings
        digests = digests and json.loads(b64decode(digests).decode())
        if settings == new_settings and digests is not None:
            new_digests = self._get_digests(digests)
            if digests == new_digests:
                return False
    with fab.cd(self.temp_dir):
        fab.put(six.BytesIO(compose_file), self.actual_compose_file)
        fabricio.run('docker stack deploy {options} {name}'.format(
            options=utils.Options(self.options),
            name=self.name,
        ))
    self.stack_updated.set()
    return True
def write_environment_info(stackname, overwrite=False):
    """Looks for /etc/cfn-info.json and writes one if not found.
    Must be called with an active stack connection.

    This gives Salt the outputs available at stack creation, but that
    were not available at template compilation time."""
    if not files.exists("/etc/cfn-info.json") or overwrite:
        LOG.info('no cfn outputs found or overwrite=True, writing /etc/cfn-info.json ...')
        infr_config = utils.json_dumps(template_info(stackname))
        return put(StringIO(infr_config), "/etc/cfn-info.json", use_sudo=True)
    LOG.debug('cfn outputs found, skipping')
    return []
def upload(local_path, remote_path):
    ''' Upload one or more files to a remote host. '''
    return put(local_path, remote_path)
def save_remote_file(path, data):
    ''' Save data to the remote file. '''
    fd = StringIO(data)
    put(fd, path)
    return fd.getvalue()
def load_db_dump(dump_file):
    """Given a dump on your home dir on the server, load it to the server's
    database, overwriting any existing data. BE CAREFUL!"""
    require('environment')
    if not files.exists("%(home)s/.pgpass" % env):
        abort("Please get a copy of .pgpass and put it in your home dir")
    temp_file = os.path.join(env.home, '%(project)s-%(environment)s.sql' % env)
    put(dump_file, temp_file)
    run('psql -h %s -U %s -d %s -f %s' % (env.db_host, env.db_user,
                                          env.db, temp_file))
def upgrade_node(cls, upd, with_testing, env, *args, **kwargs):
    upd.print_log('Update network plugin...')
    run('ipset -exist create kuberdock_ingress hash:ip')
    put('/var/opt/kuberdock/node_network_plugin.sh',
        cls.PLUGIN_PATH + 'kuberdock')
def upgrade_node(cls, upd, with_testing, env, *args, **kwargs):
    upd.print_log('Reject outgoing smtp packets to 25 port')
    with quiet():
        put_rv = put('/var/opt/kuberdock/node_network_plugin.sh',
                     PLUGIN_PATH + 'kuberdock')
        if put_rv.failed:
            raise helpers.UpgradeError(
                "Can't update node_network_plugin.sh")
        check = run(cls.RULE.format('C'))
        if check.return_code:
            rv = run(cls.RULE.format('I'))
            if rv.return_code:
                raise helpers.UpgradeError(
                    "Can't add iptables rule: {}".format(rv))
def upgrade_node(cls, upd, with_testing, env, *args, **kwargs):
    upd.print_log('Update network plugin...')
    put('/var/opt/kuberdock/node_network_plugin.sh',
        PLUGIN_PATH + 'kuberdock')
    put('/var/opt/kuberdock/node_network_plugin.py',
        PLUGIN_PATH + 'kuberdock.py')
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Update fslimit.py script...')
    upd.print_log(put('/var/opt/kuberdock/fslimit.py',
                      '/var/lib/kuberdock/scripts/fslimit.py',
                      mode=0755))
    upd.print_log('Update FS limits on nodes...')
    upd.print_log(run('rm -f /etc/{projects,projid}'))
    spaces = dict((i, (s, u)) for i, s, u in Kube.query.values(
        Kube.id, Kube.disk_space, Kube.disk_space_units))
    limits = []
    for user in User.query:
        for pod in PodCollection(user).get(as_json=False):
            if pod.get('host') != env.host_string:
                continue
            for container in pod['containers']:
                container_id = container['containerID']
                if container_id == container['name']:
                    continue
                space, unit = spaces.get(pod['kube_type'], (0, 'GB'))
                disk_space = space * container['kubes']
                disk_space_units = unit[0].lower() if unit else ''
                if disk_space_units not in ('', 'k', 'm', 'g', 't'):
                    disk_space_units = ''
                limits.append([container_id, disk_space, disk_space_units])
    if not limits:
        return
    lim_str = ' '.join(['{0}={1}{2}'.format(c, s, u) for c, s, u in limits])
    upd.print_log(
        run('python /var/lib/kuberdock/scripts/fslimit.py {0}'.format(lim_str))
    )
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    upd.print_log('Setup network plugin:')
    upd.print_log('Install packages...')
    upd.print_log(helpers.remote_install('python-requests', with_testing))
    upd.print_log(helpers.remote_install('python-ipaddress', with_testing))
    upd.print_log(helpers.remote_install('ipset', with_testing))
    upd.print_log(run(
        """sed -i '/^KUBELET_ARGS/ {s|"\(.*\) --network-plugin=kuberdock"|"\\1"|}' /etc/kubernetes/kubelet"""))
    upd.print_log(run(
        """sed -i '/^KUBELET_ARGS/ {s|"\(.*\) --register-node=false"|"\\1 --register-node=false --network-plugin=kuberdock"|}' /etc/kubernetes/kubelet"""))
    upd.print_log(run("mkdir -p {0}/data".format(PLUGIN_DIR)))
    upd.print_log(put('/var/opt/kuberdock/node_network_plugin.sh',
                      PLUGIN_DIR + 'kuberdock', mode=0755))
    upd.print_log(put('/var/opt/kuberdock/node_network_plugin.py',
                      PLUGIN_DIR + 'kuberdock.py', mode=0755))
    upd.print_log(run('chmod +x {0}'.format(PLUGIN_DIR + 'kuberdock')))
    upd.print_log(
        run("cat > /etc/systemd/system/kuberdock-watcher.service << 'EOF' {0}"
            .format(SERVICE_FILE))
    )
    run('systemctl daemon-reload')
    upd.print_log(run('systemctl reenable kuberdock-watcher'))
def update(self, tag=None, registry=None, account=None, force=False):
    if not fab.env.parallel:
        fab.abort(
            'Master-slave configuration update requires parallel mode. '
            'Use Fabric\'s `--parallel` option to enable this mode '
            'for a current session.'
        )

    self.instances.put(None)

    try:
        recovery_config_updated = self.update_recovery_config(
            tag=tag,
            registry=registry,
            account=account,
        )

        container_updated = super(
            StreamingReplicatedPostgresqlContainer, self,
        ).update(force=force, tag=tag, registry=registry, account=account)

        if not container_updated and recovery_config_updated:
            self.reload()
        self.master_obtained.set()  # one who first comes here is master
        return container_updated or recovery_config_updated
    except Exception as exception:
        self.multiprocessing_data.exception = exception
        raise
    finally:
        try:
            self.master_lock.release()
        except ValueError:  # ignore "released too many times" error
            pass
        self.instances.get()
        self.instances.task_done()
        self.instances.join()  # wait until all instances will be updated

        # reset state at the end to prevent fail of the next Fabric command
        self.master_obtained.clear()
def monkey_patch():
    """ apply monkey patch """
    # replace fabricio.run by fabricio.local to run all commands on localhost
    fabricio.run = functools.partial(fabricio.local, capture=True)

    # uncomment row below to disable file uploading (e.g. docker-compose.yml)
    # fab.put = lambda *args, **kwargs: None
def _importlayers(t=None, local=None, drop=None, user=None, overwrite=None,
                  category=None, keywords=None, private=None):
    t = _request_input("Type (vanilla/geoshape)", t, True, options=GEONODE_TYPES)
    local = _request_input("Local File Path", local, True)
    drop = _request_input("Remote Drop Folder", drop, True)
    user = _request_input("User", user, False)
    overwrite = _request_input("Overwrite", overwrite, False)
    category = _request_input("Category", category, False, options=ISO_CATEGORIES)
    keywords = _request_input("Keywords (Comma-separated)", keywords, False)
    private = _request_input("Private", private, True)
    path_managepy = PATH_MANAGEPY_GS if t.lower() == "geoshape" else PATH_MANAGEPY_VN
    if _request_continue():
        sudo("[ -d {d} ] || mkdir {d}".format(d=drop))
        remote_files = put(local, drop, mode='0444', use_sudo=True)
        if remote_files:
            with cd(path_managepy):
                template = "source {a}; python manage.py importlayers {paths}"
                if user:
                    template += " -u {u}".format(u=user)
                if overwrite:
                    template += " -o"
                if category:
                    template += " -c {c}".format(c=category)
                if keywords:
                    template += " -k {kw}".format(kw=keywords)
                if private:
                    template += " -p"
                c = template.format(a=PATH_ACTIVATE, paths=(" ".join(remote_files)))
                sudo(c)
        else:
            print "No files uploaded"
def scp_to_remote(*files):
    for file in files:
        total_file_path = file.rsplit('/', 1)
        if len(total_file_path) > 1:
            file_path, file_name = total_file_path[0], total_file_path[1]
        else:
            file_path, file_name = '', total_file_path[0]
        put(file, os.path.join(base_dir, file_path))
    # [ put(file, os.path.join(base_dir, file)) for file in files ]
def _put_temporary_script(script_filename):
    local_script = join(config.SCRIPTS_PATH, script_filename)
    start = datetime.now()
    timestamp_marker = start.strftime("%Y%m%d%H%M%S")
    remote_script = join('/tmp', os.path.basename(script_filename) +
                         '-' + timestamp_marker)
    put(local_script, remote_script)
    return remote_script
def upload_master_builder_key(key):
    private_key = "/root/.ssh/id_rsa"
    try:
        # NOTE: overwrites any existing master key on machine being updated
        operations.put(local_path=key, remote_path=private_key, use_sudo=True)
    finally:
        key.close()
def upload_master_configuration(master_stack, master_configuration):
    with stack_conn(master_stack, username=BOOTSTRAP_USER):
        operations.put(local_path=master_configuration,
                       remote_path='/etc/salt/master', use_sudo=True)
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    run('yum --enablerepo=kube,kube-testing clean metadata')

    # 00091_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("""rm -f /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | grep -v "docker-cleaner.sh" | crontab - """)

    # 00092_update.py
    put('/var/opt/kuberdock/node_network_plugin.sh',
        '/usr/libexec/kubernetes/kubelet-plugins/net/exec/kuberdock/kuberdock')

    # 00093_update.py
    upd.print_log('Use custom log template with rsyslog...')
    run("sed -i '/^{0}/d' {1}".format(PARAM1, CONF))
    run("sed -i '/^{0}/d' {1}".format(PARAM2, CONF))
    run("sed -i '$ a{0} {1}' {2}".format(PARAM1, TEMPLATE, CONF))
    run("sed -i '$ a{0};{1}' {2}".format(PARAM2, TEMPLATE_NAME, CONF))
    run('systemctl restart rsyslog')

    # 00096_update.py
    upd.print_log('Disabling swap and backing up fstab to {0}...'.format(FSTAB_BACKUP))
    run('swapoff -a')
    run('mkdir -p /var/lib/kuberdock/backups')
    run('test -f {0} && echo "{0} already exists" || cp /etc/fstab {0}'.format(FSTAB_BACKUP))
    run("sed -r -i '/[[:space:]]+swap[[:space:]]+/d' /etc/fstab")

    # 00097_update.py
    upd.print_log('Update elasticsearch for logs...')
    upd.print_log(put('/var/opt/kuberdock/make_elastic_config.py',
                      '/var/lib/elasticsearch', mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)
    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)
    run('docker pull kuberdock/elasticsearch:2.2')
    run('docker pull kuberdock/fluentd:1.5')
    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})
def upgrade_node(upd, with_testing, env, *args, **kwargs):
    # 00076_update.py
    upd.print_log('Add kernel parameters to make pod isolation work...')
    run('sed -i "/net.bridge.bridge-nf-call-ip6\?tables/d" {0}'.format(CONF))
    run("echo net.bridge.bridge-nf-call-iptables = 1 >> {0}".format(CONF))
    run("echo net.bridge.bridge-nf-call-ip6tables = 1 >> {0}".format(CONF))
    run("sysctl -w net.bridge.bridge-nf-call-iptables=1")
    run("sysctl -w net.bridge.bridge-nf-call-ip6tables=1")

    # 00079_update.py
    upd.print_log('Copy Elasticsearch config maker...')
    upd.print_log(put('/var/opt/kuberdock/make_elastic_config.py',
                      '/var/lib/elasticsearch', mode=0755))
    upd.print_log('Update logging pod...')
    ki = User.filter_by(username=KUBERDOCK_INTERNAL_USER).first()
    pod_name = get_kuberdock_logs_pod_name(env.host_string)
    for pod in PodCollection(ki).get(as_json=False):
        if pod['name'] == pod_name:
            break
    else:
        return

    PodCollection(ki).delete(pod['id'], force=True)
    logs_config = get_kuberdock_logs_config(
        env.host_string,
        pod_name,
        pod['kube_type'],
        pod['containers'][0]['kubes'],
        pod['containers'][1]['kubes'],
        MASTER_IP,
        ki.get_token(),
    )
    check_internal_pod_data(logs_config, user=ki)
    logs_pod = PodCollection(ki).add(logs_config, skip_check=True)
    run('docker pull kuberdock/elasticsearch:1.5')
    PodCollection(ki).update(logs_pod['id'], {'command': 'start'})

    # 00082_update.py
    upd.print_log('Upgrading nodes with docker-cleaner.sh')
    run("cat > /var/lib/kuberdock/scripts/docker-cleaner.sh << 'EOF' {0}"
        .format(DOCKERCLEANER))
    run("""chmod +x /var/lib/kuberdock/scripts/docker-cleaner.sh""")
    run("""crontab -l | { cat; echo "0 */6 * * * /var/lib/kuberdock/scripts/docker-cleaner.sh"; } | crontab - """)
def ssh_upload(self, remote_path, local_path, use_sudo=False, quiet=False,
               **kwargs):
    """
    Upload a file or directory to the virtual machine

    :param remote_path: The remote location
    :param local_path: The local path
    :param use_sudo: If True, it runs as sudo
    :param quiet: Whether to hide the stdout/stderr output or not

    :return: The list of uploaded files

    :raise: UploadError: If the task fails
    """
    if self._vm_object:
        self._wait_for_ssh_service(
            kwargs['vcdriver_vm_ssh_username'],
            kwargs['vcdriver_vm_ssh_password']
        )
        with fabric_context(
                self.ip(),
                kwargs['vcdriver_vm_ssh_username'],
                kwargs['vcdriver_vm_ssh_password']
        ):
            if quiet:
                with hide('everything'):
                    result = put(local_path, remote_path, use_sudo=use_sudo)
            else:
                result = put(local_path, remote_path, use_sudo=use_sudo)
            if result.failed:
                raise UploadError(
                    local_path=local_path,
                    remote_path=remote_path
                )
            else:
                return result