The following 35 code examples, collected from open-source Python projects, illustrate how to use errno.ENOSPC ("No space left on device").
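Before the project examples, here is a minimal sketch of the basic pattern they all build on: catch OSError, compare e.errno against errno.ENOSPC, and handle a full disk separately from other I/O failures. The helper name append_log and the error message are illustrative only, not taken from any project below.

import errno


def append_log(path, data):
    """Append bytes to path, reporting a full disk distinctly."""
    try:
        with open(path, 'ab') as fh:
            fh.write(data)
    except OSError as e:
        if e.errno == errno.ENOSPC:
            # The device is out of space; callers may free space and retry.
            raise RuntimeError('no space left on device while writing %r' % path)
        raise  # propagate any other failure unchanged

On Python 3, IOError is an alias of OSError, so catching OSError also covers the IOError cases seen in several of the examples below.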
def test_downloadPageLogsFileCloseError(self):
    """
    If there is an exception closing the file being written to after the
    connection is prematurely closed, that exception is logged.
    """
    class BrokenFile:
        def write(self, bytes):
            pass

        def close(self):
            raise IOError(ENOSPC, "No file left on device")

    d = client.downloadPage(self.getURL("broken"), BrokenFile())
    d = self.assertFailure(d, client.PartialDownloadError)

    def cbFailed(ignored):
        self.assertEqual(len(self.flushLoggedErrors(IOError)), 1)

    d.addCallback(cbFailed)
    return d
def oswrite(self, fh, data):
    if self._writestate:
        return os.write(fh, data)
    else:
        raise OSError(errno.ENOSPC, "Faked Space problem")
def watch_tree(self):
    self.watched_dirs = {}
    self.watched_rmap = {}
    try:
        self.add_watches(self.basedir)
    except OSError as e:
        if e.errno == errno.ENOSPC:
            # inotify ran out of watches while adding the tree.
            raise DirTooLarge(self.basedir)
        raise  # anything other than a watch-limit failure is unexpected
def process_event(self, wd, mask, cookie, name):
    if wd == -1 and (mask & self.Q_OVERFLOW):
        # We missed some INOTIFY events, so we don't
        # know the state of any tracked dirs.
        self.watch_tree()
        self.modified = True
        return
    path = self.watched_rmap.get(wd, None)
    if path is not None:
        if not self.ignore_event(path, name):
            self.modified = True
        if mask & self.CREATE:
            # A new sub-directory might have been created, monitor it.
            try:
                if not isinstance(path, bytes):
                    name = name.decode(self.fenc)
                self.add_watch(os.path.join(path, name))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # Deleted before add_watch()
                    pass
                elif e.errno == errno.ENOSPC:
                    raise DirTooLarge(self.basedir)
                else:
                    raise
        if (mask & self.DELETE_SELF or mask & self.MOVE_SELF) and path == self.basedir:
            raise BaseDirChanged('The directory %s was moved/deleted' % path)
def handle_error(self):
    eno = ctypes.get_errno()
    extra = ''
    if eno == errno.ENOSPC:
        extra = 'You may need to increase the inotify limits on your system, via /proc/sys/fs/inotify/max_user_*'
    raise OSError(eno, self.os.strerror(eno) + str(extra))
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        NUM_FDS = min(hard, 2**16)
    except (OSError, ValueError):
        NUM_FDS = soft

    # guard for already allocated FDs (stdin, stdout...)
    NUM_FDS -= 32

    s = self.SELECTOR()
    self.addCleanup(s.close)

    for i in range(NUM_FDS // 2):
        try:
            rd, wr = self.make_socketpair()
        except OSError:
            # too many FDs, skip - note that we should only catch EMFILE
            # here, but apparently *BSD and Solaris can fail upon connect()
            # or bind() with EADDRNOTAVAIL, so let's be safe
            self.skipTest("FD limit reached")

        try:
            s.register(rd, selectors.EVENT_READ)
            s.register(wr, selectors.EVENT_WRITE)
        except OSError as e:
            if e.errno == errno.ENOSPC:
                # this can be raised by epoll if we go over
                # fs.epoll.max_user_watches sysctl
                self.skipTest("FD limit reached")
            raise

    self.assertEqual(NUM_FDS // 2, len(s.select()))
def graph(self, uuid, locked=None, **kwargs):
    if locked is None:
        with self.lock.shared(uuid) as locked:
            g = self.graph(uuid, locked=locked, **kwargs)
        return g
    kwargs.update(self.creds)
    try:
        return self.collection.graph(uuid, **kwargs)
    except (IOError, OSError) as e:
        if e.errno == errno.EPERM:
            raise HTTPError(403, str(e))
        elif e.errno == errno.ENOSPC:
            # a full backing store maps to HTTP 507 Insufficient Storage
            raise HTTPError(507, str(e))
        raise HTTPError(404, "Backend graph for %s is inaccessible: %s" % (uuid, str(e)))
def __init__(self, code=None, msg='Unknown error'):
    super(XAttrMetadataError, self).__init__(msg)
    self.code = code
    self.msg = msg

    # Parse code and msg to classify the failure
    if (self.code in (errno.ENOSPC, errno.EDQUOT)
            or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
        self.reason = 'NO_SPACE'
    elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
        self.reason = 'VALUE_TOO_LONG'
    else:
        self.reason = 'NOT_SUPPORTED'
def listdir(dirname):
    try:
        os.listdir(dirname)
    except OSError as e:
        error = e.errno
        if error == errno.ENOENT:
            print('No such file or directory')
        elif error == errno.EACCES:
            print('Permission denied')
        elif error == errno.ENOSPC:
            print('No space left on device')
        else:
            print(e.strerror)
    else:
        print('No error!')
def _raise_error():
    """
    Raises errors for inotify failures.
    """
    err = ctypes.get_errno()
    if err == errno.ENOSPC:
        raise OSError("inotify watch limit reached")
    elif err == errno.EMFILE:
        raise OSError("inotify instance limit reached")
    else:
        raise OSError(os.strerror(err))
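The inotify helpers above translate ENOSPC into a "watch limit reached" error because the kernel returns ENOSPC from inotify_add_watch() when the per-user watch limit is exhausted. A quick way to inspect that limit on Linux is sketched below; the helper name max_user_watches is ours, but the /proc path is the same one referenced in the handle_error example above.

def max_user_watches():
    # Linux exposes the per-user inotify watch limit via procfs.
    with open('/proc/sys/fs/inotify/max_user_watches') as fh:
        return int(fh.read())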
def _get_code_from_exception(backend, operation, e):
    if isinstance(e, BackendException) and e.code != log.ErrorCode.backend_error:
        return e.code
    elif hasattr(backend, '_error_code'):
        return backend._error_code(operation, e) or log.ErrorCode.backend_error
    elif hasattr(e, 'errno'):
        # A few backends return such errors (local, paramiko, etc)
        if e.errno == errno.EACCES:
            return log.ErrorCode.backend_permission_denied
        elif e.errno == errno.ENOENT:
            return log.ErrorCode.backend_not_found
        elif e.errno == errno.ENOSPC:
            return log.ErrorCode.backend_no_space
    return log.ErrorCode.backend_error
def mknod(self, parent, name, mode, mtime):
    assertion(self.is_dir(parent), "mknod: parent is not a directory")
    assertion(name[0] != 0, "mknod: name is null")

    self._inode.begin_tx()

    parent_block, parent_bid, off, valid = self.locate_empty_dentry_slot_err_ino(parent)
    if Not(valid):
        self._inode.commit_tx()
        return 0, errno.ENOSPC

    ino = self.ialloc()

    attr = Stat(size=0, mtime=mtime, mode=mode, nlink=2)
    self._inode.set_iattr(ino, attr)

    attr = self._inode.get_iattr(parent)
    assertion(ULE(attr.bsize, 522), "mknod: bsize is larger than 522")
    attr.size = Concat32(BitVecVal(522, 32), BitVecVal(4096 * 522, 32))
    assertion(ULT(attr.nlink, attr.nlink + 1), "mknod: nlink overflow")
    attr.nlink += 1
    self._inode.set_iattr(parent, attr)

    self.write_dentry(parent_block, off, ino, name)
    parent_block[off] = ino
    self._inode.write(parent_bid, parent_block)

    self._inode.commit_tx()
    return ino, 0
def dir_find_empty(self, blk):
    res = BitVecVal(-errno.ENOSPC, 64)
    for i in range(2):
        res = If(blk[self.I_OFF_DATA + i * 2] == 0, i, res)
    return res
def graphtxn(write=False, create=False, excl=False, on_success=None, on_failure=None):
    def decorator(func):
        def wrapper(self, _, uuid, *args, **kwargs):
            g = self.graph(uuid, readonly=not write, create=create, excl=excl)
            success = None
            try:
                with g.transaction(write=write) as txn:
                    lastID = txn.lastID
                    success = False
                    ret = func(self, g, txn, _, uuid, *args, **kwargs)
                    try:
                        # probe for a generator - plain values get wrapped in a list
                        next = getattr(ret, 'next')
                    except AttributeError:
                        ret = [ret] if ret is not None else []
                    finally:
                        if write:
                            txn.flush()
                            self.res.headers.set('X-lg-updates', txn.lastID - lastID)
                        self.res.headers.set('X-lg-maxID', txn.lastID)
                    for x in ret:
                        yield x
                    success = True
                # delay final chunking trailer until txn has been committed
                if write:
                    if 'x-lg-sync' in self.req.headers:
                        g.sync(force=True)
                    yield ''
            except HTTPError:
                raise
            except (IOError, OSError) as e:
                # a full backing store maps to HTTP 507 Insufficient Storage
                raise HTTPError(507 if e.errno == errno.ENOSPC else 500, str(e))
            except Exception as e:
                info = sys.exc_info()
                log.error('Unhandled exception: %s', traceback.print_exception(*info))
                raise
            finally:
                if success is True:
                    if on_success:
                        on_success(g)
                elif success is False:
                    if on_failure:
                        on_failure(g)
                g.close()
        return wrapper
    return decorator
def put(self, _, uuid):
    with self.lock.exclusive(uuid) as locked:
        (fd, name, dbname, path) = self.tmp_graph(uuid)
        os.close(fd)
        cleanup = [lambda: os.unlink(path)]
        try:
            # with self.graph(uuid, readonly=True, locked=locked) as g1, self.collection.graph(dbname, create=True, hook=False) as g2:
            with self.graph(uuid, locked=locked) as g1, self.collection.graph(dbname, create=True, hook=False) as g2:
                with g1.transaction(write=False) as t1, g2.transaction(write=True) as t2:
                    # fixme
                    cleanup.append(lambda: os.unlink('%s-lock' % path))
                    keep = self.input()
                    if keep is None:
                        keep = self.default_keep
                    if keep.get('kv', False):
                        self.clone_kv(t1, t2)
                    seeds = keep.get('seeds', None)
                    if seeds:
                        self.clone_seeds(uuid, t1, t2, seeds)
                target = g1.path
            cleanup.pop()()  # unlink(path-lock)
            try:
                # fixme
                os.unlink('%s-lock' % target)
            except OSError:
                pass
            os.rename(path, target)
            cleanup.pop()  # unlink(path)
            # bypass creds check, allow hooks to run
            self.collection.remove(uuid)
            # with self.collection.graph(uuid, readonly=True):
            with self.collection.graph(uuid):
                pass
        except (IOError, OSError) as e:
            if e.errno == errno.EPERM:
                raise HTTPError(403, str(e))
            elif e.errno == errno.ENOSPC:
                raise HTTPError(507, str(e))
            raise HTTPError(404, "Reset failed for graph %s: %s" % (uuid, repr(e)))
        finally:
            for x in cleanup:
                try:
                    x()
                except:
                    pass
def _add_watch_for_path(self, path):
    # Must be called with _inotify_fd_lock held.
    logging.debug('_add_watch_for_path(%r)', path)

    for dirpath, directories, _ in itertools.chain(
            [(os.path.dirname(path), [os.path.basename(path)], None)],
            os.walk(path, topdown=True, followlinks=True)):
        watcher_common.skip_ignored_dirs(directories)
        # TODO: this is not an ideal solution as there are other ways for
        # symlinks to confuse our algorithm but a general solution is going to
        # be very complex and this is good enough to solve the immediate problem
        # with Dart's directory structure.
        watcher_common.skip_local_symlinks(
            self._real_directories, dirpath, directories)
        for directory in directories:
            directory_path = os.path.join(dirpath, directory)
            # dirpath cannot be used as the parent directory path because it is
            # the empty string for symlinks :-(
            parent_path = os.path.dirname(directory_path)

            watch_descriptor = _libc.inotify_add_watch(
                self._inotify_fd,
                ctypes.create_string_buffer(directory_path),
                _INTERESTING_INOTIFY_EVENTS)
            if watch_descriptor < 0:
                if ctypes.get_errno() == errno.ENOSPC:
                    logging.warning(
                        'There are too many directories in your application for '
                        'changes in all of them to be monitored. You may have to '
                        'restart the development server to see some changes to your '
                        'files.')
                    return
                error = OSError('could not add watch for %r' % directory_path)
                error.errno = ctypes.get_errno()
                error.strerror = errno.errorcode[ctypes.get_errno()]
                error.filename = directory_path
                raise error

            if parent_path in self._directory_to_subdirs:
                self._directory_to_subdirs[parent_path].add(directory_path)
            self._watch_to_directory[watch_descriptor] = directory_path
            self._directory_to_watch_descriptor[directory_path] = watch_descriptor
            self._directory_to_subdirs[directory_path] = set()
def backup_root(self, root, absroots):
    logging.info('Backing up root %s', root)
    self.progress.what('connecting to live data %s' % root)
    self.reopen_fs(root)
    self.progress.what('scanning for files in %s' % root)
    absroot = self.fs.abspath('.')

    # If the root is a file, we can just back up the file.
    if os.path.isfile(root):
        self.just_one_file = os.path.join(absroot, os.path.split(root)[1])
    else:
        self.just_one_file = None

    self.root_metadata = self.fs.lstat(absroot)

    num_dirs = 0
    # The following is a very approximate guess, but we have no
    # way of being exact.
    dir_entry_size = 1000
    flush_threshold = obnamlib.DEFAULT_DIR_BAG_BYTES / dir_entry_size

    for pathname, metadata in self.find_files(absroot):
        logging.info('Backing up %s', pathname)
        if not self.pretend:
            existed = self.repo.file_exists(self.new_generation, pathname)
        try:
            self.maybe_simulate_error(pathname)
            if stat.S_ISDIR(metadata.st_mode):
                self.backup_directory(pathname, metadata, absroots)
            else:
                self.backup_non_directory(pathname, metadata)
        except (IOError, OSError) as e:
            e2 = self.translate_enverror_to_obnamerror(pathname, e)
            msg = 'Can\'t back up %s: %s' % (pathname, str(e2))
            self.progress.error(msg, exc=e)
            if not existed and not self.pretend:
                self.remove_partially_backed_up_file(pathname)
            # Out of space or a broken pipe is fatal; anything else is
            # reported and the backup continues with the next file.
            if e.errno in (errno.ENOSPC, errno.EPIPE):
                raise

        if metadata.isdir() and not self.pretend:
            num_dirs += 1
            if num_dirs >= flush_threshold:
                self.repo.flush_client(self.client_name)
                self.app.dump_memory_profile('after flushing client')
                num_dirs = 0

        if self.checkpoint_manager.time_for_checkpoint():
            self.make_checkpoint()
            self.progress.what(pathname)
            num_dirs = 0

    self.backup_parents('.')
def _policy_RequestIG(self, books_needed):
    '''Select books from IGs specified in interleave_request attribute.
       If interleave_request_pos is present use it as the starting point.'''
    db = self.LCEobj.db
    ig_req = db.get_xattr(self.shelf, self.XATTR_IG_REQ)
    self.LCEobj.errno = errno.ERANGE
    assert ig_req is not None, \
        'RequestIG policy requires prior %s' % self.XATTR_IG_REQ
    assert len(ig_req), \
        'RequestIG policy requires prior %s' % self.XATTR_IG_REQ

    # Get a starting position for the interleave_request list
    self.LCEobj.errno = errno.ENOSPC
    pos = db.get_xattr(self.shelf, self.XATTR_IG_REQ_POS)
    try:
        ig_pos = int(pos)
        if ig_pos < 0 or ig_pos > (len(ig_req) - 1):
            ig_pos = 0
    except TypeError as err:
        # TSNH, see create_shelf. Legacy paranoia.
        ig_pos = 0
        resp = db.create_xattr(self.shelf, self.XATTR_IG_REQ_POS, ig_pos)
    except ValueError as err:
        ig_pos = 0

    reqIGs = [ord(ig_req[i:i + 1]) for i in range(0, len(ig_req), 1)]

    # Determine number of books needed from each IG
    igCnt = defaultdict(int)
    cur = ig_pos
    for cnt in range(0, books_needed):
        ig = reqIGs[cur % len(reqIGs)]
        igCnt[ig] += 1
        cur += 1

    # Allocate specified number of books from each selected IG
    booksIG = {}
    for ig in igCnt.keys():
        booksIG[ig] = db.get_books_by_intlv_group(
            igCnt[ig], (ig, ), exclude=False)

    # Build list of books using request_interleave pattern
    self.LCEobj.errno = errno.ENOSPC
    bookList = []
    cur = ig_pos
    for cnt in range(0, books_needed):
        ig = reqIGs[cur % len(reqIGs)]
        assert len(booksIG[ig]) != 0, 'Not enough books remaining in IG'
        bookList.append(booksIG[ig].pop(0))
        cur += 1

    # Save current position in interleave_request list
    db.modify_xattr(self.shelf, self.XATTR_IG_REQ_POS, cur % len(reqIGs))

    return bookList