Dataset columns: text_prompt (string, lengths 100 to 17.7k) and code_prompt (string, lengths 7 to 9.86k).
<SYSTEM_TASK:> Return a set of constraints that define projective measurements. <END_TASK> <USER_TASK:> Description: def projective_measurement_constraints(*parties): """Return a set of constraints that define projective measurements. :param parties: Measurements of different parties. :type A: list or tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator`. :returns: substitutions containing idempotency, orthogonality and commutation relations. """
substitutions = {}
# Idempotency and orthogonality of projectors
if isinstance(parties[0][0][0], list):
    parties = parties[0]
for party in parties:
    for measurement in party:
        for projector1 in measurement:
            for projector2 in measurement:
                if projector1 == projector2:
                    substitutions[projector1**2] = projector1
                else:
                    substitutions[projector1*projector2] = 0
                    substitutions[projector2*projector1] = 0
# Projectors commute between parties in a partition
for n1 in range(len(parties)):
    for n2 in range(n1+1, len(parties)):
        for measurement1 in parties[n1]:
            for measurement2 in parties[n2]:
                for projector1 in measurement1:
                    for projector2 in measurement2:
                        substitutions[projector2*projector1] = \
                            projector1*projector2
return substitutions
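A minimal usage sketch (not from the source; the operator names are invented and only SymPy's HermitianOperator is assumed):

from sympy.physics.quantum.operator import HermitianOperator

# One measurement with two outcome projectors per party.
A = [[HermitianOperator('A0_0'), HermitianOperator('A0_1')]]
B = [[HermitianOperator('B0_0'), HermitianOperator('B0_1')]]
subs = projective_measurement_constraints(A, B)
# subs maps e.g. A0_0**2 -> A0_0 (idempotency), A0_0*A0_1 -> 0 (orthogonality),
# and B0_0*A0_0 -> A0_0*B0_0 (operators of different parties commute).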
<SYSTEM_TASK:> Define a polynomial using measurements and an I matrix describing a Bell <END_TASK> <USER_TASK:> Description: def define_objective_with_I(I, *args): """Define a polynomial using measurements and an I matrix describing a Bell inequality. :param I: The I matrix of a Bell inequality in the Collins-Gisin notation. :type I: list of list of int. :param args: Either the measurements of Alice and Bob or a `Probability` class describing their measurement operators. :type A: tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator` or :class:`ncpol2sdpa.Probability` :returns: :class:`sympy.core.expr.Expr` -- the objective function to be solved as a minimization problem to find the maximum quantum violation. Note that the sign is flipped compared to the Bell inequality. """
objective = I[0][0]
if len(args) > 2 or len(args) == 0:
    raise Exception("Wrong number of arguments!")
elif len(args) == 1:
    A = args[0].parties[0]
    B = args[0].parties[1]
else:
    A = args[0]
    B = args[1]
i, j = 0, 1  # Row and column index in I
for m_Bj in B:  # Define first row
    for Bj in m_Bj:
        objective += I[i][j] * Bj
        j += 1
i += 1
for m_Ai in A:
    for Ai in m_Ai:
        objective += I[i][0] * Ai
        j = 1
        for m_Bj in B:
            for Bj in m_Bj:
                objective += I[i][j] * Ai * Bj
                j += 1
        i += 1
return -objective
<SYSTEM_TASK:> Correlators between the probabilities of two parties. <END_TASK> <USER_TASK:> Description: def correlator(A, B): """Correlators between the probabilities of two parties. :param A: Measurements of Alice. :type A: list of list of :class:`sympy.physics.quantum.operator.HermitianOperator`. :param B: Measurements of Bob. :type B: list of list of :class:`sympy.physics.quantum.operator.HermitianOperator`. :returns: list of correlators. """
correlators = []
for i in range(len(A)):
    correlator_row = []
    for j in range(len(B)):
        corr = 0
        for k in range(len(A[i])):
            for l in range(len(B[j])):
                if k == l:
                    corr += A[i][k] * B[j][l]
                else:
                    corr -= A[i][k] * B[j][l]
        correlator_row.append(corr)
    correlators.append(correlator_row)
return correlators
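As an illustration (operator names invented), the correlators for two dichotomic measurements per party, as used in CHSH-type expressions, can be built like this, assuming the function above is in scope:

from sympy.physics.quantum.operator import HermitianOperator

A = [[HermitianOperator('A%d_%d' % (i, k)) for k in range(2)] for i in range(2)]
B = [[HermitianOperator('B%d_%d' % (j, k)) for k in range(2)] for j in range(2)]
E = correlator(A, B)
# E[i][j] == A[i][0]*B[j][0] - A[i][0]*B[j][1] - A[i][1]*B[j][0] + A[i][1]*B[j][1]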
<SYSTEM_TASK:> Get the maximum violation of a two-party Bell inequality. <END_TASK> <USER_TASK:> Description: def maximum_violation(A_configuration, B_configuration, I, level, extra=None): """Get the maximum violation of a two-party Bell inequality. :param A_configuration: Measurement settings of Alice. :type A_configuration: list of int. :param B_configuration: Measurement settings of Bob. :type B_configuration: list of int. :param I: The I matrix of a Bell inequality in the Collins-Gisin notation. :type I: list of list of int. :param level: Level of relaxation. :type level: int. :returns: tuple of primal and dual solutions of the SDP relaxation. """
P = Probability(A_configuration, B_configuration)
objective = define_objective_with_I(I, P)
if extra is None:
    extramonomials = []
else:
    extramonomials = P.get_extra_monomials(extra)
sdpRelaxation = SdpRelaxation(P.get_all_operators(), verbose=0)
sdpRelaxation.get_relaxation(level, objective=objective,
                             substitutions=P.substitutions,
                             extramonomials=extramonomials)
solve_sdp(sdpRelaxation)
return sdpRelaxation.primal, sdpRelaxation.dual
<SYSTEM_TASK:> Returns by how much two intervals overlap <END_TASK> <USER_TASK:> Description: def interval_overlap(a, b, x, y): """Returns by how much two intervals overlap assumed that a <= b and x <= y"""
if b <= x or a >= y:
    return 0
elif x <= a <= y:
    return min(b, y) - a
elif x <= b <= y:
    return b - max(a, x)
elif a >= x and b <= y:
    return b - a
else:
    assert False
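A few worked calls (values chosen only for illustration), assuming the function above is importable:

assert interval_overlap(0, 5, 3, 10) == 2   # [0, 5] and [3, 10] share [3, 5]
assert interval_overlap(0, 5, 5, 10) == 0   # touching endpoints do not overlap
assert interval_overlap(2, 8, 0, 10) == 6   # [2, 8] lies entirely inside [0, 10]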
<SYSTEM_TASK:> Returns a list of lines, split on the last possible space of each line. <END_TASK> <USER_TASK:> Description: def linesplit(string, columns): # type: (Union[Text, FmtStr], int) -> List[FmtStr] """Returns a list of lines, split on the last possible space of each line. Split spaces will be removed. Whitespaces will be normalized to one space. Spaces will be the color of the first whitespace character of the normalized whitespace. If a word extends beyond the line, wrap it anyway. >>> linesplit(fmtstr(" home is where the heart-eating mummy is", 'blue'), 10) [blue('home')+blue(' ')+blue('is'), blue('where')+blue(' ')+blue('the'), blue('heart-eati'), blue('ng')+blue(' ')+blue('mummy'), blue('is')] """
if not isinstance(string, FmtStr):
    string = fmtstr(string)
string_s = string.s
matches = list(re.finditer(r'\s+', string_s))
spaces = [string[m.start():m.end()] for m in matches
          if m.start() != 0 and m.end() != len(string_s)]
words = [string[start:end] for start, end in zip(
    [0] + [m.end() for m in matches],
    [m.start() for m in matches] + [len(string_s)])
    if start != end]
word_to_lines = lambda word: [word[columns*i:columns*(i+1)]
                              for i in range((len(word) - 1) // columns + 1)]
lines = word_to_lines(words[0])
for word, space in zip(words[1:], spaces):
    if len(lines[-1]) + len(word) < columns:
        lines[-1] += fmtstr(' ', **space.shared_atts)
        lines[-1] += word
    else:
        lines.extend(word_to_lines(word))
return lines
<SYSTEM_TASK:> Returns a kwargs dictionary by turning args into kwargs <END_TASK> <USER_TASK:> Description: def parse_args(args, kwargs): """Returns a kwargs dictionary by turning args into kwargs"""
if 'style' in kwargs:
    args += (kwargs['style'],)
    del kwargs['style']
for arg in args:
    if not isinstance(arg, (bytes, unicode)):
        raise ValueError("args must be strings:" + repr(args))
    if arg.lower() in FG_COLORS:
        if 'fg' in kwargs:
            raise ValueError("fg specified twice")
        kwargs['fg'] = FG_COLORS[arg]
    elif arg.lower().startswith('on_') and arg[3:].lower() in BG_COLORS:
        if 'bg' in kwargs:
            raise ValueError("bg specified twice")
        kwargs['bg'] = BG_COLORS[arg[3:]]
    elif arg.lower() in STYLES:
        kwargs[arg] = True
    else:
        raise ValueError("couldn't process arg: " + repr(arg))
for k in kwargs:
    if k not in ['fg', 'bg'] + list(STYLES.keys()):
        raise ValueError("Can't apply that transformation")
if 'fg' in kwargs:
    if kwargs['fg'] in FG_COLORS:
        kwargs['fg'] = FG_COLORS[kwargs['fg']]
    if kwargs['fg'] not in list(FG_COLORS.values()):
        raise ValueError("Bad fg value: %r" % kwargs['fg'])
if 'bg' in kwargs:
    if kwargs['bg'] in BG_COLORS:
        kwargs['bg'] = BG_COLORS[kwargs['bg']]
    if kwargs['bg'] not in list(BG_COLORS.values()):
        raise ValueError("Bad bg value: %r" % kwargs['bg'])
return kwargs
<SYSTEM_TASK:> FmtStr repr is built by concatenating these. <END_TASK> <USER_TASK:> Description: def repr_part(self): """FmtStr repr is built by concatenating these."""
def pp_att(att):
    if att == 'fg':
        return FG_NUMBER_TO_COLOR[self.atts[att]]
    elif att == 'bg':
        return 'on_' + BG_NUMBER_TO_COLOR[self.atts[att]]
    else:
        return att
atts_out = dict((k, v) for (k, v) in self.atts.items() if v)
return (''.join(pp_att(att) + '(' for att in sorted(atts_out)) +
        (repr(self.s) if PY3 else repr(self.s)[1:]) +
        ')' * len(atts_out))
<SYSTEM_TASK:> Requests a sub-chunk of max_width or shorter. Returns None if no chunks left. <END_TASK> <USER_TASK:> Description: def request(self, max_width): # type: (int) -> Optional[Tuple[int, Chunk]] """Requests a sub-chunk of max_width or shorter. Returns None if no chunks left."""
if max_width < 1:
    raise ValueError('requires positive integer max_width')
s = self.chunk.s
length = len(s)
if self.internal_offset == len(s):
    return None
width = 0
start_offset = i = self.internal_offset
replacement_char = u' '
while True:
    w = wcswidth(s[i])
    # If adding a character puts us over the requested width,
    # return what we've got so far
    if width + w > max_width:
        self.internal_offset = i  # does not include ith character
        self.internal_width += width
        # if not adding it makes us short, this must have been
        # a double-width character
        if width < max_width:
            assert width + 1 == max_width, 'unicode character width of more than 2!?!'
            assert w == 2, 'unicode character of width other than 2?'
            return (width + 1,
                    Chunk(s[start_offset:self.internal_offset] + replacement_char,
                          atts=self.chunk.atts))
        return (width, Chunk(s[start_offset:self.internal_offset],
                             atts=self.chunk.atts))
    # otherwise add this width
    width += w
    # If one more char would put us over, return whatever we've got
    if i + 1 == length:
        self.internal_offset = i + 1  # beware the fencepost, i is an index not an offset
        self.internal_width += width
        return (width, Chunk(s[start_offset:self.internal_offset],
                             atts=self.chunk.atts))
    # otherwise attempt to add the next character
    i += 1
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def from_str(cls, s): # type: (Union[Text, bytes]) -> FmtStr r""" Return a FmtStr representing input. The str() of a FmtStr is guaranteed to produced the same FmtStr. Other input with escape sequences may not be preserved. >>> fmtstr("|"+fmtstr("hey", fg='red', bg='blue')+"|") '|'+on_blue(red('hey'))+'|' >>> fmtstr('|\x1b[31m\x1b[44mhey\x1b[49m\x1b[39m|') '|'+on_blue(red('hey'))+'|' """
if '\x1b[' in s:
    try:
        tokens_and_strings = parse(s)
    except ValueError:
        return FmtStr(Chunk(remove_ansi(s)))
    else:
        chunks = []
        cur_fmt = {}
        for x in tokens_and_strings:
            if isinstance(x, dict):
                cur_fmt.update(x)
            elif isinstance(x, (bytes, unicode)):
                atts = parse_args('', dict((k, v) for k, v in cur_fmt.items()
                                           if v is not None))
                chunks.append(Chunk(x, atts=atts))
            else:
                raise Exception("logic error")
        return FmtStr(*chunks)
else:
    return FmtStr(Chunk(s))
<SYSTEM_TASK:> Copies the current FmtStr's attributes while changing its string. <END_TASK> <USER_TASK:> Description: def copy_with_new_str(self, new_str): """Copies the current FmtStr's attributes while changing its string."""
# What to do when there are multiple Chunks with conflicting atts?
old_atts = dict((att, value) for bfs in self.chunks
                for (att, value) in bfs.atts.items())
return FmtStr(Chunk(new_str, old_atts))
<SYSTEM_TASK:> Returns a new FmtStr with the same content but new formatting <END_TASK> <USER_TASK:> Description: def copy_with_new_atts(self, **attributes): """Returns a new FmtStr with the same content but new formatting"""
return FmtStr(*[Chunk(bfs.s, bfs.atts.extend(attributes))
                for bfs in self.chunks])
<SYSTEM_TASK:> Joins an iterable yielding strings or FmtStrs with self as separator <END_TASK> <USER_TASK:> Description: def join(self, iterable): """Joins an iterable yielding strings or FmtStrs with self as separator"""
before = []
chunks = []
for i, s in enumerate(iterable):
    chunks.extend(before)
    before = self.chunks
    if isinstance(s, FmtStr):
        chunks.extend(s.chunks)
    elif isinstance(s, (bytes, unicode)):
        chunks.extend(fmtstr(s).chunks)  # TODO just make a chunk directly
    else:
        raise TypeError("expected str or FmtStr, %r found" % type(s))
return FmtStr(*chunks)
<SYSTEM_TASK:> Split based on separator, optionally using a regex <END_TASK> <USER_TASK:> Description: def split(self, sep=None, maxsplit=None, regex=False): """Split based on separator, optionally using a regex Capture groups are ignored in regex, the whole pattern is matched and used to split the original FmtStr."""
if maxsplit is not None:
    raise NotImplementedError('no maxsplit yet')
s = self.s
if sep is None:
    sep = r'\s+'
elif not regex:
    sep = re.escape(sep)
matches = list(re.finditer(sep, s))
return [self[start:end] for start, end in zip(
    [0] + [m.end() for m in matches],
    [m.start() for m in matches] + [len(s)])]
<SYSTEM_TASK:> Return a list of lines, split on newline characters, <END_TASK> <USER_TASK:> Description: def splitlines(self, keepends=False): """Return a list of lines, split on newline characters, including line boundaries if keepends is true."""
lines = self.split('\n')
return [line + '\n' for line in lines] if keepends else (
    lines if lines[-1] else lines[:-1])
<SYSTEM_TASK:> The number of columns it would take to display this string <END_TASK> <USER_TASK:> Description: def width(self): """The number of columns it would take to display this string"""
if self._width is not None:
    return self._width
self._width = sum(fs.width for fs in self.chunks)
return self._width
<SYSTEM_TASK:> Returns the horizontal position of character n of the string <END_TASK> <USER_TASK:> Description: def width_at_offset(self, n): """Returns the horizontal position of character n of the string"""
#TODO make more efficient?
width = wcswidth(self.s[:n])
assert width != -1
return width
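For intuition, wcswidth counts display columns rather than characters; this sketch uses the standalone wcwidth package, which is assumed to behave like the wcswidth used here:

from wcwidth import wcswidth

assert wcswidth(u'abc') == 3       # one column per ASCII character
assert wcswidth(u'\u3042') == 2    # HIRAGANA LETTER A is double-width
# so width_at_offset(1) on a FmtStr starting with u'\u3042' would return 2.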
<SYSTEM_TASK:> Gets atts shared among all nonzero length component Chunk <END_TASK> <USER_TASK:> Description: def shared_atts(self): """Gets atts shared among all nonzero length component Chunk"""
#TODO cache this, could get ugly for large FmtStrs
atts = {}
first = self.chunks[0]
for att in sorted(first.atts):
    #TODO how to write this without the '???'?
    if all(fs.atts.get(att, '???') == first.atts[att]
           for fs in self.chunks if len(fs) > 0):
        atts[att] = first.atts[att]
return atts
<SYSTEM_TASK:> Returns a new FmtStr with the same content but some attributes removed <END_TASK> <USER_TASK:> Description: def new_with_atts_removed(self, *attributes): """Returns a new FmtStr with the same content but some attributes removed"""
return FmtStr(*[Chunk(bfs.s, bfs.atts.remove(*attributes))
                for bfs in self.chunks])
<SYSTEM_TASK:> List of indices of divisions between the constituent chunks. <END_TASK> <USER_TASK:> Description: def divides(self): """List of indices of divisions between the constituent chunks."""
acc = [0]
for s in self.chunks:
    acc.append(acc[-1] + len(s))
return acc
<SYSTEM_TASK:> Slice based on the number of columns it would take to display the substring. <END_TASK> <USER_TASK:> Description: def width_aware_slice(self, index): """Slice based on the number of columns it would take to display the substring."""
if wcswidth(self.s) == -1:
    raise ValueError('bad values for width aware slicing')
index = normalize_slice(self.width, index)
counter = 0
parts = []
for chunk in self.chunks:
    if index.start < counter + chunk.width and index.stop > counter:
        start = max(0, index.start - counter)
        end = min(index.stop - counter, chunk.width)
        if end - start == chunk.width:
            parts.append(chunk)
        else:
            s_part = width_aware_slice(chunk.s,
                                       max(0, index.start - counter),
                                       index.stop - counter)
            parts.append(Chunk(s_part, chunk.atts))
    counter += chunk.width
    if index.stop < counter:
        break
return FmtStr(*parts) if parts else fmtstr('')
<SYSTEM_TASK:> Split into lines, pushing doublewidth characters at the end of a line to the next line. <END_TASK> <USER_TASK:> Description: def width_aware_splitlines(self, columns): # type: (int) -> Iterator[FmtStr] """Split into lines, pushing doublewidth characters at the end of a line to the next line. When a double-width character is pushed to the next line, a space is added to pad out the line. """
if columns < 2:
    raise ValueError("Column width %s is too narrow." % columns)
if wcswidth(self.s) == -1:
    raise ValueError('bad values for width aware slicing')
return self._width_aware_splitlines(columns)
<SYSTEM_TASK:> Return key pressed from bytes_ or None <END_TASK> <USER_TASK:> Description: def get_key(bytes_, encoding, keynames='curtsies', full=False): """Return key pressed from bytes_ or None Return a key name or None meaning it's an incomplete sequence of bytes (more bytes needed to determine the key pressed) encoding is how the bytes should be translated to unicode - it should match the terminal encoding. keynames is a string describing how keys should be named: * curtsies uses unicode strings like <F8> * curses uses unicode strings similar to those returned by the Python ncurses window.getkey function, like KEY_F(8), plus a nonstandard representation of meta keys (bytes 128-255) because returning the corresponding unicode code point would be indistinguishable from the multibyte sequence that encodes that character in the current encoding * bytes returns the original bytes from stdin (NOT unicode) if full, match a key even if it could be a prefix to another key (useful for detecting a plain escape key for instance, since escape is also a prefix to a bunch of char sequences for other keys) Events are subclasses of Event, or unicode strings Precondition: get_key(prefix, keynames) is None for all proper prefixes of bytes. This means get_key should be called on progressively larger inputs (for 'asdf', first on 'a', then on 'as', then on 'asd' - until a non-None value is returned) """
if not all(isinstance(c, type(b'')) for c in bytes_): raise ValueError("get key expects bytes, got %r" % bytes_) # expects raw bytes if keynames not in ['curtsies', 'curses', 'bytes']: raise ValueError("keynames must be one of 'curtsies', 'curses' or 'bytes'") seq = b''.join(bytes_) if len(seq) > MAX_KEYPRESS_SIZE: raise ValueError('unable to decode bytes %r' % seq) def key_name(): if keynames == 'curses': if seq in CURSES_NAMES: # may not be here (and still not decodable) curses names incomplete return CURSES_NAMES[seq] # Otherwise, there's no special curses name for this try: return seq.decode(encoding) # for normal decodable text or a special curtsies sequence with bytes that can be decoded except UnicodeDecodeError: # this sequence can't be decoded with this encoding, so we need to represent the bytes if len(seq) == 1: return u'x%02X' % ord(seq) #TODO figure out a better thing to return here else: raise NotImplementedError("are multibyte unnameable sequences possible?") return u'bytes: ' + u'-'.join(u'x%02X' % ord(seq[i:i+1]) for i in range(len(seq))) #TODO if this isn't possible, return multiple meta keys as a paste event if paste events enabled elif keynames == 'curtsies': if seq in CURTSIES_NAMES: return CURTSIES_NAMES[seq] return seq.decode(encoding) #assumes that curtsies names are a subset of curses ones else: assert keynames == 'bytes' return seq key_known = seq in CURTSIES_NAMES or seq in CURSES_NAMES or decodable(seq, encoding) if full and key_known: return key_name() elif seq in KEYMAP_PREFIXES or could_be_unfinished_char(seq, encoding): return None # need more input to make up a full keypress elif key_known: return key_name() else: seq.decode(encoding) # this will raise a unicode error (they're annoying to raise ourselves) assert False, 'should have raised an unicode decode error'
<SYSTEM_TASK:> Whether seq bytes might create a char in encoding if more bytes were added <END_TASK> <USER_TASK:> Description: def could_be_unfinished_char(seq, encoding): """Whether seq bytes might create a char in encoding if more bytes were added"""
if decodable(seq, encoding):
    return False  # any sensible encoding surely doesn't require lookahead (right?)
    # (if seq bytes encode a character, adding another byte shouldn't also encode something)
if encodings.codecs.getdecoder('utf8') is encodings.codecs.getdecoder(encoding):
    return could_be_unfinished_utf8(seq)
elif encodings.codecs.getdecoder('ascii') is encodings.codecs.getdecoder(encoding):
    return False
else:
    return True
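For instance, the euro sign is three bytes in UTF-8, so a two-byte prefix is not yet decodable but could still become a character (a sketch, assuming `decodable` performs a strict decode):

seq = u'\u20ac'.encode('utf8')   # b'\xe2\x82\xac'
prefix = seq[:2]                 # b'\xe2\x82' cannot be decoded on its own
# could_be_unfinished_char(prefix, 'utf8') would return True,
# while could_be_unfinished_char(seq, 'utf8') returns False (it already decodes).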
<SYSTEM_TASK:> Returns pretty representation of an Event or keypress <END_TASK> <USER_TASK:> Description: def pp_event(seq): """Returns pretty representation of an Event or keypress"""
if isinstance(seq, Event):
    return str(seq)
# Get the original sequence back if seq is a pretty name already
rev_curses = dict((v, k) for k, v in CURSES_NAMES.items())
rev_curtsies = dict((v, k) for k, v in CURTSIES_NAMES.items())
if seq in rev_curses:
    seq = rev_curses[seq]
elif seq in rev_curtsies:
    seq = rev_curtsies[seq]
pretty = curtsies_name(seq)
if pretty != seq:
    return pretty
return repr(seq).lstrip('u')[1:-1]
<SYSTEM_TASK:> Return a string of random nouns up to max number <END_TASK> <USER_TASK:> Description: def create_nouns(max=2): """ Return a string of random nouns up to max number """
nouns = []
for noun in range(0, max):
    nouns.append(random.choice(noun_list))
return " ".join(nouns)
<SYSTEM_TASK:> Create a random valid date <END_TASK> <USER_TASK:> Description: def create_date(past=False, max_years_future=10, max_years_past=10): """ Create a random valid date. If past, then dates can be in the past. If in the future, then no more than max_years_future years into the future. If in the past, then no older than max_years_past years. """
if past:
    # Anywhere between max_years_past years ago and max_years_future years ahead
    start = datetime.datetime.today() - datetime.timedelta(days=max_years_past * 365)
    num_days = (max_years_future * 365) + start.day
else:
    start = datetime.datetime.today()
    num_days = max_years_future * 365
random_days = random.randint(1, num_days)
random_date = start + datetime.timedelta(days=random_days)
return random_date
<SYSTEM_TASK:> Create a random birthday for someone between the ages of min_age and max_age <END_TASK> <USER_TASK:> Description: def create_birthday(min_age=18, max_age=80): """ Create a random birthday for someone between the ages of min_age and max_age """
age = random.randint(min_age, max_age)
start = datetime.date.today() - datetime.timedelta(days=random.randint(0, 365))
return start - datetime.timedelta(days=age * 365)
<SYSTEM_TASK:> Run through some simple examples <END_TASK> <USER_TASK:> Description: def show_examples(): """ Run through some simple examples """
first, last = create_name()
add = create_street()
zip, city, state = create_city_state_zip()
phone = create_phone(zip)
print(first, last)
print(add)
print("{0:s} {1:s} {2:s}".format(city, state, zip))
print(phone)
print(create_sentence())
print(create_paragraphs(num=3))
print(create_cc_number())
expiry = create_date(max_years_future=3)
print("{0:%m/%y}".format(expiry))
print(create_email(name=(first, last)))
print("Password: {0:s}".format(create_pw()))
print(create_company_name())
print(create_job_title())
print("Born on {0:%m/%d/%Y}".format(create_birthday()))
<SYSTEM_TASK:> MOSEK requires a specific sparse format to define the lower-triangular <END_TASK> <USER_TASK:> Description: def convert_to_mosek_index(block_struct, row_offsets, block_offsets, row): """MOSEK requires a specific sparse format to define the lower-triangular part of a symmetric matrix. This function does the conversion from the sparse upper triangular matrix format of Ncpol2SDPA. """
block_index, i, j = convert_row_to_sdpa_index(block_struct, row_offsets, row)
offset = block_offsets[block_index]
ci = offset + i
cj = offset + j
return cj, ci
<SYSTEM_TASK:> Converts the entire sparse representation of the Fi constraint matrices <END_TASK> <USER_TASK:> Description: def convert_to_mosek_matrix(sdp): """Converts the entire sparse representation of the Fi constraint matrices to sparse MOSEK matrices. """
barci = [] barcj = [] barcval = [] barai = [] baraj = [] baraval = [] for k in range(sdp.n_vars): barai.append([]) baraj.append([]) baraval.append([]) row_offsets = [0] block_offsets = [0] cumulative_sum = 0 cumulative_square_sum = 0 for block_size in sdp.block_struct: cumulative_sum += block_size cumulative_square_sum += block_size ** 2 row_offsets.append(cumulative_square_sum) block_offsets.append(cumulative_sum) for row in range(len(sdp.F.rows)): if len(sdp.F.rows[row]) > 0: col_index = 0 for k in sdp.F.rows[row]: value = sdp.F.data[row][col_index] i, j = convert_to_mosek_index(sdp.block_struct, row_offsets, block_offsets, row) if k > 0: barai[k - 1].append(i) baraj[k - 1].append(j) baraval[k - 1].append(-value) else: barci.append(i) barcj.append(j) barcval.append(value) col_index += 1 return barci, barcj, barcval, barai, baraj, baraval
<SYSTEM_TASK:> Convert an SDP relaxation to a MOSEK task. <END_TASK> <USER_TASK:> Description: def convert_to_mosek(sdp): """Convert an SDP relaxation to a MOSEK task. :param sdp: The SDP relaxation to convert. :type sdp: :class:`ncpol2sdpa.sdp`. :returns: :class:`mosek.Task`. """
import mosek # Cheat when variables are complex and convert with PICOS if sdp.complex_matrix: from .picos_utils import convert_to_picos Problem = convert_to_picos(sdp).to_real() Problem._make_mosek_instance() task = Problem.msk_task if sdp.verbose > 0: task.set_Stream(mosek.streamtype.log, streamprinter) return task barci, barcj, barcval, barai, baraj, baraval = \ convert_to_mosek_matrix(sdp) bkc = [mosek.boundkey.fx] * sdp.n_vars blc = [-v for v in sdp.obj_facvar] buc = [-v for v in sdp.obj_facvar] env = mosek.Env() task = env.Task(0, 0) if sdp.verbose > 0: task.set_Stream(mosek.streamtype.log, streamprinter) numvar = 0 numcon = len(bkc) BARVARDIM = [sum(sdp.block_struct)] task.appendvars(numvar) task.appendcons(numcon) task.appendbarvars(BARVARDIM) for i in range(numcon): task.putconbound(i, bkc[i], blc[i], buc[i]) symc = task.appendsparsesymmat(BARVARDIM[0], barci, barcj, barcval) task.putbarcj(0, [symc], [1.0]) for i in range(len(barai)): syma = task.appendsparsesymmat(BARVARDIM[0], barai[i], baraj[i], baraval[i]) task.putbaraij(i, 0, [syma], [1.0]) # Input the objective sense (minimize/maximize) task.putobjsense(mosek.objsense.minimize) return task
<SYSTEM_TASK:> If ``text`` is not empty, append a new Text node to the most recent <END_TASK> <USER_TASK:> Description: def _add_text(self, text): """ If ``text`` is not empty, append a new Text node to the most recent pending node, if there is any, or to the new nodes, if there are no pending nodes. """
if text:
    if self.pending_nodes:
        self.pending_nodes[-1].append(nodes.Text(text))
    else:
        self.new_nodes.append(nodes.Text(text))
<SYSTEM_TASK:> Returns two FSArrays with differences underlined <END_TASK> <USER_TASK:> Description: def diff(cls, a, b, ignore_formatting=False): """Returns two FSArrays with differences underlined"""
def underline(x): return u'\x1b[4m%s\x1b[0m' % (x,) def blink(x): return u'\x1b[5m%s\x1b[0m' % (x,) a_rows = [] b_rows = [] max_width = max([len(row) for row in a] + [len(row) for row in b]) a_lengths = [] b_lengths = [] for a_row, b_row in zip(a, b): a_lengths.append(len(a_row)) b_lengths.append(len(b_row)) extra_a = u'`' * (max_width - len(a_row)) extra_b = u'`' * (max_width - len(b_row)) a_line = u'' b_line = u'' for a_char, b_char in zip(a_row + extra_a, b_row + extra_b): if ignore_formatting: a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char else: a_char_for_eval = a_char b_char_for_eval = b_char if a_char_for_eval == b_char_for_eval: a_line += actualize(a_char) b_line += actualize(b_char) else: a_line += underline(blink(actualize(a_char))) b_line += underline(blink(actualize(b_char))) a_rows.append(a_line) b_rows.append(b_line) hdiff = '\n'.join(a_line + u' %3d | %3d ' % (a_len, b_len) + b_line for a_line, b_line, a_len, b_len in zip(a_rows, b_rows, a_lengths, b_lengths)) return hdiff
<SYSTEM_TASK:> Decide what method to use for paging through text. <END_TASK> <USER_TASK:> Description: def pager(text, color=None): """Decide what method to use for paging through text."""
stdout = _default_text_stdout()
if not isatty(sys.stdin) or not isatty(stdout):
    return _nullpager(stdout, text, color)
if 'PAGER' in os.environ:
    if WIN:
        return _tempfilepager(text, os.environ['PAGER'], color)
    return _pipepager(text, os.environ['PAGER'], color)
if os.environ.get('TERM') in ('dumb', 'emacs'):
    return _nullpager(stdout, text, color)
if WIN or sys.platform.startswith('os2'):
    return _tempfilepager(text, 'more <', color)
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
    return _pipepager(text, 'less', color)

import tempfile
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
    if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
        return _pipepager(text, 'more', color)
    return _nullpager(stdout, text, color)
finally:
    os.unlink(filename)
<SYSTEM_TASK:> Backwards-compatibility for the old retries format. <END_TASK> <USER_TASK:> Description: def from_int(cls, retries, redirect=True, default=None): """ Backwards-compatibility for the old retries format."""
if retries is None:
    retries = default if default is not None else cls.DEFAULT

if isinstance(retries, Retry):
    return retries

redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
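A hedged usage sketch in the urllib3 style (assuming `Retry` here matches urllib3's):

retries = Retry.from_int(3)        # a bare int becomes Retry(total=3, ...)
same = Retry.from_int(retries)     # an existing Retry instance passes through
fallback = Retry.from_int(None)    # None falls back to `default` or cls.DEFAULT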
<SYSTEM_TASK:> Converts a callable or python ty into the most appropriate param <END_TASK> <USER_TASK:> Description: def convert_type(ty, default=None): """Converts a callable or python ty into the most appropriate param ty. """
if isinstance(ty, tuple):
    return Tuple(ty)
if isinstance(ty, ParamType):
    return ty
guessed_type = False
if ty is None and default is not None:
    ty = type(default)
    guessed_type = True
if ty is text_type or ty is str or ty is None:
    return STRING
if ty is int:
    return INT
# Booleans are only okay if not guessed. This is done because for
# flags the default value is actually a bit of a lie in that it
# indicates which of the flags is the one we want. See get_default()
# for more information.
if ty is bool and not guessed_type:
    return BOOL
if ty is float:
    return FLOAT
if guessed_type:
    return STRING
# Catch a common mistake
if __debug__:
    try:
        if issubclass(ty, ParamType):
            raise AssertionError('Attempted to use an uninstantiated '
                                 'parameter type (%s).' % ty)
    except TypeError:
        pass
return FuncParamType(ty)
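A few illustrative mappings, read directly from the logic above (the param type singletons INT, STRING, etc. are assumed to be click's):

# convert_type(int)         -> INT
# convert_type(None, 42)    -> INT     (type guessed from the default)
# convert_type(None, True)  -> STRING  (guessed booleans deliberately fall back)
# convert_type(str)         -> STRING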
<SYSTEM_TASK:> Create new default preset <END_TASK> <USER_TASK:> Description: def new(preset, name, silent, update): """Create new default preset \b Usage: $ be new ad "blue_unicorn" created $ be new film --name spiderman "spiderman" created """
if self.isactive(): lib.echo("Please exit current preset before starting a new") sys.exit(lib.USER_ERROR) if not name: count = 0 name = lib.random_name() while name in _extern.projects(): if count > 10: lib.echo("ERROR: Couldn't come up with a unique name :(") sys.exit(lib.USER_ERROR) name = lib.random_name() count += 1 project_dir = lib.project_dir(_extern.cwd(), name) if os.path.exists(project_dir): lib.echo("\"%s\" already exists" % name) sys.exit(lib.USER_ERROR) username, preset = ([None] + preset.split("/", 1))[-2:] presets_dir = _extern.presets_dir() preset_dir = os.path.join(presets_dir, preset) # Is the preset given referring to a repository directly? relative = False if username else True try: if not update and preset in _extern.local_presets(): _extern.copy_preset(preset_dir, project_dir) else: lib.echo("Finding preset for \"%s\".. " % preset, silent) time.sleep(1 if silent else 0) if relative: # Preset is relative, look it up from the Hub presets = _extern.github_presets() if preset not in presets: sys.stdout.write("\"%s\" not found" % preset) sys.exit(lib.USER_ERROR) time.sleep(1 if silent else 0) repository = presets[preset] else: # Absolute paths are pulled directly repository = username + "/" + preset lib.echo("Pulling %s.. " % repository, silent) repository = _extern.fetch_release(repository) # Remove existing preset if preset in _extern.local_presets(): _extern.remove_preset(preset) try: _extern.pull_preset(repository, preset_dir) except IOError as e: lib.echo("ERROR: Sorry, something went wrong.\n" "Use be --verbose for more") lib.echo(e) sys.exit(lib.USER_ERROR) try: _extern.copy_preset(preset_dir, project_dir) finally: # Absolute paths are not stored locally if not relative: _extern.remove_preset(preset) except IOError as exc: if self.verbose: lib.echo("ERROR: %s" % exc) else: lib.echo("ERROR: Could not write, do you have permission?") sys.exit(lib.PROGRAM_ERROR) lib.echo("\"%s\" created" % name, silent)
<SYSTEM_TASK:> Update a local preset <END_TASK> <USER_TASK:> Description: def update(preset, clean): """Update a local preset This command will cause `be` to pull a preset already available locally. \b Usage: $ be update ad Updating "ad".. """
if self.isactive(): lib.echo("ERROR: Exit current project first") sys.exit(lib.USER_ERROR) presets = _extern.github_presets() if preset not in presets: sys.stdout.write("\"%s\" not found" % preset) sys.exit(lib.USER_ERROR) lib.echo("Are you sure you want to update \"%s\", " "any changes will be lost?: [y/N]: ", newline=False) if raw_input().lower() in ("y", "yes"): presets_dir = _extern.presets_dir() preset_dir = os.path.join(presets_dir, preset) repository = presets[preset] if clean: try: _extern.remove_preset(preset) except: lib.echo("ERROR: Could not clean existing preset") sys.exit(lib.USER_ERROR) lib.echo("Updating %s.. " % repository) try: _extern.pull_preset(repository, preset_dir) except IOError as e: lib.echo(e) sys.exit(lib.USER_ERROR) else: lib.echo("Cancelled")
<SYSTEM_TASK:> Utility sub-command for tabcompletion <END_TASK> <USER_TASK:> Description: def tab(topics, complete): """Utility sub-command for tabcompletion This command is meant to be called by a tab completion function and is given a the currently entered topics, along with a boolean indicating whether or not the last entered argument is complete. """
# Discard `be tab` topics = list(topics)[2:] # When given an incomplete argument, # the argument is *sometimes* returned twice (?) # .. note:: Seen in Git Bash on Windows # $ be in giant [TAB] # -> ['giant'] # $ be in gi[TAB] # -> ['gi', 'gi'] if len(topics) > 1 and topics[-1] == topics[-2]: topics.pop() # Suggest projects if len(topics) == 0: projects = lib.list_projects(root=_extern.cwd()) sys.stdout.write(" ".join(projects)) elif len(topics) == 1: project = topics[0] projects = lib.list_projects(root=_extern.cwd()) # Complete project if not complete: projects = [i for i in projects if i.startswith(project)] sys.stdout.write(" ".join(projects)) else: # Suggest items from inventory inventory = _extern.load_inventory(project) inventory = lib.list_inventory(inventory) items = [i for i, b in inventory] sys.stdout.write(" ".join(items)) else: project, item = topics[:2] # Complete inventory item if len(topics) == 2 and not complete: inventory = _extern.load_inventory(project) inventory = lib.list_inventory(inventory) items = [i for i, b in inventory] items = [i for i in items if i.startswith(item)] sys.stdout.write(" ".join(items)) # Suggest items from template else: try: be = _extern.load_be(project) templates = _extern.load_templates(project) inventory = _extern.load_inventory(project) item = topics[-1] items = lib.list_template(root=_extern.cwd(), topics=topics, templates=templates, inventory=inventory, be=be) if not complete: items = lib.list_template(root=_extern.cwd(), topics=topics[:-1], templates=templates, inventory=inventory, be=be) items = [i for i in items if i.startswith(item)] sys.stdout.write(" ".join(items) + " ") else: sys.stdout.write(" ".join(items) + " ") except IndexError: sys.exit(lib.NORMAL)
<SYSTEM_TASK:> Enter into an environment with support for tab-completion <END_TASK> <USER_TASK:> Description: def activate(): """Enter into an environment with support for tab-completion This command drops you into a subshell, similar to the one generated via `be in ...`, except no topic is present and instead it enables tab-completion for supported shells. See documentation for further information. https://github.com/mottosso/be/wiki/cli """
parent = lib.parent()

try:
    cmd = lib.cmd(parent)
except SystemError as exc:
    lib.echo(exc)
    sys.exit(lib.PROGRAM_ERROR)

# Store reference to calling shell
context = lib.context(root=_extern.cwd())
context["BE_SHELL"] = parent

if lib.platform() == "unix":
    context["BE_TABCOMPLETION"] = os.path.join(
        os.path.dirname(__file__), "_autocomplete.sh").replace("\\", "/")

context.pop("BE_ACTIVE", None)

sys.exit(subprocess.call(cmd, env=context))
<SYSTEM_TASK:> List contents of current context <END_TASK> <USER_TASK:> Description: def ls(topics): """List contents of current context \b Usage: $ be ls - spiderman - hulk $ be ls spiderman - peter - mjay $ be ls spiderman seq01 - 1000 - 2000 - 2500 Return codes: 0 Normal 2 When insufficient arguments are supplied, or a template is unsupported. """
if self.isactive(): lib.echo("ERROR: Exit current project first") sys.exit(lib.USER_ERROR) # List projects if len(topics) == 0: for project in lib.list_projects(root=_extern.cwd()): lib.echo("- %s (project)" % project) sys.exit(lib.NORMAL) # List inventory of project elif len(topics) == 1: inventory = _extern.load_inventory(topics[0]) for item, binding in lib.list_inventory(inventory): lib.echo("- %s (%s)" % (item, binding)) sys.exit(lib.NORMAL) # List specific portion of template else: try: project = topics[0] be = _extern.load_be(project) templates = _extern.load_templates(project) inventory = _extern.load_inventory(project) for item in lib.list_template(root=_extern.cwd(), topics=topics, templates=templates, inventory=inventory, be=be): lib.echo("- %s" % item) except IndexError as exc: lib.echo(exc) sys.exit(lib.USER_ERROR) sys.exit(lib.NORMAL)
<SYSTEM_TASK:> Create directory with template for topic of the current environment <END_TASK> <USER_TASK:> Description: def mkdir(dir, enter): """Create directory with template for topic of the current environment """
if not os.path.exists(dir):
    os.makedirs(dir)
<SYSTEM_TASK:> Print current environment <END_TASK> <USER_TASK:> Description: def dump(): """Print current environment Environment is outputted in a YAML-friendly format \b Usage: $ be dump Prefixed: - BE_TOPICS=hulk bruce animation - ... """
if not self.isactive(): lib.echo("ERROR: Enter a project first") sys.exit(lib.USER_ERROR) # Print custom environment variables first custom = sorted(os.environ.get("BE_ENVIRONMENT", "").split()) if custom: lib.echo("Custom:") for key in custom: lib.echo("- %s=%s" % (key, os.environ.get(key))) # Then print redirected variables project = os.environ["BE_PROJECT"] root = os.environ["BE_PROJECTSROOT"] be = _extern.load(project, "be", optional=True, root=root) redirect = be.get("redirect", {}).items() if redirect: lib.echo("\nRedirect:") for map_source, map_dest in sorted(redirect): lib.echo("- %s=%s" % (map_dest, os.environ.get(map_dest))) # And then everything else prefixed = dict((k, v) for k, v in os.environ.iteritems() if k.startswith("BE_")) if prefixed: lib.echo("\nPrefixed:") for key in sorted(prefixed): if not key.startswith("BE_"): continue lib.echo("- %s=%s" % (key, os.environ.get(key))) sys.exit(lib.NORMAL)
<SYSTEM_TASK:> Return a string representing the default user agent. <END_TASK> <USER_TASK:> Description: def default_user_agent(name="python-requests"): """Return a string representing the default user agent."""
_implementation = platform.python_implementation()

if _implementation == 'CPython':
    _implementation_version = platform.python_version()
elif _implementation == 'PyPy':
    _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
                                            sys.pypy_version_info.minor,
                                            sys.pypy_version_info.micro)
    if sys.pypy_version_info.releaselevel != 'final':
        _implementation_version = ''.join([_implementation_version,
                                           sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
    _implementation_version = platform.python_version()  # Complete Guess
elif _implementation == 'IronPython':
    _implementation_version = platform.python_version()  # Complete Guess
else:
    _implementation_version = 'Unknown'

try:
    p_system = platform.system()
    p_release = platform.release()
except IOError:
    p_system = 'Unknown'
    p_release = 'Unknown'

return " ".join(['%s/%s' % (name, __version__),
                 '%s/%s' % (_implementation, _implementation_version),
                 '%s/%s' % (p_system, p_release)])
<SYSTEM_TASK:> Given a string object, regardless of type, returns a representation of that <END_TASK> <USER_TASK:> Description: def to_native_string(string, encoding='ascii'): """ Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. """
out = None
if isinstance(string, builtin_str):
    out = string
else:
    if is_py2:
        out = string.encode(encoding)
    else:
        out = string.decode(encoding)
return out
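Illustration on Python 3, where `builtin_str` is `str` and `is_py2` is False (as in requests' compat module):

assert to_native_string(b'token') == 'token'   # bytes are decoded
assert to_native_string('token') == 'token'    # native strings pass through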
<SYSTEM_TASK:> List topic from external datastore <END_TASK> <USER_TASK:> Description: def ls(*topic, **kwargs): """List topic from external datastore Arguments: topic (str): One or more topics, e.g. ("project", "item", "task") root (str, optional): Absolute path to where projects reside, defaults to os.getcwd() backend (callable, optional): Function to call with absolute path as argument to retrieve children. Defaults to os.listdir absolute (bool, optional): Whether to return relative or absolute paths Example: >> ls() /projects/thedeal /projects/hulk >> ls("thedeal") /projects/thedeal/assets/ben /projects/thedeal/assets/table >> ls("thedeal", "ben") /projects/thedeal/assets/ben/rigging /projects/thedeal/assets/ben/modeling """
context = dump() root = kwargs.get("root") or context.get("cwd") or os.getcwd() backend = kwargs.get("backend", os.listdir) absolute = kwargs.get("absolute", True) content = { 0: "projects", 1: "inventory", 2: "template" }[min(2, len(topic))] # List projects if content == "projects": projects = lib.list_projects(root=root, backend=backend) if absolute: return map(lambda p: os.path.join(root, p), projects) else: return projects # List items if content == "inventory": project = topic[0] be = _extern.load(project, "be", root=root) inventory = _extern.load(project, "inventory", root=root) inventory = lib.invert_inventory(inventory) templates = _extern.load(project, "templates", root=root) if absolute: paths = list() for item, binding in inventory.iteritems(): template = templates.get(binding) index = len(topic) sliced = lib.slice(index, template) paths.append(sliced.format(*(topic + (item,)), **context)) return paths else: return inventory.keys() # List template if content == "template": project = topic[0] be = _extern.load(project, "be", root=root) templates = _extern.load(project, "templates", root=root) inventory = _extern.load(project, "inventory", root=root) return lib.list_template(root=root, topics=topic, templates=templates, inventory=inventory, be=be, absolute=absolute)
<SYSTEM_TASK:> Dump current environment as a dictionary <END_TASK> <USER_TASK:> Description: def dump(context=os.environ): """Dump current environment as a dictionary Arguments: context (dict, optional): Current context, defaults to the current environment. """
output = {}
for key, value in context.iteritems():
    if not key.startswith("BE_"):
        continue
    output[key[3:].lower()] = value
return output
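A small sketch of the transformation (made-up values; Python 2, where dict.iteritems exists): the BE_ prefix is stripped and keys are lowercased.

env = {"BE_PROJECT": "thedeal", "BE_USER": "marcus", "PATH": "/usr/bin"}
assert dump(env) == {"project": "thedeal", "user": "marcus"}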
<SYSTEM_TASK:> Return the be current working directory <END_TASK> <USER_TASK:> Description: def cwd(): """Return the be current working directory"""
cwd = os.environ.get("BE_CWD")
if cwd and not os.path.isdir(cwd):
    sys.stderr.write("ERROR: %s is not a directory" % cwd)
    sys.exit(lib.USER_ERROR)
return cwd or os.getcwd().replace("\\", "/")
<SYSTEM_TASK:> Write script to a temporary directory <END_TASK> <USER_TASK:> Description: def write_script(script, tempdir): """Write script to a temporary directory Arguments: script (list): Commands which to put into a file Returns: Absolute path to script """
name = "script" + self.suffix
path = os.path.join(tempdir, name)
with open(path, "w") as f:
    f.write("\n".join(script))
return path
<SYSTEM_TASK:> Write aliases to temporary directory <END_TASK> <USER_TASK:> Description: def write_aliases(aliases, tempdir): """Write aliases to temporary directory Arguments: aliases (dict): {name: value} dict of aliases tempdir (str): Absolute path to where aliases will be stored """
platform = lib.platform()
if platform == "unix":
    home_alias = "cd $BE_DEVELOPMENTDIR"
else:
    home_alias = "cd %BE_DEVELOPMENTDIR%"
aliases["home"] = home_alias

tempdir = os.path.join(tempdir, "aliases")
os.makedirs(tempdir)

for alias, cmd in aliases.iteritems():
    path = os.path.join(tempdir, alias)
    if platform == "windows":
        path += ".bat"
    with open(path, "w") as f:
        f.write(cmd)
    if platform == "unix":
        # Make executable
        st = os.stat(path)
        os.chmod(path, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

return tempdir
<SYSTEM_TASK:> Evaluate whether gist is a be package <END_TASK> <USER_TASK:> Description: def _gist_is_preset(repo): """Evaluate whether gist is a be package Arguments: gist (str): username/id pair e.g. mottosso/2bb4651a05af85711cde """
_, gistid = repo.split("/")
gist_template = "https://api.github.com/gists/{}"
gist_path = gist_template.format(gistid)
response = get(gist_path)
if response.status_code == 404:
    return False
try:
    data = response.json()
except:
    return False
files = data.get("files", {})
package = files.get("package.json", {})
try:
    content = json.loads(package.get("content", ""))
except:
    return False
if content.get("type") != "bepreset":
    return False
return True
<SYSTEM_TASK:> Evaluate whether GitHub repository is a be package <END_TASK> <USER_TASK:> Description: def _repo_is_preset(repo): """Evaluate whether GitHub repository is a be package Arguments: gist (str): username/id pair e.g. mottosso/be-ad """
package_template = "https://raw.githubusercontent.com"
package_template += "/{repo}/master/package.json"
package_path = package_template.format(repo=repo)
response = get(package_path)
if response.status_code == 404:
    return False
try:
    data = response.json()
except:
    return False
if not data.get("type") == "bepreset":
    return False
return True
<SYSTEM_TASK:> Copy contents of preset into new project <END_TASK> <USER_TASK:> Description: def copy_preset(preset_dir, project_dir): """Copy contents of preset into new project If package.json contains the key "contents", limit the files copied to those present in this list. Arguments: preset_dir (str): Absolute path to preset project_dir (str): Absolute path to new project """
os.makedirs(project_dir)

package_file = os.path.join(preset_dir, "package.json")
with open(package_file) as f:
    package = json.load(f)

for fname in os.listdir(preset_dir):
    src = os.path.join(preset_dir, fname)
    contents = package.get("contents") or []
    if fname not in self.files + contents:
        continue
    if os.path.isfile(src):
        shutil.copy2(src, project_dir)
    else:
        dest = os.path.join(project_dir, fname)
        shutil.copytree(src, dest)
<SYSTEM_TASK:> Prints a message plus a newline to the given file or stdout. On <END_TASK> <USER_TASK:> Description: def echo(message=None, file=None, nl=True, err=False, color=None): """Prints a message plus a newline to the given file or stdout. On first sight, this looks like the print function, but it has improved support for handling Unicode and binary data that does not fail no matter how badly configured the system is. Primarily it means that you can print binary data as well as Unicode data on both 2.x and 3.x to the given file in the most appropriate way possible. This is a very carefree function as in that it will try its best to not fail. In addition to that, if `colorama`_ is installed, the echo function will also support clever handling of ANSI codes. Essentially it will then do the following: - add transparent handling of ANSI color codes on Windows. - hide ANSI codes automatically if the destination file is not a terminal. .. _colorama: http://pypi.python.org/pypi/colorama .. versionchanged:: 2.0 Starting with version 2.0 of Click, the echo function will work with colorama if it's installed. .. versionadded:: 3.0 The `err` parameter was added. .. versionchanged:: 4.0 Added the `color` flag. :param message: the message to print :param file: the file to write to (defaults to ``stdout``) :param err: if set to true the file defaults to ``stderr`` instead of ``stdout``. This is faster and easier than calling :func:`get_text_stderr` yourself. :param nl: if set to `True` (the default) a newline is printed afterwards. :param color: controls if the terminal supports ANSI colors or not. The default is autodetection. """
if file is None: if err: file = _default_text_stderr() else: file = _default_text_stdout() # Convert non bytes/text into the native string type. if message is not None and not isinstance(message, echo_native_types): message = text_type(message) # If there is a message, and we're in Python 3, and the value looks # like bytes, we manually need to find the binary stream and write the # message in there. This is done separately so that most stream # types will work as you would expect. Eg: you can write to StringIO # for other cases. if message and not PY2 and is_bytes(message): binary_file = _find_binary_writer(file) if binary_file is not None: file.flush() binary_file.write(message) if nl: binary_file.write(b'\n') binary_file.flush() return # ANSI-style support. If there is no message or we are dealing with # bytes nothing is happening. If we are connected to a file we want # to strip colors. If we are on windows we either wrap the stream # to strip the color or we use the colorama support to translate the # ansi codes to API calls. if message and not is_bytes(message): if should_strip_ansi(file, color): message = strip_ansi(message) elif WIN: if auto_wrap_for_ansi is not None: file = auto_wrap_for_ansi(file) elif not color: message = strip_ansi(message) if message: file.write(message) if nl: file.write('\n') file.flush()
<SYSTEM_TASK:> Determine subshell matching the currently running shell <END_TASK> <USER_TASK:> Description: def parent(): """Determine subshell matching the currently running shell The shell is determined by either a pre-defined BE_SHELL environment variable, or, if none is found, via psutil which looks at the parent process directly through system-level calls. For example, if `be` is run from cmd.exe, then the full path to cmd.exe is returned, and the same goes for bash.exe and bash (without suffix) for Unix environments. The point is to return an appropriate subshell for the running shell, as opposed to the currently running OS. """
if self._parent:
    return self._parent

if "BE_SHELL" in os.environ:
    self._parent = os.environ["BE_SHELL"]
else:
    # If a shell is not provided, rely on `psutil`
    # to look at the calling process name.
    try:
        import psutil
    except ImportError:
        raise ImportError(
            "No shell provided, see documentation for "
            "BE_SHELL for more information.\n"
            "https://github.com/mottosso/be/wiki"
            "/environment#read-environment-variables")

    parent = psutil.Process(os.getpid()).parent()

    # `pip install` creates an additional executable
    # that tricks the above mechanism to think of it
    # as the parent shell. See #34 for more.
    if parent.name() in ("be", "be.exe"):
        parent = parent.parent()

    self._parent = str(parent.exe())

return self._parent
<SYSTEM_TASK:> Determine subshell command for subprocess.call <END_TASK> <USER_TASK:> Description: def cmd(parent): """Determine subshell command for subprocess.call Arguments: parent (str): Absolute path to parent shell executable """
shell_name = os.path.basename(parent).rsplit(".", 1)[0]
dirname = os.path.dirname(__file__)

# Support for Bash
if shell_name in ("bash", "sh"):
    shell = os.path.join(dirname, "_shell.sh").replace("\\", "/")
    cmd = [parent.replace("\\", "/"), shell]

# Support for Cmd
elif shell_name in ("cmd",):
    shell = os.path.join(dirname, "_shell.bat").replace("\\", "/")
    cmd = [parent, "/K", shell]

# Support for Powershell
elif shell_name in ("powershell",):
    raise SystemError("Powershell not yet supported")

# Unsupported
else:
    raise SystemError("Unsupported shell: %s" % shell_name)

return cmd
<SYSTEM_TASK:> Produce the be environment <END_TASK> <USER_TASK:> Description: def context(root, project=""): """Produce the be environment The environment is an exact replica of the active environment of the current process, with a few additional variables, all of which are listed below. """
environment = os.environ.copy()
environment.update({
    "BE_PROJECT": project,
    "BE_PROJECTROOT": (
        os.path.join(root, project).replace("\\", "/")
        if project else ""),
    "BE_PROJECTSROOT": root,
    "BE_ALIASDIR": "",
    "BE_CWD": root,
    "BE_CD": "",
    "BE_ROOT": "",
    "BE_TOPICS": "",
    "BE_DEVELOPMENTDIR": "",
    "BE_ACTIVE": "1",
    "BE_USER": "",
    "BE_SCRIPT": "",
    "BE_PYTHON": "",
    "BE_ENTER": "",
    "BE_TEMPDIR": "",
    "BE_PRESETSDIR": "",
    "BE_GITHUB_API_TOKEN": "",
    "BE_ENVIRONMENT": "",
    "BE_BINDING": "",
    "BE_TABCOMPLETION": ""
})
return environment
<SYSTEM_TASK:> Return whether or not `path` is a project <END_TASK> <USER_TASK:> Description: def isproject(path): """Return whether or not `path` is a project Arguments: path (str): Absolute path """
try:
    if os.path.basename(path)[0] in (".", "_"):
        return False
    if not os.path.isdir(path):
        return False
    if not any(fname in os.listdir(path)
               for fname in ("templates.yaml", "inventory.yaml")):
        return False
except:
    return False

return True
<SYSTEM_TASK:> Print to the console <END_TASK> <USER_TASK:> Description: def echo(text, silent=False, newline=True): """Print to the console Arguments: text (str): Text to print to the console silent (bool, optional): Whether or not to produce any output newline (bool, optional): Whether or not to append a newline. """
if silent:
    return
print(text) if newline else sys.stdout.write(text)
<SYSTEM_TASK:> List projects at `root` <END_TASK> <USER_TASK:> Description: def list_projects(root, backend=os.listdir): """List projects at `root` Arguments: root (str): Absolute path to the `be` root directory, typically the current working directory. """
projects = list()
for project in sorted(backend(root)):
    abspath = os.path.join(root, project)
    if not isproject(abspath):
        continue
    projects.append(project)
return projects
<SYSTEM_TASK:> List a projects inventory <END_TASK> <USER_TASK:> Description: def list_inventory(inventory): """List a projects inventory Given a project, simply list the contents of `inventory.yaml` Arguments: root (str): Absolute path to the `be` root directory, typically the current working directory. inventory (dict): inventory.yaml """
inverted = invert_inventory(inventory)
items = list()
for item in sorted(inverted, key=lambda a: (inverted[a], a)):
    items.append((item, inverted[item]))
return items
<SYSTEM_TASK:> List contents for resolved template <END_TASK> <USER_TASK:> Description: def list_template(root, topics, templates, inventory, be, absolute=False): """List contents for resolved template Resolve a template as far as possible via the given `topics`. For example, if a template supports 5 arguments, but only 3 are given, resolve the template until its 4th argument and list the contents thereafter. In some cases, an additional path is present following an argument, e.g. {3}/assets. The `/assets` portion is referred to as the "tail" and is appended also. Arguments: topics (tuple): Current topics templates (dict): templates.yaml inventory (dict): inventory.yaml be (dict): be.yaml """
project = topics[0]

# Get item
try:
    key = be.get("templates", {}).get("key") or "{1}"
    item = item_from_topics(key, topics)
    binding = binding_from_item(inventory, item)
except KeyError:
    return []
except IndexError as exc:
    raise IndexError("At least %s topics are required" % str(exc))

fields = replacement_fields_from_context(context(root, project))
binding = binding_from_item(inventory, item)
pattern = pattern_from_template(templates, binding)

# 2 arguments, {1}/{2}/{3} -> {1}/{2}
# 2 arguments, {1}/{2}/assets/{3} -> {1}/{2}/assets
index_end = pattern.index(str(len(topics) - 1)) + 2
trimmed_pattern = pattern[:index_end]

# If there aren't any more positional arguments, we're done
if not re.findall("{[\d]+}", pattern[index_end:]):
    return []

# Append trail
# e.g. {1}/{2}/assets
#             ^^^^^^^
try:
    index_trail = pattern[index_end:].index("{")
    trail = pattern[index_end:index_end + index_trail - 1]
    trimmed_pattern += trail
except ValueError:
    pass

try:
    path = trimmed_pattern.format(*topics, **fields)
except IndexError:
    raise IndexError("Template for \"%s\" has unordered "
                     "positional arguments: \"%s\"" % (item, pattern))

if not os.path.isdir(path):
    return []

items = list()
for dirname in os.listdir(path):
    abspath = os.path.join(path, dirname).replace("\\", "/")
    if not os.path.isdir(abspath):
        continue
    if absolute:
        items.append(abspath)
    else:
        items.append(dirname)
return items
<SYSTEM_TASK:> Convert context replacement fields <END_TASK> <USER_TASK:> Description: def replacement_fields_from_context(context): """Convert context replacement fields Example: BE_KEY=value -> {"key": "value} Arguments: context (dict): The current context """
return dict((k[3:].lower(), context[k])
            for k in context if k.startswith("BE_"))
<SYSTEM_TASK:> Get binding from `topics` via `key` <END_TASK> <USER_TASK:> Description: def item_from_topics(key, topics): """Get binding from `topics` via `key` Example: {0} == hello --> be in hello world {1} == world --> be in hello world Returns: Single topic matching the key Raises: IndexError (int): With number of required arguments for the key """
if re.match("{\d+}", key): pos = int(key.strip("{}")) try: binding = topics[pos] except IndexError: raise IndexError(pos + 1) else: echo("be.yaml template key not recognised") sys.exit(PROJECT_ERROR) return binding
<SYSTEM_TASK:>
Return pattern for a named template
<END_TASK>
<USER_TASK:>
Description:
def pattern_from_template(templates, name):
    """Return pattern for a named template

    Arguments:
        templates (dict): Current templates
        name (str): Name of the template

    """
if name not in templates: echo("No template named \"%s\"" % name) sys.exit(1) return templates[name]
<SYSTEM_TASK:>
Return binding for `item`
<END_TASK>
<USER_TASK:>
Description:
def binding_from_item(inventory, item):
    """Return binding for `item`

    Example:
        asset:
        - myasset

        The binding is "asset"

    Arguments:
        inventory (dict): inventory.yaml
        item (str): Name of item

    """
        bindings = invert_inventory(inventory)

        try:
            return bindings[item]

        except KeyError as exc:
            exc.bindings = bindings
            raise exc
<SYSTEM_TASK:> Resolve the be.yaml environment key <END_TASK> <USER_TASK:> Description: def parse_environment(fields, context, topics): """Resolve the be.yaml environment key Features: - Lists, e.g. ["/path1", "/path2"] - Environment variable references, via $ - Replacement field references, e.g. {key} - Topic references, e.g. {1} """
def _resolve_environment_lists(context): """Concatenate environment lists""" for key, value in context.copy().iteritems(): if isinstance(value, list): context[key] = os.pathsep.join(value) return context def _resolve_environment_references(fields, context): """Resolve $ occurences by expansion Given a dictionary {"PATH": "$PATH;somevalue;{0}"} Return {"PATH": "value_of_PATH;somevalue;myproject"}, given that the first topic - {0} - is "myproject" Arguments: fields (dict): Environment from be.yaml context (dict): Source context """ def repl(match): key = pattern[match.start():match.end()].strip("$") return context.get(key) pat = re.compile("\$\w+", re.IGNORECASE) for key, pattern in fields.copy().iteritems(): fields[key] = pat.sub(repl, pattern) \ .strip(os.pathsep) # Remove superflous separators return fields def _resolve_environment_fields(fields, context, topics): """Resolve {} occurences Supports both positional and BE_-prefixed variables. Example: BE_MYKEY -> "{mykey}" from `BE_MYKEY` {1} -> "{mytask}" from `be in myproject mytask` Returns: Dictionary of resolved fields """ source_dict = replacement_fields_from_context(context) source_dict.update(dict((str(topics.index(topic)), topic) for topic in topics)) def repl(match): key = pattern[match.start():match.end()].strip("{}") try: return source_dict[key] except KeyError: echo("PROJECT ERROR: Unavailable reference \"%s\" " "in be.yaml" % key) sys.exit(PROJECT_ERROR) for key, pattern in fields.copy().iteritems(): fields[key] = re.sub("{[\d\w]+}", repl, pattern) return fields fields = _resolve_environment_lists(fields) fields = _resolve_environment_references(fields, context) fields = _resolve_environment_fields(fields, context, topics) return fields
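A sketch of the kind of be.yaml environment block this resolver handles; the values are invented and the final call is left commented because the hosting module is not named in this file.

    import os

    # environment block from be.yaml: a list, a $-reference and a topic reference.
    fields = {"PYTHONPATH": ["$PYTHONPATH", "{0}/scripts"]}

    context = {"PYTHONPATH": "/existing", "BE_PROJECT": "myproject"}
    topics = ("myproject", "mytask")

    # Hypothetical call:
    # parse_environment(fields, context, topics)
    # -> {"PYTHONPATH": "/existing" + os.pathsep + "myproject/scripts"}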
<SYSTEM_TASK:> Resolve the be.yaml redirect key <END_TASK> <USER_TASK:> Description: def parse_redirect(redirect, topics, context): """Resolve the be.yaml redirect key Arguments: redirect (dict): Source/destination pairs, e.g. {BE_ACTIVE: ACTIVE} topics (tuple): Topics from which to sample, e.g. (project, item, task) context (dict): Context from which to sample """
for map_source, map_dest in redirect.items(): if re.match("{\d+}", map_source): topics_index = int(map_source.strip("{}")) topics_value = topics[topics_index] context[map_dest] = topics_value continue context[map_dest] = context[map_source]
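The redirect key maps either positional topics or existing context keys onto new context entries; a worked example with invented names, based on the function above.

    redirect = {"{1}": "BE_ASSET", "BE_PROJECT": "PROJECT"}
    topics = ("hulk", "bruce")
    context = {"BE_PROJECT": "hulk"}

    # After parse_redirect(redirect, topics, context) the context is mutated:
    #   context["BE_ASSET"] == "bruce"   (topic {1})
    #   context["PROJECT"] == "hulk"     (copied from BE_PROJECT)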
<SYSTEM_TASK:>
Slice a template based on its positional argument
<END_TASK>
<USER_TASK:>
Description:
def slice(index, template):
    """Slice a template based on its positional argument

    Arguments:
        index (int): Position at which to slice
        template (str): Template to slice

    Example:
        >>> slice(0, "{cwd}/{0}/assets/{1}/{2}")
        '{cwd}/{0}'
        >>> slice(1, "{cwd}/{0}/assets/{1}/{2}")
        '{cwd}/{0}/assets/{1}'

    """
try: return re.match("^.*{[%i]}" % index, template).group() except AttributeError: raise ValueError("Index %i not found in template: %s" % (index, template))
<SYSTEM_TASK:>
Returns a dictionary of the headers to add to any request sent
<END_TASK>
<USER_TASK:>
Description:
def proxy_headers(self, proxy):
    """Returns a dictionary of the headers to add to any request sent
    through a proxy. This works with urllib3 magic to ensure that they are
    correctly sent to the proxy, rather than in a tunnelled request if
    CONNECT is being used.

    This should not be called from user code, and is only exposed for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param proxy: The url of the proxy being used for this request.
    """
headers = {} username, password = get_auth_from_url(proxy) if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return headers
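Roughly how this hook behaves when exercised directly on a requests adapter; the proxy URL and credentials are made up.

    from requests.adapters import HTTPAdapter

    adapter = HTTPAdapter()
    # Credentials embedded in the proxy URL become a Proxy-Authorization header.
    headers = adapter.proxy_headers("http://user:secret@proxy.local:3128")
    print(headers)  # {'Proxy-Authorization': 'Basic dXNlcjpzZWNyZXQ='}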
<SYSTEM_TASK:> Takes as an argument an optional domain and path and returns a plain <END_TASK> <USER_TASK:> Description: def get_dict(self, domain=None, path=None): """Takes as an argument an optional domain and path and returns a plain old Python dict of name-value pairs of cookies that meet the requirements."""
dictionary = {} for cookie in iter(self): if (domain is None or cookie.domain == domain) and (path is None or cookie.path == path): dictionary[cookie.name] = cookie.value return dictionary
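A quick, self-contained illustration using the public cookie jar API; the cookie names and domains are invented.

    from requests.cookies import RequestsCookieJar

    jar = RequestsCookieJar()
    jar.set("session", "abc123", domain="example.com", path="/app")
    jar.set("theme", "dark", domain="other.org", path="/")

    # Only cookies matching the given domain (and path, if supplied) are returned.
    print(jar.get_dict(domain="example.com"))  # {'session': 'abc123'}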
<SYSTEM_TASK:> Helper for quickly adding a StreamHandler to the logger. Useful for <END_TASK> <USER_TASK:> Description: def add_stderr_logger(level=logging.DEBUG): """ Helper for quickly adding a StreamHandler to the logger. Useful for debugging. Returns the handler after adding it. """
# This method needs to be in this __init__.py to get the __name__ correct # even if urllib3 is vendored within another package. logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) logger.debug('Added a stderr logging handler to logger: %s' % __name__) return handler
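Typical debugging usage, assuming the library is importable as plain `urllib3` rather than a vendored copy.

    import logging
    import urllib3

    # Attach a DEBUG-level stderr handler to urllib3's package logger.
    handler = urllib3.add_stderr_logger(level=logging.DEBUG)

    # ... make some requests, then detach the handler when done.
    logging.getLogger("urllib3").removeHandler(handler)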
<SYSTEM_TASK:> All arguments have the same meaning as ``ssl_wrap_socket``. <END_TASK> <USER_TASK:> Description: def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED, options=None, ciphers=None): """All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that ``ssl.create_default_context`` does on Python 3.4+. It: - Disables SSLv2, SSLv3, and compression - Sets a restricted set of server ciphers If you wish to enable SSLv3, you can do:: from urllib3.util import ssl_ context = ssl_.create_urllib3_context() context.options &= ~ssl_.OP_NO_SSLv3 You can do the same to enable compression (substituting ``COMPRESSION`` for ``SSLv3`` in the last line above). :param ssl_version: The desired protocol version to use. This will default to PROTOCOL_SSLv23 which will negotiate the highest protocol that both the server and your installation of OpenSSL support. :param cert_reqs: Whether to require the certificate verification. This defaults to ``ssl.CERT_REQUIRED``. :param options: Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. :param ciphers: Which cipher suites to allow the server to select. :returns: Constructed SSLContext object with specified options :rtype: SSLContext """
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) if options is None: options = 0 # SSLv2 is easily broken and is considered harmful and dangerous options |= OP_NO_SSLv2 # SSLv3 has several problems and is now dangerous options |= OP_NO_SSLv3 # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ # (issue #309) options |= OP_NO_COMPRESSION context.options |= options if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 context.set_ciphers(ciphers or _DEFAULT_CIPHERS) context.verify_mode = cert_reqs if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 # We do our own verification, including fingerprints and alternative # hostnames. So disable it here context.check_hostname = False return context
<SYSTEM_TASK:> Put a connection back into the pool. <END_TASK> <USER_TASK:> Description: def _put_conn(self, conn): """ Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. If the pool is already full, the connection is closed and discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased. If the pool is closed, then the connection will be closed and discarded. """
try: self.pool.put(conn, block=False) return # Everything is dandy, done. except AttributeError: # self.pool is None. pass except Full: # This should never happen if self.block == True log.warning( "Connection pool is full, discarding connection: %s" % self.host) # Connection never got put back into the pool, close it. if conn: conn.close()
<SYSTEM_TASK:> Called right before a request is made, after the socket is created. <END_TASK> <USER_TASK:> Description: def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """
super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: warnings.warn(( 'Unverified HTTPS request is being made. ' 'Adding certificate verification is strongly advised. See: ' 'https://urllib3.readthedocs.org/en/latest/security.html'), InsecureRequestWarning)
<SYSTEM_TASK:> Styles a text with ANSI styles and returns the new string. By <END_TASK> <USER_TASK:> Description: def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, blink=None, reverse=None, reset=True): """Styles a text with ANSI styles and returns the new string. By default the styling is self contained which means that at the end of the string a reset code is issued. This can be prevented by passing ``reset=False``. Examples:: click.echo(click.style('Hello World!', fg='green')) click.echo(click.style('ATTENTION!', blink=True)) click.echo(click.style('Some things', reverse=True, fg='cyan')) Supported color names: * ``black`` (might be a gray) * ``red`` * ``green`` * ``yellow`` (might be an orange) * ``blue`` * ``magenta`` * ``cyan`` * ``white`` (might be light gray) * ``reset`` (reset the color code only) .. versionadded:: 2.0 :param text: the string to style with ansi codes. :param fg: if provided this will become the foreground color. :param bg: if provided this will become the background color. :param bold: if provided this will enable or disable bold mode. :param dim: if provided this will enable or disable dim mode. This is badly supported. :param underline: if provided this will enable or disable underline. :param blink: if provided this will enable or disable blinking. :param reverse: if provided this will enable or disable inverse rendering (foreground becomes background and the other way round). :param reset: by default a reset-all code is added at the end of the string which means that styles do not carry over. This can be disabled to compose styles. """
bits = [] if fg: try: bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30)) except ValueError: raise TypeError('Unknown color %r' % fg) if bg: try: bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40)) except ValueError: raise TypeError('Unknown color %r' % bg) if bold is not None: bits.append('\033[%dm' % (1 if bold else 22)) if dim is not None: bits.append('\033[%dm' % (2 if dim else 22)) if underline is not None: bits.append('\033[%dm' % (4 if underline else 24)) if blink is not None: bits.append('\033[%dm' % (5 if blink else 25)) if reverse is not None: bits.append('\033[%dm' % (7 if reverse else 27)) bits.append(text) if reset: bits.append(_ansi_reset_all) return ''.join(bits)
<SYSTEM_TASK:> Determines appropriate setting for a given request, taking into account the <END_TASK> <USER_TASK:> Description: def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """ Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """
if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. for (k, v) in request_setting.items(): if v is None: del merged_setting[k] merged_setting = dict((k, v) for (k, v) in merged_setting.items() if v is not None) return merged_setting
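A short demonstration of the merge semantics; `merge_setting` lives in requests.sessions, and setting a request value to None removes the session default.

    from collections import OrderedDict
    from requests.sessions import merge_setting

    session_headers = OrderedDict([("Accept", "application/json"), ("X-Token", "abc")])
    request_headers = {"X-Token": None, "User-Agent": "demo/1.0"}

    merged = merge_setting(request_headers, session_headers)
    # X-Token is dropped, the request-only header is kept:
    # {'Accept': 'application/json', 'User-Agent': 'demo/1.0'}  (key order may vary)
    print(merged)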
<SYSTEM_TASK:> Receives a Response. Returns a generator of Responses. <END_TASK> <USER_TASK:> Description: def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Receives a Response. Returns a generator of Responses."""
i = 0 hist = [] # keep track of history while resp.is_redirect: prepared_request = req.copy() if i > 0: # Update history and keep track of redirects. hist.append(resp) new_hist = list(hist) resp.history = new_hist try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects) # Release the connection back into the pool. resp.close() url = resp.headers['location'] method = req.method # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (parsed_rurl.scheme, url) # The scheme should be lower case... parsed = urlparse(url) url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) # Cache the url, unless it redirects to itself. if resp.is_permanent_redirect and req.url != prepared_request.url: self.redirect_cache[req.url] = prepared_request.url # http://tools.ietf.org/html/rfc7231#section-6.4.4 if (resp.status_code == codes.see_other and method != 'HEAD'): method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if resp.status_code == codes.found and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if resp.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method # https://github.com/kennethreitz/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): if 'Content-Length' in prepared_request.headers: del prepared_request.headers['Content-Length'] prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) prepared_request._cookies.update(self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # Override the original request. req = prepared_request resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) i += 1 yield resp
<SYSTEM_TASK:> Iterates over the response data. When stream=True is set on the <END_TASK> <USER_TASK:> Description: def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. If decode_unicode is True, content will be decoded using the best available encoding based on the response. """
def generate(): try: # Special case for urllib3. try: for chunk in self.raw.stream(chunk_size, decode_content=True): yield chunk except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) except AttributeError: # Standard file-like object. while True: chunk = self.raw.read(chunk_size) if not chunk: break yield chunk self._content_consumed = True if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = reused_chunks if self._content_consumed else stream_chunks if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks
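The usual streaming-download pattern built on this method; the URL and filename are placeholders.

    import requests

    response = requests.get("https://example.com/big-file", stream=True)
    with open("big-file.bin", "wb") as fh:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:  # skip keep-alive chunks
                fh.write(chunk)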
<SYSTEM_TASK:> A decorator to register a subcommand with the global `Subcommands` instance. <END_TASK> <USER_TASK:> Description: def command(name=None): """A decorator to register a subcommand with the global `Subcommands` instance. """
def decorator(f): _commands.append((name, f)) return f return decorator
<SYSTEM_TASK:>
Top-level driver for creating subcommand-based programs.
<END_TASK>
<USER_TASK:>
Description:
def main(program=None, version=None, doc_template=None, commands=None, argv=None, exit_at_end=True):
    """Top-level driver for creating subcommand-based programs.

    Args:
      program: The name of your program.
      version: The version string for your program.
      doc_template: The top-level docstring template for your program. If
        `None`, a standard default version is applied.
      commands: A `Subcommands` instance.
      argv: The command-line arguments to parse. If `None`, this defaults to
        `sys.argv[1:]`
      exit_at_end: Whether to call `sys.exit()` at the end of the function.

    There are two ways to use this function. First, you can pass `program`,
    `version`, and `doc_template`, in which case `docopt_subcommands` will use
    these arguments along with the subcommands registered with `command()` to
    define your program.

    The second way to use this function is to pass in a `Subcommands` object
    via the `commands` argument. In this case the `program`, `version`, and
    `doc_template` arguments are ignored, and the `Subcommands` instance takes
    precedence.

    In both cases the `argv` argument can be used to specify the arguments to
    be parsed.
    """
if commands is None: if program is None: raise ValueError( '`program` required if subcommand object not provided') if version is None: raise ValueError( '`version` required if subcommand object not provided') commands = Subcommands(program, version, doc_template=doc_template) for name, handler in _commands: commands.add_command(handler, name) if argv is None: argv = sys.argv[1:] result = commands(argv) if exit_at_end: sys.exit(result) else: return result
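A minimal program wired together from the two helpers above; the handler signature and the docstring-as-usage convention follow the docopt_subcommands README as I recall it, so treat the details as a sketch.

    import docopt_subcommands as dsc

    @dsc.command()
    def hello_handler(args):
        """usage: {program} hello <name>

        Greet someone by name.
        """
        print("Hello, {0}!".format(args["<name>"]))

    if __name__ == "__main__":
        dsc.main(program="greeter", version="greeter 1.0")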
<SYSTEM_TASK:> Overridden process_response would "pipe" response.body through BeautifulSoup. <END_TASK> <USER_TASK:> Description: def process_response(self, request, response, spider): """Overridden process_response would "pipe" response.body through BeautifulSoup."""
return response.replace(body=str(BeautifulSoup(response.body, self.parser)))
<SYSTEM_TASK:> Generator producing test_methods, with an optional dataset. <END_TASK> <USER_TASK:> Description: def _expand_datasets(test_functions): """ Generator producing test_methods, with an optional dataset. :param test_functions: Iterator over tuples of test name and test unbound function. :type test_functions: `iterator` of `tuple` of (`unicode`, `function`) :return: Generator yielding a tuple of - method_name : Name of the test method - unbound function : Unbound function that will be the test method. - dataset name : String representation of the given dataset - dataset : Tuple representing the args for a test - param factory : Function that returns params for the test method :rtype: `generator` of `tuple` of ( `unicode`, `function`, `unicode` or None, `tuple` or None, `function` or None, ) """
for name, func in test_functions: dataset_tuples = chain( [(None, getattr(func, 'genty_datasets', {}))], getattr(func, 'genty_dataproviders', []), ) no_datasets = True for dataprovider, datasets in dataset_tuples: for dataset_name, dataset in six.iteritems(datasets): no_datasets = False yield name, func, dataset_name, dataset, dataprovider if no_datasets: # yield the original test method, unaltered yield name, func, None, None, None
<SYSTEM_TASK:> Generator producing test_methods, with any repeat count unrolled. <END_TASK> <USER_TASK:> Description: def _expand_repeats(test_functions): """ Generator producing test_methods, with any repeat count unrolled. :param test_functions: Sequence of tuples of - method_name : Name of the test method - unbound function : Unbound function that will be the test method. - dataset name : String representation of the given dataset - dataset : Tuple representing the args for a test - param factory : Function that returns params for the test method :type test_functions: `iterator` of `tuple` of (`unicode`, `function`, `unicode` or None, `tuple` or None, `function`) :return: Generator yielding a tuple of (method_name, unbound function, dataset, name dataset, repeat_suffix) :rtype: `generator` of `tuple` of (`unicode`, `function`, `unicode` or None, `tuple` or None, `function`, `unicode`) """
for name, func, dataset_name, dataset, dataprovider in test_functions: repeat_count = getattr(func, 'genty_repeat_count', 0) if repeat_count: for i in range(1, repeat_count + 1): repeat_suffix = _build_repeat_suffix(i, repeat_count) yield ( name, func, dataset_name, dataset, dataprovider, repeat_suffix, ) else: yield name, func, dataset_name, dataset, dataprovider, None
<SYSTEM_TASK:> Return the suffix string to identify iteration X out of Y. <END_TASK> <USER_TASK:> Description: def _build_repeat_suffix(iteration, count): """ Return the suffix string to identify iteration X out of Y. For example, with a count of 100, this will build strings like "iteration_053" or "iteration_008". :param iteration: Current iteration. :type iteration: `int` :param count: Total number of iterations. :type count: `int` :return: Repeat suffix. :rtype: `unicode` """
format_width = int(math.ceil(math.log(count + 1, 10))) new_suffix = 'iteration_{0:0{width}d}'.format( iteration, width=format_width ) return new_suffix
<SYSTEM_TASK:> Return a nice human friendly name, that almost looks like code. <END_TASK> <USER_TASK:> Description: def _build_final_method_name( method_name, dataset_name, dataprovider_name, repeat_suffix, ): """ Return a nice human friendly name, that almost looks like code. Example: a test called 'test_something' with a dataset of (5, 'hello') Return: "test_something(5, 'hello')" Example: a test called 'test_other_stuff' with dataset of (9) and repeats Return: "test_other_stuff(9) iteration_<X>" :param method_name: Base name of the method to add. :type method_name: `unicode` :param dataset_name: Base name of the data set. :type dataset_name: `unicode` or None :param dataprovider_name: If there's a dataprovider involved, then this is its name. :type dataprovider_name: `unicode` or None :param repeat_suffix: Suffix to append to the name of the generated method. :type repeat_suffix: `unicode` or None :return: The fully composed name of the generated test method. :rtype: `unicode` """
# For tests using a dataprovider, append "_<dataprovider_name>" to # the test method name suffix = '' if dataprovider_name: suffix = '_{0}'.format(dataprovider_name) if not dataset_name and not repeat_suffix: return '{0}{1}'.format(method_name, suffix) if dataset_name: # Nosetest multi-processing code parses the full test name # to discern package/module names. Thus any periods in the test-name # causes that code to fail. So replace any periods with the unicode # middle-dot character. Yes, this change is applied independent # of the test runner being used... and that's fine since there is # no real contract as to how the fabricated tests are named. dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR) # Place data_set info inside parens, as if it were a function call suffix = '{0}({1})'.format(suffix, dataset_name or "") if repeat_suffix: suffix = '{0} {1}'.format(suffix, repeat_suffix) test_method_name_for_dataset = "{0}{1}".format( method_name, suffix, ) return test_method_name_for_dataset
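From the user's side, the machinery above is driven by the public genty decorators; a sketch of the generated method names, with invented test data.

    from unittest import TestCase
    from genty import genty, genty_dataset, genty_repeat

    @genty
    class AdditionTests(TestCase):

        @genty_repeat(3)
        @genty_dataset(
            small=(1, 2, 3),
            large=(10, 20, 30),
        )
        def test_add(self, a, b, expected):
            self.assertEqual(a + b, expected)

    # Produces methods named roughly:
    #   "test_add(small) iteration_1" ... "test_add(large) iteration_3"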
<SYSTEM_TASK:> Add the described method to the given class. <END_TASK> <USER_TASK:> Description: def _add_method_to_class( target_cls, method_name, func, dataset_name, dataset, dataprovider, repeat_suffix, ): """ Add the described method to the given class. :param target_cls: Test class to which to add a method. :type target_cls: `class` :param method_name: Base name of the method to add. :type method_name: `unicode` :param func: The underlying test function to call. :type func: `callable` :param dataset_name: Base name of the data set. :type dataset_name: `unicode` or None :param dataset: Tuple containing the args of the dataset. :type dataset: `tuple` or None :param repeat_suffix: Suffix to append to the name of the generated method. :type repeat_suffix: `unicode` or None :param dataprovider: The unbound function that's responsible for generating the actual params that will be passed to the test function. Can be None. :type dataprovider: `callable` """
# pylint: disable=too-many-arguments test_method_name_for_dataset = _build_final_method_name( method_name, dataset_name, dataprovider.__name__ if dataprovider else None, repeat_suffix, ) test_method_for_dataset = _build_test_method(func, dataset, dataprovider) test_method_for_dataset = functools.update_wrapper( test_method_for_dataset, func, ) test_method_name_for_dataset = encode_non_ascii_string( test_method_name_for_dataset, ) test_method_for_dataset.__name__ = test_method_name_for_dataset test_method_for_dataset.genty_generated_test = True # Add the method to the class under the proper name setattr(target_cls, test_method_name_for_dataset, test_method_for_dataset)
<SYSTEM_TASK:> Close any open window. <END_TASK> <USER_TASK:> Description: def close(self): """Close any open window. Note that this only works with non-blocking methods. """
if self._process: # Be nice first. self._process.send_signal(signal.SIGINT) # If it doesn't close itself promptly, be brutal. # Python 3.2+ added the timeout option to wait() and the # corresponding TimeoutExpired exception. If they exist, use them. if hasattr(subprocess, 'TimeoutExpired'): try: self._process.wait(timeout=1) except subprocess.TimeoutExpired: self._process.send_signal(signal.SIGKILL) # Otherwise, roll our own polling loop. else: # Give it 1s, checking every 10ms. count = 0 while count < 100: if self._process.poll() is not None: break time.sleep(0.01) # Still hasn't quit. if self._process.poll() is None: self._process.send_signal(signal.SIGKILL) # Clean up. self._process = None
<SYSTEM_TASK:> Show an error window. <END_TASK> <USER_TASK:> Description: def error(self, message, rofi_args=None, **kwargs): """Show an error window. This method blocks until the user presses a key. Fullscreen mode is not supported for error windows, and if specified will be ignored. Parameters ---------- message: string Error message to show. """
rofi_args = rofi_args or [] # Generate arguments list. args = ['rofi', '-e', message] args.extend(self._common_args(allow_fullscreen=False, **kwargs)) args.extend(rofi_args) # Close any existing window and show the error. self._run_blocking(args)
<SYSTEM_TASK:> Show a status message. <END_TASK> <USER_TASK:> Description: def status(self, message, rofi_args=None, **kwargs): """Show a status message. This method is non-blocking, and intended to give a status update to the user while something is happening in the background. To close the window, either call the close() method or use any of the display methods to replace it with a different window. Fullscreen mode is not supported for status messages and if specified will be ignored. Parameters ---------- message: string Progress message to show. """
rofi_args = rofi_args or [] # Generate arguments list. args = ['rofi', '-e', message] args.extend(self._common_args(allow_fullscreen=False, **kwargs)) args.extend(rofi_args) # Update the status. self._run_nonblocking(args)
<SYSTEM_TASK:>
Show a list of options and return user selection.
<END_TASK>
<USER_TASK:>
Description:
def select(self, prompt, options, rofi_args=None, message="", select=None, **kwargs):
    """Show a list of options and return user selection.

    This method blocks until the user makes their choice.

    Parameters
    ----------
    prompt: string
        The prompt telling the user what they are selecting.
    options: list of strings
        The options they can choose from. Any newline characters are
        replaced with spaces.
    message: string, optional
        Message to show between the prompt and the options. This can
        contain Pango markup, and any text content should be escaped.
    select: integer, optional
        Set which option is initially selected.
    keyN: tuple (string, string); optional
        Custom key bindings where N is one or greater. The first entry in
        the tuple should be a string defining the key, e.g., "Alt+x" or
        "Delete". Note that letter keys should be lowercase, i.e., Alt+a
        not Alt+A. The second entry should be a short string stating the
        action the key will take. This is displayed to the user at the top
        of the dialog. If None or an empty string, it is not displayed (but
        the binding is still set).

        By default, key1 through key9 are set to ("Alt+1", None) through
        ("Alt+9", None) respectively.

    Returns
    -------
    tuple (index, key)
        The index of the option the user selected, or -1 if they cancelled
        the dialog.
        Key indicates which key was pressed, with 0 being 'OK' (generally
        Enter), -1 being 'Cancel' (generally escape), and N being custom
        key N.
    """
        rofi_args = rofi_args or []
        # Replace newlines and turn the options into a single string.
        optionstr = '\n'.join(option.replace('\n', ' ') for option in options)

        # Set up arguments.
        args = ['rofi', '-dmenu', '-p', prompt, '-format', 'i']
        if select is not None:
            args.extend(['-selected-row', str(select)])

        # Key bindings to display.
        display_bindings = []

        # Configure the key bindings.
        user_keys = set()
        for k, v in kwargs.items():
            # See if the keyword name matches the needed format.
            if not k.startswith('key'):
                continue
            try:
                keynum = int(k[3:])
            except ValueError:
                continue

            # Add it to the set.
            key, action = v
            user_keys.add(keynum)
            args.extend(['-kb-custom-{0:s}'.format(k[3:]), key])
            if action:
                display_bindings.append("<b>{0:s}</b>: {1:s}".format(key, action))

        # And the global exit bindings.
        exit_keys = set()
        next_key = 10
        for key in self.exit_hotkeys:
            while next_key in user_keys:
                next_key += 1
            exit_keys.add(next_key)
            args.extend(['-kb-custom-{0:d}'.format(next_key), key])
            next_key += 1

        # Add any displayed key bindings to the message.
        message = message or ""
        if display_bindings:
            message += "\n" + " ".join(display_bindings)
        message = message.strip()

        # If we have a message, add it to the arguments.
        if message:
            args.extend(['-mesg', message])

        # Add in common arguments.
        args.extend(self._common_args(**kwargs))
        args.extend(rofi_args)

        # Run the dialog.
        returncode, stdout = self._run_blocking(args, input=optionstr)

        # Figure out which option was selected.
        stdout = stdout.strip()
        index = int(stdout) if stdout else -1

        # And map the return code to a key.
        if returncode == 0:
            key = 0
        elif returncode == 1:
            key = -1
        elif returncode > 9:
            key = returncode - 9
            if key in exit_keys:
                raise SystemExit()
        else:
            self.exit_with_error("Unexpected rofi returncode {0:d}.".format(returncode))

        # And return.
        return index, key
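Typical usage of the selection dialog through the python-rofi API, with one custom key binding; the option values are invented.

    from rofi import Rofi

    r = Rofi()
    options = ["Red", "Green", "Blue"]

    # key is 0 for Enter, -1 for Escape, N for custom key N.
    index, key = r.select("Pick a colour", options, key1=("Alt+d", "Delete entry"))
    if key == 0 and index >= 0:
        print("You picked", options[index])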
<SYSTEM_TASK:> A generic entry box. <END_TASK> <USER_TASK:> Description: def generic_entry(self, prompt, validator=None, message=None, rofi_args=None, **kwargs): """A generic entry box. Parameters ---------- prompt: string Text prompt for the entry. validator: function, optional A function to validate and convert the value entered by the user. It should take one parameter, the string that the user entered, and return a tuple (value, error). The value should be the users entry converted to the appropriate Python type, or None if the entry was invalid. The error message should be a string telling the user what was wrong, or None if the entry was valid. The prompt will be re-displayed to the user (along with the error message) until they enter a valid value. If no validator is given, the text that the user entered is returned as-is. message: string Optional message to display under the entry. Returns ------- The value returned by the validator, or None if the dialog was cancelled. Examples -------- Enforce a minimum entry length: >>> r = Rofi() >>> validator = lambda s: (s, None) if len(s) > 6 else (None, "Too short") >>> r.generic_entry('Enter a 7-character or longer string: ', validator) """
error = "" rofi_args = rofi_args or [] # Keep going until we get something valid. while True: args = ['rofi', '-dmenu', '-p', prompt, '-format', 's'] # Add any error to the given message. msg = message or "" if error: msg = '<span color="#FF0000" font_weight="bold">{0:s}</span>\n{1:s}'.format(error, msg) msg = msg.rstrip('\n') # If there is actually a message to show. if msg: args.extend(['-mesg', msg]) # Add in common arguments. args.extend(self._common_args(**kwargs)) args.extend(rofi_args) # Run it. returncode, stdout = self._run_blocking(args, input="") # Was the dialog cancelled? if returncode == 1: return None # Get rid of the trailing newline and check its validity. text = stdout.rstrip('\n') if validator: value, error = validator(text) if not error: return value else: return text
<SYSTEM_TASK:> Prompt the user to enter a piece of text. <END_TASK> <USER_TASK:> Description: def text_entry(self, prompt, message=None, allow_blank=False, strip=True, rofi_args=None, **kwargs): """Prompt the user to enter a piece of text. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. allow_blank: Boolean Whether to allow blank entries. strip: Boolean Whether to strip leading and trailing whitespace from the entered value. Returns ------- string, or None if the dialog was cancelled. """
def text_validator(text): if strip: text = text.strip() if not allow_blank: if not text: return None, "A value is required." return text, None return self.generic_entry(prompt, text_validator, message, rofi_args, **kwargs)
<SYSTEM_TASK:> Prompt the user to enter an integer. <END_TASK> <USER_TASK:> Description: def integer_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs): """Prompt the user to enter an integer. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: integer, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- integer, or None if the dialog is cancelled. """
# Sanity check. if (min is not None) and (max is not None) and not (max > min): raise ValueError("Maximum limit has to be more than the minimum limit.") def integer_validator(text): error = None # Attempt to convert to integer. try: value = int(text) except ValueError: return None, "Please enter an integer value." # Check its within limits. if (min is not None) and (value < min): return None, "The minimum allowable value is {0:d}.".format(min) if (max is not None) and (value > max): return None, "The maximum allowable value is {0:d}.".format(max) return value, None return self.generic_entry(prompt, integer_validator, message, rofi_args, **kwargs)
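An example of the validated-entry helpers in use; cancellation returns None rather than raising.

    from rofi import Rofi

    r = Rofi()
    count = r.integer_entry("How many copies?", min=1, max=10)
    if count is None:
        print("Cancelled")
    else:
        print("Printing {0} copies".format(count))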
<SYSTEM_TASK:> Prompt the user to enter a floating point number. <END_TASK> <USER_TASK:> Description: def float_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs): """Prompt the user to enter a floating point number. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: float, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- float, or None if the dialog is cancelled. """
# Sanity check. if (min is not None) and (max is not None) and not (max > min): raise ValueError("Maximum limit has to be more than the minimum limit.") def float_validator(text): error = None # Attempt to convert to float. try: value = float(text) except ValueError: return None, "Please enter a floating point value." # Check its within limits. if (min is not None) and (value < min): return None, "The minimum allowable value is {0}.".format(min) if (max is not None) and (value > max): return None, "The maximum allowable value is {0}.".format(max) return value, None return self.generic_entry(prompt, float_validator, message, rofi_args, **kwargs)
<SYSTEM_TASK:> Prompt the user to enter a decimal number. <END_TASK> <USER_TASK:> Description: def decimal_entry(self, prompt, message=None, min=None, max=None, rofi_args=None, **kwargs): """Prompt the user to enter a decimal number. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. min, max: Decimal, optional Minimum and maximum values to allow. If None, no limit is imposed. Returns ------- Decimal, or None if the dialog is cancelled. """
# Sanity check. if (min is not None) and (max is not None) and not (max > min): raise ValueError("Maximum limit has to be more than the minimum limit.") def decimal_validator(text): error = None # Attempt to convert to decimal. try: value = Decimal(text) except InvalidOperation: return None, "Please enter a decimal value." # Check its within limits. if (min is not None) and (value < min): return None, "The minimum allowable value is {0}.".format(min) if (max is not None) and (value > max): return None, "The maximum allowable value is {0}.".format(max) return value, None return self.generic_entry(prompt, decimal_validator, message, rofi_args, **kwargs)
<SYSTEM_TASK:> Prompt the user to enter a date. <END_TASK> <USER_TASK:> Description: def date_entry(self, prompt, message=None, formats=['%x', '%d/%m/%Y'], show_example=False, rofi_args=None, **kwargs): """Prompt the user to enter a date. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. formats: list of strings, optional The formats that the user can enter dates in. These should be format strings as accepted by the datetime.datetime.strptime() function from the standard library. They are tried in order, and the first that returns a date object without error is selected. Note that the '%x' in the default list is the current locale's date representation. show_example: Boolean If True, today's date in the first format given is appended to the message. Returns ------- datetime.date, or None if the dialog is cancelled. """
def date_validator(text): # Try them in order. for format in formats: try: dt = datetime.strptime(text, format) except ValueError: continue else: # This one worked; good enough for us. return (dt.date(), None) # None of the formats worked. return (None, 'Please enter a valid date.') # Add an example to the message? if show_example: message = message or "" message += "Today's date in the correct format: " + datetime.now().strftime(formats[0]) return self.generic_entry(prompt, date_validator, message, rofi_args, **kwargs)
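And the date variant, letting the helper show today's date as a format hint; the prompt text is invented.

    from rofi import Rofi

    r = Rofi()
    due = r.date_entry("Due date", show_example=True)
    if due is not None:
        print("Due on", due.isoformat())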
<SYSTEM_TASK:> Prompt the user to enter a time. <END_TASK> <USER_TASK:> Description: def time_entry(self, prompt, message=None, formats=['%X', '%H:%M', '%I:%M', '%H.%M', '%I.%M'], show_example=False, rofi_args=None, **kwargs): """Prompt the user to enter a time. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. formats: list of strings, optional The formats that the user can enter times in. These should be format strings as accepted by the datetime.datetime.strptime() function from the standard library. They are tried in order, and the first that returns a time object without error is selected. Note that the '%X' in the default list is the current locale's time representation. show_example: Boolean If True, the current time in the first format given is appended to the message. Returns ------- datetime.time, or None if the dialog is cancelled. """
        def time_validator(text):
            # Try them in order.
            for format in formats:
                try:
                    dt = datetime.strptime(text, format)
                except ValueError:
                    continue
                else:
                    # This one worked; good enough for us.
                    return (dt.time(), None)

            # None of the formats worked.
            return (None, 'Please enter a valid time.')

        # Add an example to the message?
        if show_example:
            message = message or ""
            message += "Current time in the correct format: " + datetime.now().strftime(formats[0])

        return self.generic_entry(prompt, time_validator, message, rofi_args, **kwargs)
<SYSTEM_TASK:> Prompt the user to enter a date and time. <END_TASK> <USER_TASK:> Description: def datetime_entry(self, prompt, message=None, formats=['%x %X'], show_example=False, rofi_args=None, **kwargs): """Prompt the user to enter a date and time. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. formats: list of strings, optional The formats that the user can enter the date and time in. These should be format strings as accepted by the datetime.datetime.strptime() function from the standard library. They are tried in order, and the first that returns a datetime object without error is selected. Note that the '%x %X' in the default list is the current locale's date and time representation. show_example: Boolean If True, the current date and time in the first format given is appended to the message. Returns ------- datetime.datetime, or None if the dialog is cancelled. """
def datetime_validator(text): # Try them in order. for format in formats: try: dt = datetime.strptime(text, format) except ValueError: continue else: # This one worked; good enough for us. return (dt, None) # None of the formats worked. return (None, 'Please enter a valid date and time.') # Add an example to the message? if show_example: message = message or "" message += "Current date and time in the correct format: " + datetime.now().strftime(formats[0]) return self.generic_entry(prompt, datetime_validator, message, rofi_args, **kwargs)