code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
@unittest.skip("Unimplemented Function") <NEW_LINE> class MinimumSpanningTree(unittest.TestCase): <NEW_LINE> <INDENT> def run_and_check(self, vertexes, adjacent, cost): <NEW_LINE> <INDENT> result = minimum_spanning_tree(vertexes, adjacent, cost) <NEW_LINE> <DEDENT> def value(self, vertexes, result): <NEW_LINE> <INDENT> value = 0 <NEW_LINE> for v in vertexes: <NEW_LINE> <INDENT> for a in n.adjacent(): <NEW_LINE> <INDENT> value += v.cost(a) <NEW_LINE> <DEDENT> <DEDENT> return value / 2 <NEW_LINE> <DEDENT> def minimum(self, graph): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def test_test(self): <NEW_LINE> <INDENT> g, f = WeightedNode.random_graph(100) <NEW_LINE> <DEDENT> def test_circle(self): <NEW_LINE> <INDENT> assert(False) <NEW_LINE> <DEDENT> def test_tree(self): <NEW_LINE> <INDENT> assert(False) <NEW_LINE> <DEDENT> def test_random_graphs(self): <NEW_LINE> <INDENT> assert(False) <NEW_LINE> <DEDENT> def test_invalid_inputs(self): <NEW_LINE> <INDENT> assert(False) <NEW_LINE> <DEDENT> def test_time_complexity(self): <NEW_LINE> <INDENT> assert(False)
Test Strategy: numpy has Kruskal's algorithm, which I can use as a reference. I'm going to need new data sets to test this with... - Undirected - Randomly Generated Notes on MSTs: - If all edge weights are unique then the MST is also unique. - The value of the minimum spanning tree is the minimum even if the trees are not unique.
6259906a45492302aabfdcb9
class KwicParser: <NEW_LINE> <INDENT> def __init__(self, callback): <NEW_LINE> <INDENT> self.callback = callback <NEW_LINE> self.generator = None <NEW_LINE> self.reset() <NEW_LINE> <DEDENT> def reset(self): <NEW_LINE> <INDENT> self.next_mark = 0 <NEW_LINE> self.group = None <NEW_LINE> <DEDENT> def get_generator(self, from_mark): <NEW_LINE> <INDENT> if not self.generator or self.next_mark > (from_mark or 0): <NEW_LINE> <INDENT> assert(self.generator is None) <NEW_LINE> self.generator = iter(self) <NEW_LINE> <DEDENT> return self.generator <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def read_token_count(cls): <NEW_LINE> <INDENT> ret = 0 <NEW_LINE> for i in (cls(lambda e: [0])): <NEW_LINE> <INDENT> ret += 1 <NEW_LINE> <DEDENT> return ret <NEW_LINE> <DEDENT> def __iter__(self): <NEW_LINE> <INDENT> self.reset() <NEW_LINE> kwic_path = write_kwic_index() <NEW_LINE> for event, elem in ET.iterparse( kwic_path, events=['start', 'end'] ): <NEW_LINE> <INDENT> if event == 'start' and elem.tag == 'item': <NEW_LINE> <INDENT> item = elem <NEW_LINE> <DEDENT> if event == 'end' and (elem.tag in ['string', 'kwicindex']): <NEW_LINE> <INDENT> if elem.tag != 'kwicindex': <NEW_LINE> <INDENT> item.text = elem.text or '' <NEW_LINE> <DEDENT> res = self._add_token_to_group(item) <NEW_LINE> if res is not None: <NEW_LINE> <INDENT> for r in self.callback(res): <NEW_LINE> <INDENT> self.next_mark += 1 <NEW_LINE> yield r <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def _add_token_to_group(self, token): <NEW_LINE> <INDENT> ret = None <NEW_LINE> if self.group is None: <NEW_LINE> <INDENT> self.group = token <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> group = self.group <NEW_LINE> token_lemma = token.attrib.get('lemma', '') <NEW_LINE> group_lemma = group.attrib.get('lemma', '') <NEW_LINE> lemmas = group_lemma + token_lemma <NEW_LINE> if token_lemma == group_lemma and lemmas.lower() != lemmas: <NEW_LINE> <INDENT> group.attrib['following'] = token.attrib.get('following', '') <NEW_LINE> 
group.text = (group.text + ' ' + token.text) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ret = group <NEW_LINE> ret.attrib['seq_order'] = self.next_mark <NEW_LINE> self.group = token <NEW_LINE> <DEDENT> <DEDENT> return ret
A generator that iterates through the KWIC XML file. It yields items (token and lemma element) for each <string> element. A callback system allows you to transform the item into other objects before they are yielded. It also exposes a cursor (self.next_mark) to help with QuerySet slices. This class was created to tie a Generator (parsing the XML) with the current position in that XML (.next_mark) so we can reuse it. There is no built-in mechanism with Python 3 to easily attach a variable to a generator and access it from inside or outside so this class BINDS the instance of KwicParser with its generator via get_generator() Usage: for token_element in KwicParser(cb): ... DO NOT USE iter(p)
6259906a26068e7796d4e11b
class KeyPathGenerator(object): <NEW_LINE> <INDENT> def __init__(self, source_path, candidate_paths): <NEW_LINE> <INDENT> self.source_path = source_path <NEW_LINE> self.candidate_paths = candidate_paths <NEW_LINE> self.matched_candidate_paths = [] <NEW_LINE> self.node_key_counter = 0 <NEW_LINE> self.tnode = 0.8 <NEW_LINE> self._key_source_path(source_path) <NEW_LINE> self.matched_candidate_paths = self._match_candidate_paths(candidate_paths) <NEW_LINE> for candidate_path in self.matched_candidate_paths: <NEW_LINE> <INDENT> self._key_candidate_path(candidate_path) <NEW_LINE> <DEDENT> <DEDENT> def _key_source_path(self, source_path): <NEW_LINE> <INDENT> for i,a in enumerate(source_path): <NEW_LINE> <INDENT> if i == 0: <NEW_LINE> <INDENT> if a.key is None: <NEW_LINE> <INDENT> a.key = unichr(self.node_key_counter) <NEW_LINE> self.node_key_counter+=1 <NEW_LINE> <DEDENT> <DEDENT> for j,b in enumerate(source_path[i+1:]): <NEW_LINE> <INDENT> if a == b: <NEW_LINE> <INDENT> b.key = a.key <NEW_LINE> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if b.key is None: <NEW_LINE> <INDENT> b.key = unichr(self.node_key_counter) <NEW_LINE> self.node_key_counter+=1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def source_key_path(self): <NEW_LINE> <INDENT> path = [n.key for n in self.source_path] <NEW_LINE> return ''.join(path) if len(path)>1 else path[0] <NEW_LINE> <DEDENT> def matched_candidate_key_paths(self): <NEW_LINE> <INDENT> keyed_paths = [] <NEW_LINE> matched_paths = [] <NEW_LINE> for candidate_path in self.matched_candidate_paths: <NEW_LINE> <INDENT> path = [n.key for n in candidate_path] <NEW_LINE> path = ''.join(path) if len(path)>1 else path[0] <NEW_LINE> keyed_paths.append(path) <NEW_LINE> matched_paths.append(candidate_path) <NEW_LINE> <DEDENT> return (keyed_paths, matched_paths) <NEW_LINE> <DEDENT> def _match_candidate_paths(self, candidate_paths): <NEW_LINE> <INDENT> matched_candidate_paths = set() <NEW_LINE> for candidate_path in candidate_paths: 
<NEW_LINE> <INDENT> m = self._match_candidate_path(candidate_path) <NEW_LINE> if m is True: <NEW_LINE> <INDENT> matched_candidate_paths.update([candidate_path]) <NEW_LINE> continue <NEW_LINE> <DEDENT> <DEDENT> return matched_candidate_paths <NEW_LINE> <DEDENT> def _match_candidate_path(self, candidate_path): <NEW_LINE> <INDENT> for a in self.source_path: <NEW_LINE> <INDENT> for b in candidate_path: <NEW_LINE> <INDENT> if a.matches_candidate(b, self.tnode): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def _key_candidate_path(self, candidate_path): <NEW_LINE> <INDENT> matchFound = False <NEW_LINE> for a in self.source_path: <NEW_LINE> <INDENT> for b in candidate_path: <NEW_LINE> <INDENT> if a.matches_candidate(b, self.tnode): <NEW_LINE> <INDENT> b.key = a.key <NEW_LINE> matchFound = True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for b in candidate_path: <NEW_LINE> <INDENT> if b.key is None: <NEW_LINE> <INDENT> b.key = unichr(self.node_key_counter) <NEW_LINE> self.node_key_counter+=1
Candidate Target Path Key Generator
6259906a44b2445a339b7550
class Graph(object): <NEW_LINE> <INDENT> def __init__(self, config): <NEW_LINE> <INDENT> self.config = config <NEW_LINE> self.layers = [] <NEW_LINE> self.names = [] <NEW_LINE> for layer_type, layer_params in config: <NEW_LINE> <INDENT> self.__check_layer(layer_type) <NEW_LINE> layer = self.__create_layer(layer_type, layer_params) <NEW_LINE> if layer.name == layer_type: <NEW_LINE> <INDENT> layer_index = 0 <NEW_LINE> for name in self.names[::-1]: <NEW_LINE> <INDENT> if layer_type in name: <NEW_LINE> <INDENT> layer_index = int(name.split("_")[1]) + 1 <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> layer.name = layer_type + "_" + str(layer_index) <NEW_LINE> <DEDENT> self.names.append(layer.name) <NEW_LINE> self.layers.append(layer) <NEW_LINE> <DEDENT> <DEDENT> def __getitem__(self, key): <NEW_LINE> <INDENT> if type(key) == int: <NEW_LINE> <INDENT> return self.layers[key] <NEW_LINE> <DEDENT> if key not in self.names: <NEW_LINE> <INDENT> raise KeyError( "{} is not in the graph".format(key) ) <NEW_LINE> <DEDENT> return self.layers[self.names.index(key)] <NEW_LINE> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return len(self.layers) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return "\n".join([str(layer) for layer in self.layers]) <NEW_LINE> <DEDENT> def __check_layer(self, layer_type): <NEW_LINE> <INDENT> if layer_type not in globals(): <NEW_LINE> <INDENT> raise NameError( "{} is not an valid layer name!".format(layer_type) ) <NEW_LINE> <DEDENT> <DEDENT> def __create_layer(self, layer_type, layer_params): <NEW_LINE> <INDENT> if layer_params: <NEW_LINE> <INDENT> return globals()[layer_type](**layer_params) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return globals()[layer_type]()
The graph or network structure of a neural network. Arguments: config (list): a list of tuples, where each tuple contains the name and parameters of a layer. Attributes: config (list): a list of tuples, where each tuple contains the name and parameters of a layer. layers (list): a list of layers. Each layer is a layer object instantiated using a class from the "layer" module. names (list): a list of layer names.
6259906abe8e80087fbc086d
class CommentSubstitution(BaseSubstitution): <NEW_LINE> <INDENT> def __init__(self, context, **kwargs): <NEW_LINE> <INDENT> super(CommentSubstitution, self).__init__(context, **kwargs) <NEW_LINE> <DEDENT> @property <NEW_LINE> def event(self): <NEW_LINE> <INDENT> return self.context.REQUEST.get('event') <NEW_LINE> <DEDENT> @property <NEW_LINE> def comment(self): <NEW_LINE> <INDENT> return self.event.comment
Comment string substitution
6259906a5fdd1c0f98e5f767
class GSystem(object): <NEW_LINE> <INDENT> def __init__(self, especes, reactions): <NEW_LINE> <INDENT> super(GSystem, self).__init__() <NEW_LINE> self.especes = especes <NEW_LINE> self.reactions = reactions
docstring for GSystem
6259906a8da39b475be049cd
class ActiveManager(models.Manager): <NEW_LINE> <INDENT> def active(self): <NEW_LINE> <INDENT> return super(ActiveManager, self).get_queryset().filter(active=True)
An extended manager to return active objects.
6259906a66673b3332c31bdf
class IJSONRPCRequest(IJSONRPCApplicationRequest, IHTTPCredentials, IHTTPRequest, ISkinnable): <NEW_LINE> <INDENT> jsonID = zope.interface.Attribute("""JSON-RPC ID for the request""")
JSON-RPC request.
6259906a63b5f9789fe86944
class MainPageLocators(): <NEW_LINE> <INDENT> SEARCH_username = (By.ID, "your_name") <NEW_LINE> SEARCH_password = (By.ID, "pass") <NEW_LINE> SEARCH_Loginbutton = (By.XPATH, "//button[@type='submit']") <NEW_LINE> SEARCH_Logo = (By.XPATH, "//a[@class='nav-item nav-link nav_text']") <NEW_LINE> SEARCH_ErrorMessage = (By.XPATH, "//div[@id='alert_msg']") <NEW_LINE> SEARCH_RequiredMessage =(By.CLASS_NAME,"errorlist")
A class for main page locators. All main page locators should come here
6259906a460517430c432c46
class __unmatched_case__(Syntax): <NEW_LINE> <INDENT> def __or__(self, line): <NEW_LINE> <INDENT> if line.is_match: <NEW_LINE> <INDENT> MatchStack.get_frame().matched = True <NEW_LINE> return __matched_case__(line.return_value) <NEW_LINE> <DEDENT> return self <NEW_LINE> <DEDENT> def __invert__(self): <NEW_LINE> <INDENT> value = MatchStack.get_frame().value <NEW_LINE> MatchStack.pop() <NEW_LINE> raise IncompletePatternError(value)
This class represents a caseof expression in mid-evaluation, when zero or more lines have been tested, but before a match has been found.
6259906a3317a56b869bf134
class NamingNodeManager(NodeManager): <NEW_LINE> <INDENT> def get_key(self, node): <NEW_LINE> <INDENT> return self._get_hash_value(node.obj) <NEW_LINE> <DEDENT> def _get_hash_value(self, obj): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> hash_value = hash(obj) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> hash_value = id(obj) <NEW_LINE> <DEDENT> return hash_value
The node manager for a naming tree.
6259906a38b623060ffaa443
class CustomUserCreationForm(forms.ModelForm): <NEW_LINE> <INDENT> error_messages = { 'duplicate_username': _("A user with that username already exists."), 'password_mismatch': _("The two password fields didn't match."), } <NEW_LINE> username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$', help_text=_("Required. 30 characters or fewer. Letters, digits and " "@/./+/-/_ only."), error_messages={ 'invalid': _("This value may contain only letters, numbers and " "@/./+/-/_ characters.")}) <NEW_LINE> password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput) <NEW_LINE> password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput, help_text=_("Enter the same password as above, for verification.")) <NEW_LINE> class Meta: <NEW_LINE> <INDENT> model = CustomUser <NEW_LINE> fields = ("username",) <NEW_LINE> <DEDENT> def clean_username(self): <NEW_LINE> <INDENT> username = self.cleaned_data["username"] <NEW_LINE> try: <NEW_LINE> <INDENT> CustomUser._default_manager.get(username=username) <NEW_LINE> <DEDENT> except CustomUser.DoesNotExist: <NEW_LINE> <INDENT> return username <NEW_LINE> <DEDENT> raise forms.ValidationError(self.error_messages['duplicate_username']) <NEW_LINE> <DEDENT> def clean_password2(self): <NEW_LINE> <INDENT> password1 = self.cleaned_data.get("password1") <NEW_LINE> password2 = self.cleaned_data.get("password2") <NEW_LINE> if password1 and password2 and password1 != password2: <NEW_LINE> <INDENT> raise forms.ValidationError( self.error_messages['password_mismatch']) <NEW_LINE> <DEDENT> return password2 <NEW_LINE> <DEDENT> def save(self, commit=True): <NEW_LINE> <INDENT> user = super(CustomUserCreationForm, self).save(commit=False) <NEW_LINE> user.set_password(self.cleaned_data["password1"]) <NEW_LINE> if commit: <NEW_LINE> <INDENT> user.save() <NEW_LINE> <DEDENT> return user
A form that creates a user, with no privileges, from the given username and password.
6259906acc0a2c111447c6c1
class HueLightLevel(GenericHueGaugeSensorEntity): <NEW_LINE> <INDENT> device_class = DEVICE_CLASS_ILLUMINANCE <NEW_LINE> unit_of_measurement = LIGHT_LUX <NEW_LINE> @property <NEW_LINE> def state(self): <NEW_LINE> <INDENT> if self.sensor.lightlevel is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return round(float(10 ** ((self.sensor.lightlevel - 1) / 10000)), 2) <NEW_LINE> <DEDENT> @property <NEW_LINE> def extra_state_attributes(self): <NEW_LINE> <INDENT> attributes = super().extra_state_attributes <NEW_LINE> attributes.update( { "lightlevel": self.sensor.lightlevel, "daylight": self.sensor.daylight, "dark": self.sensor.dark, "threshold_dark": self.sensor.tholddark, "threshold_offset": self.sensor.tholdoffset, } ) <NEW_LINE> return attributes
The light level sensor entity for a Hue motion sensor device.
6259906a32920d7e50bc7828
class PrefetchAccessFilterValue(object): <NEW_LINE> <INDENT> swagger_types = { 'model': 'str', 'field': 'str', 'value': 'str', 'can': 'dict(str, bool)' } <NEW_LINE> attribute_map = { 'model': 'model', 'field': 'field', 'value': 'value', 'can': 'can' } <NEW_LINE> def __init__(self, model=None, field=None, value=None, can=None): <NEW_LINE> <INDENT> self._model = None <NEW_LINE> self._field = None <NEW_LINE> self._value = None <NEW_LINE> self._can = None <NEW_LINE> self.discriminator = None <NEW_LINE> if model is not None: <NEW_LINE> <INDENT> self.model = model <NEW_LINE> <DEDENT> if field is not None: <NEW_LINE> <INDENT> self.field = field <NEW_LINE> <DEDENT> if value is not None: <NEW_LINE> <INDENT> self.value = value <NEW_LINE> <DEDENT> if can is not None: <NEW_LINE> <INDENT> self.can = can <NEW_LINE> <DEDENT> <DEDENT> @property <NEW_LINE> def model(self): <NEW_LINE> <INDENT> return self._model <NEW_LINE> <DEDENT> @model.setter <NEW_LINE> def model(self, model): <NEW_LINE> <INDENT> self._model = model <NEW_LINE> <DEDENT> @property <NEW_LINE> def field(self): <NEW_LINE> <INDENT> return self._field <NEW_LINE> <DEDENT> @field.setter <NEW_LINE> def field(self, field): <NEW_LINE> <INDENT> self._field = field <NEW_LINE> <DEDENT> @property <NEW_LINE> def value(self): <NEW_LINE> <INDENT> return self._value <NEW_LINE> <DEDENT> @value.setter <NEW_LINE> def value(self, value): <NEW_LINE> <INDENT> self._value = value <NEW_LINE> <DEDENT> @property <NEW_LINE> def can(self): <NEW_LINE> <INDENT> return self._can <NEW_LINE> <DEDENT> @can.setter <NEW_LINE> def can(self, can): <NEW_LINE> <INDENT> self._can = can <NEW_LINE> <DEDENT> def to_dict(self): <NEW_LINE> <INDENT> result = {} <NEW_LINE> for attr, _ in six.iteritems(self.swagger_types): <NEW_LINE> <INDENT> value = getattr(self, attr) <NEW_LINE> if isinstance(value, list): <NEW_LINE> <INDENT> result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) <NEW_LINE> <DEDENT> elif hasattr(value, 
"to_dict"): <NEW_LINE> <INDENT> result[attr] = value.to_dict() <NEW_LINE> <DEDENT> elif isinstance(value, dict): <NEW_LINE> <INDENT> result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[attr] = value <NEW_LINE> <DEDENT> <DEDENT> return result <NEW_LINE> <DEDENT> def to_str(self): <NEW_LINE> <INDENT> return pprint.pformat(self.to_dict()) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return self.to_str() <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, PrefetchAccessFilterValue): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.__dict__ == other.__dict__ <NEW_LINE> <DEDENT> def __ne__(self, other): <NEW_LINE> <INDENT> return not self == other
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
6259906a9c8ee82313040d79
class ExternalServiceId(GuidEnum): <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def __new__(self,guid): <NEW_LINE> <INDENT> pass
Unique identifier of an external service. ExternalServiceId(guid: Guid)
6259906a8a43f66fc4bf3975
class ImageWriter: <NEW_LINE> <INDENT> def __init__(self, *, location_root='/media/sda1/camera', capture_period=0.5, image_format='jpg'): <NEW_LINE> <INDENT> self.location_root = os.path.abspath(location_root) <NEW_LINE> self.capture_period = capture_period <NEW_LINE> self.image_format = image_format <NEW_LINE> self.active = True <NEW_LINE> self._location = None <NEW_LINE> self.has_image = False <NEW_LINE> self.size = None <NEW_LINE> self.lock = threading.Condition() <NEW_LINE> self._thread = threading.Thread(target=self._run, daemon=True) <NEW_LINE> self._thread.start() <NEW_LINE> <DEDENT> def setImage(self, img): <NEW_LINE> <INDENT> if not self.active: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.size is None or self.size[0] != img.shape[0] or self.size[1] != img.shape[1]: <NEW_LINE> <INDENT> h, w = img.shape[:2] <NEW_LINE> self.size = (h, w) <NEW_LINE> self.out1 = np.empty((h, w, 3), dtype=np.uint8) <NEW_LINE> self.out2 = np.empty((h, w, 3), dtype=np.uint8) <NEW_LINE> <DEDENT> with self.lock: <NEW_LINE> <INDENT> cv2.copyMakeBorder(img, 0, 0, 0, 0, cv2.BORDER_CONSTANT, value=(0,0,255), dst=self.out1) <NEW_LINE> self.has_image = True <NEW_LINE> self.lock.notify() <NEW_LINE> <DEDENT> <DEDENT> @property <NEW_LINE> def location(self): <NEW_LINE> <INDENT> if self._location is None: <NEW_LINE> <INDENT> if not os.path.exists(self.location_root): <NEW_LINE> <INDENT> raise IOError("Logging disabled, %s does not exist" % self.location_root) <NEW_LINE> <DEDENT> self._location = self.location_root + '/%s' % time.strftime('%Y-%m-%d %H.%M.%S') <NEW_LINE> logger.info("Logging to %s", self._location) <NEW_LINE> os.makedirs(self._location, exist_ok=True) <NEW_LINE> <DEDENT> return self._location <NEW_LINE> <DEDENT> def _run(self): <NEW_LINE> <INDENT> last = time.time() <NEW_LINE> logger.info("Storage thread started") <NEW_LINE> try: <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> with self.lock: <NEW_LINE> <INDENT> now = time.time() <NEW_LINE> while (not 
self.has_image) or (now - last) < self.capture_period: <NEW_LINE> <INDENT> self.lock.wait() <NEW_LINE> now = time.time() <NEW_LINE> <DEDENT> self.out2, self.out1 = self.out1, self.out2 <NEW_LINE> self.has_image = False <NEW_LINE> <DEDENT> fname = '%s/%.2f.%s' % (self.location, now, self.image_format) <NEW_LINE> cv2.imwrite(fname, self.out2) <NEW_LINE> last = now <NEW_LINE> <DEDENT> <DEDENT> except IOError as e: <NEW_LINE> <INDENT> logger.error("Error logging images: %s", e) <NEW_LINE> <DEDENT> logger.warn("Storage thread exited") <NEW_LINE> self.active = False
Creates a thread that periodically writes images to a specified directory. Useful for looking at images after a match has completed. The default location is ``/media/sda1/camera``. The folder ``/media/sda1`` is the default location that USB drives inserted into the RoboRIO are mounted at. The USB drive must have a directory in it named ``camera``. .. note:: It is recommended to only write images when something useful (such as targeting) is happening, otherwise you'll end up with a lot of images written to disk that you probably aren't interested in. Intended usage is:: self.image_writer = ImageWriter() .. while True: img = .. if self.logging_enabled: self.image_writer.setImage(img)
6259906a7d43ff2487428002
class _erfs(Function): <NEW_LINE> <INDENT> def _eval_aseries(self, n, args0, x, logx): <NEW_LINE> <INDENT> point = args0[0] <NEW_LINE> if point is S.Infinity: <NEW_LINE> <INDENT> z = self.args[0] <NEW_LINE> l = [ 1/sqrt(S.Pi) * C.factorial(2*k)*(-S( 4))**(-k)/C.factorial(k) * (1/z)**(2*k + 1) for k in xrange(0, n) ] <NEW_LINE> o = C.Order(1/z**(2*n + 1), x) <NEW_LINE> return (Add(*l))._eval_nseries(x, n, logx) + o <NEW_LINE> <DEDENT> t = point.extract_multiplicatively(S.ImaginaryUnit) <NEW_LINE> if t is S.Infinity: <NEW_LINE> <INDENT> z = self.args[0] <NEW_LINE> l = [ 1/sqrt(S.Pi) * C.factorial(2*k)*(-S( 4))**(-k)/C.factorial(k) * (1/z)**(2*k + 1) for k in xrange(0, n) ] <NEW_LINE> o = C.Order(1/z**(2*n + 1), x) <NEW_LINE> return (Add(*l))._eval_nseries(x, n, logx) + o <NEW_LINE> <DEDENT> return super(_erfs, self)._eval_aseries(n, args0, x, logx) <NEW_LINE> <DEDENT> def fdiff(self, argindex=1): <NEW_LINE> <INDENT> if argindex == 1: <NEW_LINE> <INDENT> z = self.args[0] <NEW_LINE> return -2/sqrt(S.Pi) + 2*z*_erfs(z) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ArgumentIndexError(self, argindex) <NEW_LINE> <DEDENT> <DEDENT> def _eval_rewrite_as_intractable(self, z): <NEW_LINE> <INDENT> return (S.One - erf(z))*C.exp(z**2)
Helper function to make the `\mathrm{erf}(z)` function tractable for the Gruntz algorithm.
6259906a56b00c62f0fb40b2
class AreaInput(graphene.InputObjectType): <NEW_LINE> <INDENT> area_id = graphene.Int(required=False) <NEW_LINE> name = graphene.String(required=False) <NEW_LINE> description = graphene.String(required=False) <NEW_LINE> created = graphene.String(required=False) <NEW_LINE> updated = graphene.String(required=False)
Area Input
6259906ad486a94d0ba2d7a1
class TestDeployVMVolumeCreationFailure(cloudstackTestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> self.testdata = self.testClient.getParsedTestDataConfig() <NEW_LINE> self.apiclient = self.testClient.getApiClient() <NEW_LINE> self.domain = get_domain(self.apiclient) <NEW_LINE> self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) <NEW_LINE> self.testdata["mode"] = self.zone.networktype <NEW_LINE> self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"]) <NEW_LINE> self.account = Account.create( self.apiclient, self.testdata["account"], domainid=self.domain.id ) <NEW_LINE> self.service_offering = ServiceOffering.create( self.apiclient, self.testdata["service_offerings"]["small"] ) <NEW_LINE> self.virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["virtual_machine"], accountid=self.account.name, zoneid=self.zone.id, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id) <NEW_LINE> self.mock_volume_failure = SimulatorMock.create( apiclient=self.apiclient, command="CopyCommand", count=6) <NEW_LINE> self.cleanup = [ self.service_offering, self.account, self.mock_volume_failure ] <NEW_LINE> <DEDENT> @attr(tags = ['selfservice']) <NEW_LINE> def test_deploy_vm_volume_creation_failure(self): <NEW_LINE> <INDENT> self.virtual_machine = None <NEW_LINE> with self.assertRaises(Exception): <NEW_LINE> <INDENT> self.virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["virtual_machine2"], accountid=self.account.name, zoneid=self.zone.id, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id) <NEW_LINE> <DEDENT> self.mock_volume_failure = self.mock_volume_failure.query(self.apiclient) <NEW_LINE> self.assertEqual( self.mock_volume_failure.count, 2, msg="Volume failure mock not executed") <NEW_LINE> self.virtual_machine = VirtualMachine.create( self.apiclient, 
self.testdata["virtual_machine3"], accountid=self.account.name, zoneid=self.zone.id, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id) <NEW_LINE> list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id) <NEW_LINE> self.assertTrue(isinstance(list_vms, list) and len(list_vms) > 0, msg="List VM response empty") <NEW_LINE> vm = list_vms[0] <NEW_LINE> self.assertEqual( vm.id, self.virtual_machine.id, "VM ids do not match") <NEW_LINE> self.assertEqual( vm.name, self.virtual_machine.name, "VM names do not match") <NEW_LINE> self.assertEqual( vm.state, "Running", msg="VM is not in Running state") <NEW_LINE> self.mock_volume_failure = self.mock_volume_failure.query(self.apiclient) <NEW_LINE> self.assertEqual( self.mock_volume_failure.count, 0, msg="Volume failure mock not executed") <NEW_LINE> <DEDENT> def tearDown(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cleanup_resources(self.apiclient, self.cleanup) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> self.debug("Warning! Exception in tearDown: %s" % e)
Test VM deploy into user account with volume creation failure
6259906a435de62698e9d5ee
class AdminOnlySessionMiddleware(SessionMiddleware): <NEW_LINE> <INDENT> def process_request(self, request): <NEW_LINE> <INDENT> if request.path.startswith(reverse('admin:index')): <NEW_LINE> <INDENT> super(AdminOnlySessionMiddleware, self).process_request(request) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> def process_response(self, request, response): <NEW_LINE> <INDENT> if request.path.startswith(reverse('admin:index')): <NEW_LINE> <INDENT> return super(AdminOnlySessionMiddleware, self).process_response(request, response) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return response
Only do the session stuff for admin urls. The frontend relies on auth tokens.
6259906a7b25080760ed88d3
class StorageRecord(dict): <NEW_LINE> <INDENT> eid_type = None <NEW_LINE> def __init__(self, storage, eid, element): <NEW_LINE> <INDENT> super().__init__(element) <NEW_LINE> self.storage = storage <NEW_LINE> self.eid = eid <NEW_LINE> <DEDENT> def __hash__(self): <NEW_LINE> <INDENT> return hash(self.eid)
A record in the storage container's database. Attributes: eid_type: Element identifier type. storage: Storage container whose database contains this record. eid: Element identifier value.
6259906a1b99ca4002290127
class Route(Document): <NEW_LINE> <INDENT> iata = StringField(required=True) <NEW_LINE> origin = ReferenceField(Airport) <NEW_LINE> destination = ReferenceField(Airport) <NEW_LINE> aircraft = StringField()
Route document structure
6259906abe8e80087fbc086f
class PercentScore(Score): <NEW_LINE> <INDENT> _description = ('100.0 is considered perfect agreement between the ' 'observation and the prediction. 0.0 is the worst possible ' 'agreement') <NEW_LINE> def _check_score(self, score): <NEW_LINE> <INDENT> if not (0.0 <= score <= 100.0): <NEW_LINE> <INDENT> raise errors.InvalidScoreError(("Score of %f must be in " "range 0.0-100.0" % score)) <NEW_LINE> <DEDENT> <DEDENT> @property <NEW_LINE> def sort_key(self): <NEW_LINE> <INDENT> return float(self.score)/100 <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return '%.1f%%' % self.score
A percent score. A float in the range [0.0, 100.0] where higher is better.
6259906a44b2445a339b7551
class Merge(ElemWise): <NEW_LINE> <INDENT> __slots__ = '_child', 'children' <NEW_LINE> @property <NEW_LINE> def schema(self): <NEW_LINE> <INDENT> return schema_concat(self.children) <NEW_LINE> <DEDENT> @property <NEW_LINE> def fields(self): <NEW_LINE> <INDENT> return list(concat(child.fields for child in self.children)) <NEW_LINE> <DEDENT> def _subterms(self): <NEW_LINE> <INDENT> yield self <NEW_LINE> for i in self.children: <NEW_LINE> <INDENT> for node in i._subterms(): <NEW_LINE> <INDENT> yield node <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def _get_field(self, key): <NEW_LINE> <INDENT> for child in self.children: <NEW_LINE> <INDENT> if key in child.fields: <NEW_LINE> <INDENT> if isscalar(child.dshape.measure): <NEW_LINE> <INDENT> return child <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return child[key] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def _project(self, key): <NEW_LINE> <INDENT> if not isinstance(key, (tuple, list)): <NEW_LINE> <INDENT> raise TypeError("Expected tuple or list, got %s" % key) <NEW_LINE> <DEDENT> return merge(*[self[c] for c in key]) <NEW_LINE> <DEDENT> def _leaves(self): <NEW_LINE> <INDENT> return list(unique(concat(i._leaves() for i in self.children)))
Merge many fields together Examples -------- >>> accounts = Symbol('accounts', 'var * {name: string, x: int, y: real}') >>> merge(accounts.name, z=accounts.x + accounts.y).fields ['name', 'z']
6259906af548e778e596cd6f
class CreatureGenerator(object): <NEW_LINE> <INDENT> logged = Logged() <NEW_LINE> @logged <NEW_LINE> def __init__(self, configuration, model, action_factory, rng): <NEW_LINE> <INDENT> super(CreatureGenerator, self).__init__() <NEW_LINE> self.configuration = configuration <NEW_LINE> self.model = model <NEW_LINE> self.action_factory = action_factory <NEW_LINE> self.rng = rng <NEW_LINE> <DEDENT> @logged <NEW_LINE> def generate_creature(self, name): <NEW_LINE> <INDENT> config = self.__get_creature_config(name) <NEW_LINE> new_creature = Character(self.model, self.action_factory, EffectsCollection(), self.rng) <NEW_LINE> new_creature.name = config.name <NEW_LINE> new_creature.body = config.body <NEW_LINE> new_creature.finesse = config.finesse <NEW_LINE> new_creature.mind = config.mind <NEW_LINE> new_creature.hit_points = config.hp <NEW_LINE> new_creature.speed = config.speed <NEW_LINE> new_creature.icon = config.icons <NEW_LINE> new_creature.attack = config.attack <NEW_LINE> for spec in config.effect_handles: <NEW_LINE> <INDENT> new_handle = EffectHandle(trigger = spec.trigger, effect = spec.effect, parameters = spec.parameters, charges = spec.charges) <NEW_LINE> new_creature.add_effect_handle(new_handle) <NEW_LINE> <DEDENT> if not config.ai == None: <NEW_LINE> <INDENT> new_creature.artificial_intelligence = config.ai(new_creature) <NEW_LINE> <DEDENT> return new_creature <NEW_LINE> <DEDENT> @logged <NEW_LINE> def __get_creature_config(self, name): <NEW_LINE> <INDENT> return self.configuration.get_by_name(name)
Class used to generate creatures
6259906a66673b3332c31be1
class TLSCheckError(Exception): <NEW_LINE> <INDENT> pass
Custom TLS error to catch changed thumbprint values.
6259906acb5e8a47e493cd75
class BaseModel: <NEW_LINE> <INDENT> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> if len(kwargs) > 0: <NEW_LINE> <INDENT> for key in kwargs.keys(): <NEW_LINE> <INDENT> if key == '__class__': <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> elif key == 'created_at' or key == 'updated_at': <NEW_LINE> <INDENT> setattr(self, key, datetime.strptime(kwargs[key], '%Y-%m-%dT%H:%M:%S.%f')) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> setattr(self, key, kwargs[key]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.id = str(uuid.uuid4()) <NEW_LINE> self.created_at = datetime.now() <NEW_LINE> self.updated_at = datetime.now() <NEW_LINE> models.storage.new(self) <NEW_LINE> <DEDENT> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return ("[{}] ({}) {}".format(self.__class__.__name__, self.id, self.__dict__)) <NEW_LINE> <DEDENT> def save(self): <NEW_LINE> <INDENT> self.updated_at = datetime.now() <NEW_LINE> models.storage.save() <NEW_LINE> <DEDENT> def to_dict(self): <NEW_LINE> <INDENT> new_dict = self.__dict__.copy() <NEW_LINE> new_dict["__class__"] = self.__class__.__name__ <NEW_LINE> new_dict["created_at"] = self.created_at.isoformat() <NEW_LINE> new_dict["updated_at"] = self.updated_at.isoformat() <NEW_LINE> return new_dict
Class to manage the database
6259906ad6c5a102081e390c
class AsyncMirrorSyncCompletionDetailTest(unittest.TestCase): <NEW_LINE> <INDENT> def test_async_mirror_sync_completion_detail(self): <NEW_LINE> <INDENT> async_mirror_sync_completion_detail_obj = AsyncMirrorSyncCompletionDetail() <NEW_LINE> self.assertNotEqual(async_mirror_sync_completion_detail_obj, None)
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
6259906a21bff66bcd72444a
class ConstantOp(Op): <NEW_LINE> <INDENT> def __init__(self, value, name="Constant"): <NEW_LINE> <INDENT> super(ConstantOp, self).__init__(name) <NEW_LINE> self._value = value <NEW_LINE> self._graph = graph.get_default_graph() <NEW_LINE> self._graph.add_to_graph(self) <NEW_LINE> <DEDENT> def get_value(self): <NEW_LINE> <INDENT> return self._value <NEW_LINE> <DEDENT> def forward(self): <NEW_LINE> <INDENT> return self._value <NEW_LINE> <DEDENT> def grad(self, partial_derivative_opname=None): <NEW_LINE> <INDENT> return 0
The constant operation which contains one initialized value.
6259906a460517430c432c47
class MSEMasterFile: <NEW_LINE> <INDENT> def __init__(self, encoding): <NEW_LINE> <INDENT> self.encoding = encoding <NEW_LINE> self.reconds_count = 0 <NEW_LINE> self.file_handle = None <NEW_LINE> <DEDENT> def load(self): <NEW_LINE> <INDENT> if os.path.isfile('EMASTER'): <NEW_LINE> <INDENT> self.file_handle = open('EMASTER', 'rb') <NEW_LINE> self.reconds_count = struct.unpack("H", self.file_handle.read(2))[0] <NEW_LINE> self.last_file = struct.unpack("H", self.file_handle.read(2))[0] <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> def close(self): <NEW_LINE> <INDENT> if self.file_handle is not None: <NEW_LINE> <INDENT> self.file_handle.close() <NEW_LINE> self.file_handle = None <NEW_LINE> <DEDENT> <DEDENT> def load_symbol(self, i): <NEW_LINE> <INDENT> symbol = Stock() <NEW_LINE> self.file_handle.seek( (i+1)*192) <NEW_LINE> self.file_handle.seek(2, os.SEEK_CUR) <NEW_LINE> symbol.file_number = struct.unpack("B", self.file_handle.read(1))[0] <NEW_LINE> if symbol.file_number == 0: <NEW_LINE> <INDENT> return symbol <NEW_LINE> <DEDENT> symbol.filename = 'F%d' % symbol.file_number <NEW_LINE> symbol.datafile_ext = '.DAT' <NEW_LINE> self.file_handle.seek(3, os.SEEK_CUR) <NEW_LINE> symbol.fields = struct.unpack("B", self.file_handle.read(1))[0] <NEW_LINE> self.file_handle.seek(4, os.SEEK_CUR) <NEW_LINE> name = self.file_handle.read(14) <NEW_LINE> symbol.stock_symbol = paddedString(name, 'ascii') <NEW_LINE> self.file_handle.seek(7, os.SEEK_CUR) <NEW_LINE> name = self.file_handle.read(16) <NEW_LINE> symbol.stock_name = paddedString(name, self.encoding) <NEW_LINE> self.file_handle.seek(12, os.SEEK_CUR) <NEW_LINE> symbol.time_frame = struct.unpack("c", self.file_handle.read(1))[0].decode('ascii') <NEW_LINE> self.file_handle.seek(3, os.SEEK_CUR) <NEW_LINE> symbol.first_date = float2date(fmsbin2ieee(self.file_handle.read(4))) <NEW_LINE> self.file_handle.seek(4, os.SEEK_CUR) <NEW_LINE> symbol.last_date = float2date(fmsbin2ieee(self.file_handle.read(4))) <NEW_LINE> return symbol
Metastock extended index file
6259906a009cb60464d02d1d
class PinSage(nn.Module): <NEW_LINE> <INDENT> def __init__(self, num_nodes, feature_sizes, T, restart_prob, max_nodes, use_feature=False, G=None): <NEW_LINE> <INDENT> super(PinSage, self).__init__() <NEW_LINE> self.T = T <NEW_LINE> self.restart_prob = restart_prob <NEW_LINE> self.max_nodes = max_nodes <NEW_LINE> self.in_features = feature_sizes[0] <NEW_LINE> self.out_features = feature_sizes[-1] <NEW_LINE> self.n_layers = len(feature_sizes) - 1 <NEW_LINE> self.convs = nn.ModuleList() <NEW_LINE> for i in range(self.n_layers): <NEW_LINE> <INDENT> self.convs.append(PinSageConv( feature_sizes[i], feature_sizes[i+1], feature_sizes[i+1])) <NEW_LINE> <DEDENT> self.h = create_embeddings(num_nodes, self.in_features) <NEW_LINE> self.use_feature = use_feature <NEW_LINE> if use_feature: <NEW_LINE> <INDENT> self.emb = nn.ModuleDict() <NEW_LINE> self.proj = nn.ModuleDict() <NEW_LINE> for key, scheme in G.node_attr_schemes().items(): <NEW_LINE> <INDENT> if scheme.dtype == torch.int64: <NEW_LINE> <INDENT> self.emb[key] = nn.Embedding( G.ndata[key].max().item() + 1, self.in_features, padding_idx=0) <NEW_LINE> <DEDENT> elif scheme.dtype == torch.float32: <NEW_LINE> <INDENT> self.proj[key] = nn.Sequential( nn.Linear(scheme.shape[0], self.in_features), nn.LeakyReLU(), ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def forward(self, G, nodeset): <NEW_LINE> <INDENT> if self.use_feature: <NEW_LINE> <INDENT> h = mix_embeddings(self.h, G.ndata, self.emb, self.proj) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> h = self.h <NEW_LINE> <DEDENT> nodeflow = randomwalk.random_walk_nodeflow( G, nodeset, self.n_layers, self.restart_prob, self.max_nodes, self.T) <NEW_LINE> for i, (nodeset, nb_weights, nb_nodes) in enumerate(nodeflow): <NEW_LINE> <INDENT> new_embeddings = self.convs[i](h, nodeset, nb_nodes, nb_weights) <NEW_LINE> h = put_embeddings(h, nodeset, new_embeddings) <NEW_LINE> <DEDENT> h_new = get_embeddings(h, nodeset) <NEW_LINE> return h_new
Completes a multi-layer PinSage convolution G: DGLGraph feature_sizes: the dimensionality of input/hidden/output features T: number of neighbors we pick for each node restart_prob: restart probability max_nodes: max number of nodes visited for each seed
6259906a796e427e5384ff5b
class GeminateConsonants(SuggestionStrategy): <NEW_LINE> <INDENT> def suggest(self, word): <NEW_LINE> <INDENT> start = 1 <NEW_LINE> for i in range(start, len(word)-1): <NEW_LINE> <INDENT> candidate = list(word) <NEW_LINE> prev = candidate[i-1] <NEW_LINE> char = candidate[i] <NEW_LINE> next = candidate[i+1] <NEW_LINE> if prev == '\u0D4D' or next == '\u0D4D': <NEW_LINE> <INDENT> i = i+1 <NEW_LINE> continue <NEW_LINE> <DEDENT> if self.isConsonant(char): <NEW_LINE> <INDENT> candidate[i] = char + '\u0D4D' + char <NEW_LINE> i = i+1 <NEW_LINE> yield ''.join(candidate) <NEW_LINE> <DEDENT> <DEDENT> start = 1 <NEW_LINE> for i in range(start, len(word)-2): <NEW_LINE> <INDENT> candidate = list(word) <NEW_LINE> char = candidate[i] <NEW_LINE> next = candidate[i+1] <NEW_LINE> then = candidate[i+2] <NEW_LINE> if char == then: <NEW_LINE> <INDENT> if self.isConsonant(char) and next == '\u0D4D' and self.isConsonant(then): <NEW_LINE> <INDENT> candidate[i] = char <NEW_LINE> candidate[i+1] = '' <NEW_LINE> candidate[i+2] = '' <NEW_LINE> i = i+2 <NEW_LINE> yield ''.join(candidate)
Consonant to geminated consonant, if the consonant does not has adjacent virama പച്ചതത്ത -> പച്ചത്തത്ത
6259906a56ac1b37e63038d4
class TestSelectFindStart(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> rng = np.random.default_rng(6183) <NEW_LINE> cols = ['Shift', 'test', 'N_0', r'\sum H_0j N_j', 'alt', 'Proj. Energy'] <NEW_LINE> means = [-0.1, 10.0, 11.0, -9.0, 20.0, -0.2] <NEW_LINE> sine_periods = list(range(1, len(cols)+1)) <NEW_LINE> noise_facs = [0.1*mean for mean in means] <NEW_LINE> self._df_mock = create_qmc_frame(rng, cols, means, sine_periods, noise_facs, frac_not_convergeds=[0.2]*len(cols), num_mc_its=300) <NEW_LINE> iterations = pd.DataFrame(list(range(1, 1501, 5)), columns=['iterations']) <NEW_LINE> self._df_mock = pd.concat([iterations, self._df_mock], axis=1) <NEW_LINE> <DEDENT> def test_select_blocking(self): <NEW_LINE> <INDENT> start_it = find_startit.select_find_start('blocking')( self._df_mock, self._df_mock['iterations'].iloc[-1], 'iterations', ['Shift', 'test', 'N_0', r'\sum H_0j N_j', 'alt'], None) <NEW_LINE> try: <NEW_LINE> <INDENT> self.assertEqual(start_it, 491) <NEW_LINE> <DEDENT> except AssertionError: <NEW_LINE> <INDENT> warnings.warn("Starting iteration " + str(start_it) + " does not match expected value " + str(491) + ". This is" " likely due to a rounding error and should not cause further issues.") <NEW_LINE> return 0 <NEW_LINE> <DEDENT> <DEDENT> def test_select_mser(self): <NEW_LINE> <INDENT> start_it = find_startit.select_find_start('mser')( self._df_mock, self._df_mock['iterations'].iloc[-1], 'iterations', None, 'alt') <NEW_LINE> self.assertEqual(start_it, 376) <NEW_LINE> <DEDENT> def test_select_invalid(self): <NEW_LINE> <INDENT> with self.assertRaisesRegex(ValueError, "The find start iteration " "selected in 'start_its', 'testi', is not " "available!"): <NEW_LINE> <INDENT> _ = find_startit.select_find_start('testi')( self._df_mock, self._df_mock['iterations'].iloc[-1], 'iterations', None, 'alt')
Test `select_find_start()`.
6259906a4e4d562566373beb
class HelpDemo(object): <NEW_LINE> <INDENT> pass
This is a demo for using help() on a class
6259906a8a43f66fc4bf3977
class VizForm(forms.ModelForm): <NEW_LINE> <INDENT> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> super(VizForm, self).__init__(*args, **kwargs) <NEW_LINE> self.fields['title'].required = False <NEW_LINE> <DEDENT> class Meta: <NEW_LINE> <INDENT> model = SCOTUSMap <NEW_LINE> fields = [ 'cluster_start', 'cluster_end', 'title', 'notes', ] <NEW_LINE> widgets = { 'cluster_start': forms.HiddenInput(), 'cluster_end': forms.HiddenInput(), 'title': forms.TextInput(attrs={'class': 'form-control'}), 'notes': forms.Textarea(attrs={'class': 'form-control'}), }
NB: The VizEditForm subclasses this!
6259906a3eb6a72ae038be45
class Artifact: <NEW_LINE> <INDENT> def __init__(self, name: str): <NEW_LINE> <INDENT> self.name = name <NEW_LINE> self.basedir = ARTIFACT_STORE_DIR / name <NEW_LINE> <DEDENT> def _retrieve(self, cmd: CmdType): <NEW_LINE> <INDENT> self.basedir.mkdir(exist_ok=True) <NEW_LINE> print_status(f'Retrieving artifact "{self.name}" ...') <NEW_LINE> logger.debug("By running: %s in %s", cmd, self.basedir) <NEW_LINE> run_cmd(cmd, output_verbosity=1, cwd=self.basedir)
Base class for external ressources
6259906a7d847024c075dbbf
class InputTermNotAtomic(ZiplineError): <NEW_LINE> <INDENT> msg = ( "Can't compute {parent} with non-atomic input {child}." )
Raised when a non-atomic term is specified as an input to an FFC term with a lookback window.
6259906aa17c0f6771d5d79a
class BaseFilter(object): <NEW_LINE> <INDENT> search_fields = {} <NEW_LINE> @classmethod <NEW_LINE> def build_q(cls, params, request=None): <NEW_LINE> <INDENT> return build_q(cls.get_search_fields(), params, request) <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def get_search_fields(cls): <NEW_LINE> <INDENT> sfdict = {} <NEW_LINE> for klass in tuple(cls.__bases__) + (cls, ): <NEW_LINE> <INDENT> if hasattr(klass, 'search_fields'): <NEW_LINE> <INDENT> sfdict.update(klass.search_fields) <NEW_LINE> <DEDENT> <DEDENT> return sfdict
Base class providing an interface for mapping a form to a query
6259906aadb09d7d5dc0bd4f
class RoleManager(object): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self._roles = [] <NEW_LINE> <DEDENT> def add_role(self, role): <NEW_LINE> <INDENT> self._assert_name_not_exists(role.name) <NEW_LINE> self._roles.append(role) <NEW_LINE> <DEDENT> def create_role(self, name, parent_names=None, default_evaluator=None): <NEW_LINE> <INDENT> if parent_names is not None: <NEW_LINE> <INDENT> parents = [self.get_role(n) for n in parent_names] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> parents = None <NEW_LINE> <DEDENT> role = Role(name, parents, default_evaluator) <NEW_LINE> self.add_role(role) <NEW_LINE> return role <NEW_LINE> <DEDENT> def get_names(self): <NEW_LINE> <INDENT> return list(map(lambda x: x.name, self._roles)) <NEW_LINE> <DEDENT> def get_role(self, name): <NEW_LINE> <INDENT> roles = list(filter(lambda r: r.name == name, self._roles)) <NEW_LINE> if len(roles) != 1: <NEW_LINE> <INDENT> raise ValueError("Role '{}' does not exist".format(name)) <NEW_LINE> <DEDENT> return roles[0] <NEW_LINE> <DEDENT> def _assert_name_not_exists(self, name): <NEW_LINE> <INDENT> assert len(list(filter(lambda r: r.name == name, self._roles))) == 0
Container for roles. Each role must have unique name.
6259906abaa26c4b54d50a8c
class QLearningAgent(ReinforcementAgent): <NEW_LINE> <INDENT> def __init__(self, **args): <NEW_LINE> <INDENT> ReinforcementAgent.__init__(self, **args) <NEW_LINE> "*** YOUR CODE HERE ***" <NEW_LINE> self.qValues = util.Counter() <NEW_LINE> <DEDENT> def getQValue(self, state, action): <NEW_LINE> <INDENT> return self.qValues[(state, action)] <NEW_LINE> <DEDENT> def computeValueFromQValues(self, state): <NEW_LINE> <INDENT> if self.getLegalActions(state): <NEW_LINE> <INDENT> return max(self.getQValue(state, actions) for actions in self.getLegalActions(state)) <NEW_LINE> <DEDENT> return 0.0 <NEW_LINE> <DEDENT> def computeActionFromQValues(self, state): <NEW_LINE> <INDENT> if self.getLegalActions(state): <NEW_LINE> <INDENT> tempo = util.Counter() <NEW_LINE> for actions in self.getLegalActions(state): <NEW_LINE> <INDENT> tempo[actions] = self.getQValue(state, actions) <NEW_LINE> <DEDENT> return tempo.argMax() <NEW_LINE> <DEDENT> return None <NEW_LINE> <DEDENT> def getAction(self, state): <NEW_LINE> <INDENT> legalActions = self.getLegalActions(state) <NEW_LINE> action = None <NEW_LINE> action = None <NEW_LINE> if self.getLegalActions(state): <NEW_LINE> <INDENT> if util.flipCoin(self.epsilon): <NEW_LINE> <INDENT> Qbest= max(self.getQValue(state,action)) <NEW_LINE> sample = reward + self.gamma * Qbest <NEW_LINE> self.epsilon = self.gamma * (0.5 + (self.getQValue(state, action) - Qbest) / (0.01 + 2 * (max(self.getQValue(state, action)) - min(sel.getQValue(state,action))))) <NEW_LINE> action = random.choice(self.getLegalActions(state)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> action = self.computeActionFromQValues(state) <NEW_LINE> <DEDENT> <DEDENT> return action <NEW_LINE> <DEDENT> def update(self, state, action, nextState, reward): <NEW_LINE> <INDENT> sample = reward <NEW_LINE> if self.getLegalActions(nextState): <NEW_LINE> <INDENT> sample = self.discount * max(self.getQValue(nextState, actions) for actions in self.getLegalActions(nextState)) + reward <NEW_LINE> 
<DEDENT> self.qValues[(state, action)] = (1-self.alpha) * self.getQValue(state, action) + self.alpha * sample <NEW_LINE> <DEDENT> def getPolicy(self, state): <NEW_LINE> <INDENT> return self.computeActionFromQValues(state) <NEW_LINE> <DEDENT> def getValue(self, state): <NEW_LINE> <INDENT> return self.computeValueFromQValues(state)
Q-Learning Agent Functions you should fill in: - computeValueFromQValues - computeActionFromQValues - getQValue - getAction - update Instance variables you have access to - self.epsilon (exploration prob) - self.alpha (learning rate) - self.discount (discount rate) Functions you should use - self.getLegalActions(state) which returns legal actions for a state
6259906a99cbb53fe68326cb
class coreWrapper: <NEW_LINE> <INDENT> def __init__(self, confUser): <NEW_LINE> <INDENT> self.interval = confUser['interval'] <NEW_LINE> self.overlap = confUser['overlap'] <NEW_LINE> self.datadir = os.path.join(confUser['DATADIR'], 'result') <NEW_LINE> pass <NEW_LINE> <DEDENT> def assetStartWrapping(self, FilterKnowledge, dim=2, intresedCode= [107, 130, 170]): <NEW_LINE> <INDENT> filterDic = FilterKnowledge.assetFilterDic(dim, intresedCode= intresedCode, genF= False) <NEW_LINE> paramDic = genIODic(self.interval, self.overlap) <NEW_LINE> data = FilterKnowledge.arrX <NEW_LINE> parmsGenerator = self.__inputsGenerator(data, filterDic, paramDic) <NEW_LINE> pool = ThreadPool() <NEW_LINE> results = pool.map(self.__multi_run_wrapper, parmsGenerator) <NEW_LINE> pool.close() <NEW_LINE> pool.join() <NEW_LINE> <DEDENT> def __multi_run_wrapper(self, args): <NEW_LINE> <INDENT> print('Unpacking params, for epoch:',args[-1]) <NEW_LINE> return self.core_wrapper(*args) <NEW_LINE> <DEDENT> def core_wrapper(self, data, filt, interval, overlap, fn, genJson= True): <NEW_LINE> <INDENT> assert data.shape[0] == filt.shape[0] <NEW_LINE> print(data.shape, filt.shape, interval, overlap, fn) <NEW_LINE> cluster = mapper.single_linkage() <NEW_LINE> cover = mapper.cover.cube_cover_primitive(interval, overlap) <NEW_LINE> metricpar = {'metric': 'euclidean'} <NEW_LINE> mapper_output = mapper.jushacore(data, filt, cover=cover, cutoff=None, cluster=cluster, metricpar=metricpar, verbose=False) <NEW_LINE> mapper.scale_graph(mapper_output, filt, cover=cover, weighting='inverse', exponent=1, verbose=False) <NEW_LINE> if mapper_output.stopFlag: <NEW_LINE> <INDENT> print('{0} Stopped! Too many nodes or too long time'.format(fn)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('{0} Successed! '.format(fn)) <NEW_LINE> print('type check! 
',type(mapper_output)) <NEW_LINE> import pickle as pkl <NEW_LINE> with open('G.pkl', 'wb') as f: <NEW_LINE> <INDENT> pkl.dump(mapper_output, f) <NEW_LINE> <DEDENT> baseDir = self.datadir <NEW_LINE> to_d3js_graph(mapper_output, fn, baseDir, genJson) <NEW_LINE> print('Core ran finished! with: {0}'.format(fn)) <NEW_LINE> <DEDENT> <DEDENT> def __inputsGenerator(self, data, filterDic, paramDic): <NEW_LINE> <INDENT> assert isinstance(data, np.ndarray) <NEW_LINE> assert not np.isnan(data).sum() <NEW_LINE> ans = [] <NEW_LINE> for kp, vp in paramDic.items(): <NEW_LINE> <INDENT> for kf, vf in filterDic.items(): <NEW_LINE> <INDENT> yield (data, vf, vp[0], vp[1], kp+ '_'+ kf) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def __coreInputsChecker(*args): <NEW_LINE> <INDENT> pass
6259906a7c178a314d78e7de
class MultipleRegistrationException(Exception): <NEW_LINE> <INDENT> def __init__(self, event_id, emitter): <NEW_LINE> <INDENT> self.event_id = event_id <NEW_LINE> self.emitter = emitter <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return "MultipleRegistrationException: %s on %s" % (self.event_id, self.emitter)
Exception thrown if the event was already registered
6259906ab7558d5895464b23
class DumbAlgo(BaseAlgorithm): <NEW_LINE> <INDENT> def __init__(self, space, value=5, scoring=0, judgement=None, suspend=False, done=False, **nested_algo): <NEW_LINE> <INDENT> self._times_called_suspend = 0 <NEW_LINE> self._times_called_is_done = 0 <NEW_LINE> self._num = None <NEW_LINE> self._points = None <NEW_LINE> self._results = None <NEW_LINE> self._score_point = None <NEW_LINE> self._judge_point = None <NEW_LINE> self._measurements = None <NEW_LINE> super(DumbAlgo, self).__init__(space, value=value, scoring=scoring, judgement=judgement, suspend=suspend, done=done, **nested_algo) <NEW_LINE> <DEDENT> def suggest(self, num=1): <NEW_LINE> <INDENT> self._num = num <NEW_LINE> return [self.value] * num <NEW_LINE> <DEDENT> def observe(self, points, results): <NEW_LINE> <INDENT> self._points = points <NEW_LINE> self._results = results <NEW_LINE> <DEDENT> def score(self, point): <NEW_LINE> <INDENT> self._score_point = point <NEW_LINE> return self.scoring <NEW_LINE> <DEDENT> def judge(self, point, measurements): <NEW_LINE> <INDENT> self._judge_point = point <NEW_LINE> self._measurements = measurements <NEW_LINE> return self.judgement <NEW_LINE> <DEDENT> @property <NEW_LINE> def should_suspend(self): <NEW_LINE> <INDENT> self._times_called_suspend += 1 <NEW_LINE> return self.suspend <NEW_LINE> <DEDENT> @property <NEW_LINE> def is_done(self): <NEW_LINE> <INDENT> self._times_called_is_done += 1 <NEW_LINE> return self.done
Stab class for `BaseAlgorithm`.
6259906a3d592f4c4edbc6c5
class pp_children_as_list (object): <NEW_LINE> <INDENT> def __init__(self, val): <NEW_LINE> <INDENT> self.val = val <NEW_LINE> <DEDENT> def to_string(self): <NEW_LINE> <INDENT> return 'children_as_list_val' <NEW_LINE> <DEDENT> def children (self): <NEW_LINE> <INDENT> return [('one', 1)]
Throw error from display_hint
6259906a66673b3332c31be3
class LogoutView(View): <NEW_LINE> <INDENT> def get(self, request): <NEW_LINE> <INDENT> logout(request) <NEW_LINE> return redirect(reverse('goods:index'))
登出
6259906acb5e8a47e493cd76
class Range(Number): <NEW_LINE> <INDENT> paramType = 'range'
Define numeric parameter with valid values in a given range. >>> @argument('value', types.Range, min=10, max=100, step=10) ... def func(value): ... pass
6259906a7d847024c075dbc0
class Suite(Pmf): <NEW_LINE> <INDENT> def Update(self, data): <NEW_LINE> <INDENT> for hypo in self.Values(): <NEW_LINE> <INDENT> like = self.Likelihood(data, hypo) <NEW_LINE> self.Mult(hypo, like) <NEW_LINE> <DEDENT> return self.Normalize() <NEW_LINE> <DEDENT> def LogUpdate(self, data): <NEW_LINE> <INDENT> for hypo in self.Values(): <NEW_LINE> <INDENT> like = self.LogLikelihood(data, hypo) <NEW_LINE> self.Incr(hypo, like) <NEW_LINE> <DEDENT> <DEDENT> def UpdateSet(self, dataset): <NEW_LINE> <INDENT> for data in dataset: <NEW_LINE> <INDENT> for hypo in self.Values(): <NEW_LINE> <INDENT> like = self.Likelihood(data, hypo) <NEW_LINE> self.Mult(hypo, like) <NEW_LINE> <DEDENT> <DEDENT> return self.Normalize() <NEW_LINE> <DEDENT> def LogUpdateSet(self, dataset): <NEW_LINE> <INDENT> for data in dataset: <NEW_LINE> <INDENT> self.LogUpdate(data) <NEW_LINE> <DEDENT> <DEDENT> def Likelihood(self, data, hypo): <NEW_LINE> <INDENT> raise UnimplementedMethodException() <NEW_LINE> <DEDENT> def LogLikelihood(self, data, hypo): <NEW_LINE> <INDENT> raise UnimplementedMethodException() <NEW_LINE> <DEDENT> def Print(self): <NEW_LINE> <INDENT> for hypo, prob in sorted(self.Items()): <NEW_LINE> <INDENT> print(hypo, prob) <NEW_LINE> <DEDENT> <DEDENT> def MakeOdds(self): <NEW_LINE> <INDENT> for hypo, prob in self.Items(): <NEW_LINE> <INDENT> if prob: <NEW_LINE> <INDENT> self.Set(hypo, Odds(prob)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.Remove(hypo) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def MakeProbs(self): <NEW_LINE> <INDENT> for hypo, odds in self.Items(): <NEW_LINE> <INDENT> self.Set(hypo, Probability(odds))
Represents a suite of hypotheses and their probabilities.
6259906a21bff66bcd72444c
class LibError(PackageError): <NEW_LINE> <INDENT> pass
Raise when a library or executable is pulling in the wrong version of a library.
6259906a796e427e5384ff5d
class Movie(): <NEW_LINE> <INDENT> def __init__(self, full_path): <NEW_LINE> <INDENT> file_info = guessit(os.path.basename(full_path)) <NEW_LINE> if 'title' not in file_info: <NEW_LINE> <INDENT> raise ValueError('File name doesn\'t contain enough information \'{0}\''.format( os.path.basename(full_path))) <NEW_LINE> <DEDENT> self.full_path = full_path <NEW_LINE> self.title = file_info['title'] <NEW_LINE> try: <NEW_LINE> <INDENT> self.year = file_info['year'] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> self.year = None <NEW_LINE> <DEDENT> <DEDENT> @property <NEW_LINE> def file_name(self): <NEW_LINE> <INDENT> return os.path.basename(self.full_path) <NEW_LINE> <DEDENT> def get_sortable_info(self): <NEW_LINE> <INDENT> if self.year is not None: <NEW_LINE> <INDENT> return (self.title, self.title) <NEW_LINE> <DEDENT> return self.title <NEW_LINE> <DEDENT> def get_new_file_name(self, movie): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return '{0} ({1}){2}'.format(movie['title'], movie['year'], os.path.splitext(self.full_path)[-1]) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> return '{0}{1}'.format(movie['title'], os.path.splitext(self.full_path)[-1]) <NEW_LINE> <DEDENT> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return self.get_file_name
Allow the manipulation of data representing a movie.
6259906a442bda511e95d94b
class Object(object): <NEW_LINE> <INDENT> BACKGROUND = 0 <NEW_LINE> PERSON = 1 <NEW_LINE> CAT = 2 <NEW_LINE> DOG = 3 <NEW_LINE> _LABELS = { BACKGROUND: 'BACKGROUND', PERSON: 'PERSON', CAT: 'CAT', DOG: 'DOG', } <NEW_LINE> def __init__(self, bounding_box, kind, score): <NEW_LINE> <INDENT> self.bounding_box = bounding_box <NEW_LINE> self.kind = kind <NEW_LINE> self.score = score <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return 'kind=%s(%d), score=%f, bbox=%s' % (self._LABELS[self.kind], self.kind, self.score, str(self.bounding_box))
Object detection result.
6259906af7d966606f7494ae
class ContentEntryParentTest(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> self.content = atom.data.Content() <NEW_LINE> <DEDENT> def testConvertToAndFromElementTree(self): <NEW_LINE> <INDENT> self.content.text = 'my content' <NEW_LINE> self.content.type = 'text' <NEW_LINE> self.content.src = 'my source' <NEW_LINE> self.assertTrue(self.content.text == 'my content') <NEW_LINE> self.assertTrue(self.content.type == 'text') <NEW_LINE> self.assertTrue(self.content.src == 'my source') <NEW_LINE> new_content = atom.core.parse(self.content.ToString(), atom.data.Content) <NEW_LINE> self.assertTrue(self.content.text == new_content.text) <NEW_LINE> self.assertTrue(self.content.type == new_content.type) <NEW_LINE> self.assertTrue(self.content.src == new_content.src) <NEW_LINE> <DEDENT> def testContentConstructorSetsSrc(self): <NEW_LINE> <INDENT> new_content = atom.data.Content(src='abcd') <NEW_LINE> self.assertEqual(new_content.src, 'abcd') <NEW_LINE> <DEDENT> def testContentFromString(self): <NEW_LINE> <INDENT> content_xml = '<content xmlns="http://www.w3.org/2005/Atom" type="test"/>' <NEW_LINE> content = atom.core.parse(content_xml, atom.data.Content) <NEW_LINE> self.assertTrue(isinstance(content, atom.data.Content)) <NEW_LINE> self.assertEqual(content.type, 'test')
The test accesses hidden methods in atom.FeedEntryParent
6259906acc0a2c111447c6c3
class AWSConnection(object): <NEW_LINE> <INDENT> def __init__(self, ansible_obj, resources, boto3=True): <NEW_LINE> <INDENT> ansible_obj.deprecate("The 'ansible.module_utils.aws.batch.AWSConnection' class is deprecated please use 'AnsibleAWSModule.client()'", version='2.14') <NEW_LINE> self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3) <NEW_LINE> self.resource_client = dict() <NEW_LINE> if not resources: <NEW_LINE> <INDENT> resources = ['batch'] <NEW_LINE> <DEDENT> resources.append('iam') <NEW_LINE> for resource in resources: <NEW_LINE> <INDENT> aws_connect_kwargs.update(dict(region=self.region, endpoint=self.endpoint, conn_type='client', resource=resource )) <NEW_LINE> self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) <NEW_LINE> <DEDENT> if not self.region: <NEW_LINE> <INDENT> self.region = self.resource_client['batch'].meta.region_name <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] <NEW_LINE> <DEDENT> except (ClientError, ValueError, KeyError, IndexError): <NEW_LINE> <INDENT> self.account_id = '' <NEW_LINE> <DEDENT> <DEDENT> def client(self, resource='batch'): <NEW_LINE> <INDENT> return self.resource_client[resource]
Create the connection object and client objects as required.
6259906a32920d7e50bc782c
class HistoricCSVDataHandler(DataHandler): <NEW_LINE> <INDENT> def __init__(self, events, csv_dir, symbol_list): <NEW_LINE> <INDENT> self.events = events <NEW_LINE> self.csv_dir = csv_dir <NEW_LINE> self.symbol_list = symbol_list <NEW_LINE> self.symbol_data = {} <NEW_LINE> self.latest_symbol_data = {} <NEW_LINE> self.continue_backtest = True <NEW_LINE> self._open_convert_csv_files() <NEW_LINE> <DEDENT> def _open_convert_csv_files(self): <NEW_LINE> <INDENT> comb_index = None <NEW_LINE> for s in self.symbol_list: <NEW_LINE> <INDENT> self.symbol_data[s] = pd.io.parsers.read_csv( os.path.join(self.csv_dir, '%s.csv' % s), header=0, index_col=0, parse_dates=True, names=[ 'datetime', 'open', 'high', 'low', 'close', 'volume', 'adj_close' ] ).sort() <NEW_LINE> if comb_index is None: <NEW_LINE> <INDENT> comb_index = self.symbol_data[s].index <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> comb_index.union(self.symbol_data[s].index) <NEW_LINE> <DEDENT> self.latest_symbol_data[s] = [] <NEW_LINE> <DEDENT> for s in self.symbol_list: <NEW_LINE> <INDENT> self.symbol_data[s] = self.symbol_data[s].reindex( index=comb_index, method='pad').iterrows() <NEW_LINE> <DEDENT> <DEDENT> def _get_new_bar(self, symbol): <NEW_LINE> <INDENT> for b in self.symbol_data[symbol]: <NEW_LINE> <INDENT> yield b
HistoricCSVDataHandler is designed o read CSV files for each requested symbol from disk and provide an interface to obtain the "latest" bar in a manner identical to a live trading interface.
6259906a5fcc89381b266d4a
class NonNegativeLinearRegression(LinearModel, RegressorMixin): <NEW_LINE> <INDENT> def __init__(self, fit_intercept=True, normalize=False, copy_X=True): <NEW_LINE> <INDENT> self.fit_intercept = fit_intercept <NEW_LINE> self.normalize = normalize <NEW_LINE> self.copy_X = copy_X <NEW_LINE> <DEDENT> def fit(self, X, y, sample_weight=None): <NEW_LINE> <INDENT> X, y = check_X_y(X, y, y_numeric=True, multi_output=False) <NEW_LINE> if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1: <NEW_LINE> <INDENT> raise ValueError("Sample weights must be 1D array or scalar") <NEW_LINE> <DEDENT> X, y, X_offset, y_offset, X_scale = self._preprocess_data( X, y, fit_intercept=self.fit_intercept, normalize=self.normalize, copy=self.copy_X, sample_weight=sample_weight) <NEW_LINE> if sample_weight is not None: <NEW_LINE> <INDENT> X, y = _rescale_data(X, y, sample_weight) <NEW_LINE> <DEDENT> self.coef_, result = nnls(X, y.squeeze()) <NEW_LINE> if np.all(self.coef_ == 0): <NEW_LINE> <INDENT> raise ConvergenceWarning("All coefficients estimated to be zero in" " the non-negative least squares fit.") <NEW_LINE> <DEDENT> self._set_intercept(X_offset, y_offset, X_scale) <NEW_LINE> self.opt_result_ = OptimizeResult(success=True, status=0, x=self.coef_, fun=result) <NEW_LINE> return self
Non-negative least squares linear regression. Parameters ---------- fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the estimated coefficients more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. Attributes ---------- coef_ : array, shape (n_features, ) Estimated coefficients for the linear regression problem. intercept_ : array Independent term in the linear model. opt_result_ : OptimizeResult Result of non-negative least squares optimization
6259906a8a43f66fc4bf3979
class Solution: <NEW_LINE> <INDENT> def arrayPairSum(self, nums): <NEW_LINE> <INDENT> nums.sort() <NEW_LINE> n = len(nums) <NEW_LINE> result = 0 <NEW_LINE> for i in range(int(n / 2)): <NEW_LINE> <INDENT> result += nums[i * 2] <NEW_LINE> <DEDENT> return result
@param nums: an array @return: the sum of min(ai, bi) for all i from 1 to n
6259906a8e71fb1e983bd2ad
class ServiceManagerTestCase(utils.BaseTestCase): <NEW_LINE> <INDENT> def test_override_manager_method(self): <NEW_LINE> <INDENT> serv = ExtendedService() <NEW_LINE> serv.start() <NEW_LINE> self.assertEqual(serv.test_method(), 'service')
Test cases for Services.
6259906ae1aae11d1e7cf400
class DoPublicCommentCount(comments.DoCommentCount): <NEW_LINE> <INDENT> def __call__(self, parser, token): <NEW_LINE> <INDENT> bits = token.contents.split() <NEW_LINE> if len(bits) != 6: <NEW_LINE> <INDENT> raise template.TemplateSyntaxError("'%s' tag takes five arguments" % bits[0]) <NEW_LINE> <DEDENT> if bits[1] != 'for': <NEW_LINE> <INDENT> raise template.TemplateSyntaxError("first argument to '%s' tag must be 'for'" % bits[0]) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> app_name, model_name = bits[2].split('.') <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> raise template.TemplateSyntaxError("second argument to '%s tag must be in the format app_name.model_name'" % bits[0]) <NEW_LINE> <DEDENT> model = get_model(app_name, model_name) <NEW_LINE> if model is None: <NEW_LINE> <INDENT> raise template.TemplateSyntaxError("'%s' tag got invalid model '%s.%s'" % (bits[0], app_name, model_name)) <NEW_LINE> <DEDENT> content_type = ContentType.objects.get_for_model(model) <NEW_LINE> var_name, object_id = None, None <NEW_LINE> if bits[3].isdigit(): <NEW_LINE> <INDENT> object_id = bits[3] <NEW_LINE> try: <NEW_LINE> <INDENT> content_type.get_object_for_this_type(pk=object_id) <NEW_LINE> <DEDENT> except ObjectDoesNotExist: <NEW_LINE> <INDENT> raise template.TemplateSyntaxError("'%s' tag got reference to %s object with id %s, which doesn't exist" % (bits[0], content_type.name, object_id)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> var_name = bits[3] <NEW_LINE> <DEDENT> if bits[4] != 'as': <NEW_LINE> <INDENT> raise template.TemplateSyntaxError("fourth argument to '%s' tag must be 'as'" % bits[0]) <NEW_LINE> <DEDENT> return PublicCommentCountNode(app_name, model_name, var_name, object_id, bits[5], self.free)
Retrieves the number of comments attached to a particular object and stores them in a context variable. The difference between this tag and Django's built-in comment count tags is that this tag will only count comments with ``is_public=True``. If your application uses any sort of comment moderation which sets ``is_public=False``, you'll probably want to use this tag, as it gives an accurate count of the comments which will be publicly displayed. Syntax:: {% get_public_comment_count for [app_name].[model_name] [object_id] as [varname] %} or:: {% get_public_free_comment_count for [app_name].[model_name] [object_id] as [varname] %} Example:: {% get_public_comment_count for weblog.entry entry.id as comment_count %} When called as ``get_public_comment_list``, this tag counts instances of ``Comment`` (comments which require registration). When called as ``get_public_free_comment_count``, this tag counts instances of ``FreeComment`` (comments which do not require registration).
6259906a435de62698e9d5f2
class HasStrophe(HasLinkTo): <NEW_LINE> <INDENT> def __init__(self, argument): <NEW_LINE> <INDENT> super().__init__(argument) <NEW_LINE> self._namespace = "http://www.knora.org/ontology/text-structure" <NEW_LINE> self._name = "hasStrophe"
Relating a verse poem to a strophe it contains.
6259906a45492302aabfdcbf
class Rectangle(BaseGeometry): <NEW_LINE> <INDENT> def __init__(self, width, height): <NEW_LINE> <INDENT> self.integer_validator("height", height) <NEW_LINE> self.integer_validator("width", width) <NEW_LINE> self.__height = height <NEW_LINE> self.__width = width <NEW_LINE> <DEDENT> def area(self): <NEW_LINE> <INDENT> return self.__height * self.__width <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return "[Rectangle] {:d}/{:d}".format(self.__width, self.__height)
Dfine a rect from BaseGeometry
6259906a97e22403b383c6f4
class Patch200(PullsComment): <NEW_LINE> <INDENT> pass
OK
6259906a66673b3332c31be5
class RatingTypeMap(DefaultDict[K, V]): <NEW_LINE> <INDENT> def __init__(self, default_factory, *args, **kwargs): <NEW_LINE> <INDENT> super().__init__(default_factory, *args, **kwargs) <NEW_LINE> for rating in (RatingType.GLOBAL, RatingType.LADDER_1V1): <NEW_LINE> <INDENT> self.__getitem__(rating)
A thin wrapper around `defaultdict` which stores RatingType keys as strings.
6259906ad6c5a102081e3910
class AnalysisMixin: <NEW_LINE> <INDENT> @property <NEW_LINE> def listing_url(self): <NEW_LINE> <INDENT> app = self.model._meta.app_label <NEW_LINE> cls = self.model.__name__.lower() <NEW_LINE> return reverse(f'admin:{app}_{cls}_changelist')
Helper for going back to analysis.
6259906af548e778e596cd74
class ScreenDistanceSpec(NumberSpec): <NEW_LINE> <INDENT> def prepare_value(self, cls, name, value): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if value is not None and value < 0: <NEW_LINE> <INDENT> raise ValueError("Distances must be positive or None!") <NEW_LINE> <DEDENT> <DEDENT> except TypeError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return super(ScreenDistanceSpec, self).prepare_value(cls, name, value) <NEW_LINE> <DEDENT> def to_serializable(self, obj, name, val): <NEW_LINE> <INDENT> d = super(ScreenDistanceSpec, self).to_serializable(obj, name, val) <NEW_LINE> d["units"] = "screen" <NEW_LINE> return d
A |DataSpec| property that accepts numeric fixed values for screen distances, and also provides an associated units property that reports ``"screen"`` as the units. .. note:: Units are always ``"screen"``.
6259906a56ac1b37e63038d6
class ChristmasExtractor(Extractor): <NEW_LINE> <INDENT> def __init__(self, ref=None): <NEW_LINE> <INDENT> super(ChristmasExtractor, self).__init__(ref) <NEW_LINE> self.pattern = re.compile(r"\b(christmas(\seve)*)\b", re.IGNORECASE) <NEW_LINE> <DEDENT> def extract(self, text): <NEW_LINE> <INDENT> matches = self.pattern.findall(text) <NEW_LINE> if not matches: <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> if self.ref: <NEW_LINE> <INDENT> cur_year = self.ref.year <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cur_year = datetime.datetime.today().year <NEW_LINE> <DEDENT> result = [] <NEW_LINE> for match in matches: <NEW_LINE> <INDENT> if len(match[0]) == 13: <NEW_LINE> <INDENT> date_obj = datetime.date(cur_year, 12, 24) <NEW_LINE> <DEDENT> elif len(match[0]) == 9: <NEW_LINE> <INDENT> date_obj = datetime.date(cur_year, 12, 25) <NEW_LINE> <DEDENT> result.append(date_obj) <NEW_LINE> <DEDENT> return result
Extract Christmas or Christmas Eve from text.
6259906a0a50d4780f7069b4
class GalleryCreate(CreateView): <NEW_LINE> <INDENT> template_name = 'yawg/gadmin/gallery_edit.html' <NEW_LINE> model = Gallery <NEW_LINE> fields = ['gallery_name', 'gallery_alias']
Create new Gallery (model)
6259906acc0a2c111447c6c4
class SMUnfoldCommandClass(): <NEW_LINE> <INDENT> def GetResources(self): <NEW_LINE> <INDENT> __dir__ = os.path.dirname(__file__) <NEW_LINE> iconPath = os.path.join( __dir__, 'Resources', 'icons' ) <NEW_LINE> return {'Pixmap' : os.path.join( iconPath , 'SMUnfold.svg'), 'MenuText': QtCore.QT_TRANSLATE_NOOP('SheetMetal','Unfold'), 'ToolTip' : QtCore.QT_TRANSLATE_NOOP('SheetMetal','Flatten folded sheet metal object')} <NEW_LINE> <DEDENT> def Activated(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> taskd = SMUnfoldTaskPanel() <NEW_LINE> <DEDENT> except ValueError as e: <NEW_LINE> <INDENT> SMErrorBox(e.args[0]) <NEW_LINE> return <NEW_LINE> <DEDENT> pg = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/sheetmetal") <NEW_LINE> if pg.GetBool("bendSketch"): <NEW_LINE> <INDENT> taskd.checkSeparate.setCheckState(QtCore.Qt.CheckState.Checked) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> taskd.checkSeparate.setCheckState(QtCore.Qt.CheckState.Unchecked) <NEW_LINE> <DEDENT> if pg.GetBool("genSketch"): <NEW_LINE> <INDENT> taskd.checkSketch.setCheckState(QtCore.Qt.CheckState.Checked) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> taskd.checkSketch.setCheckState(QtCore.Qt.CheckState.Unchecked) <NEW_LINE> <DEDENT> taskd.bendColor.setColor(pg.GetString("bendColor")) <NEW_LINE> taskd.genColor.setColor(pg.GetString("genColor")) <NEW_LINE> taskd.internalColor.setColor(pg.GetString("intColor")) <NEW_LINE> FreeCADGui.Control.showDialog(taskd) <NEW_LINE> return <NEW_LINE> <DEDENT> def IsActive(self): <NEW_LINE> <INDENT> if len(Gui.Selection.getSelection()) != 1 or len(Gui.Selection.getSelectionEx()[0].SubElementNames) != 1: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> selobj = Gui.Selection.getSelection()[0] <NEW_LINE> selFace = Gui.Selection.getSelectionEx()[0].SubObjects[0] <NEW_LINE> if type(selFace) != Part.Face: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True
Unfold object
6259906a8a43f66fc4bf397b
class TokenBucketQueue(object): <NEW_LINE> <INDENT> RateLimitExceeded = RateLimitExceeded <NEW_LINE> def __init__(self, fill_rate, queue=None, capacity=1): <NEW_LINE> <INDENT> self._bucket = TokenBucket(fill_rate, capacity) <NEW_LINE> self.queue = queue <NEW_LINE> if not self.queue: <NEW_LINE> <INDENT> self.queue = Queue() <NEW_LINE> <DEDENT> <DEDENT> def put(self, item, block=True): <NEW_LINE> <INDENT> self.queue.put(item, block=block) <NEW_LINE> <DEDENT> def put_nowait(self, item): <NEW_LINE> <INDENT> return self.put(item, block=False) <NEW_LINE> <DEDENT> def get(self, block=True): <NEW_LINE> <INDENT> get = block and self.queue.get or self.queue.get_nowait <NEW_LINE> if not self._bucket.can_consume(1): <NEW_LINE> <INDENT> raise RateLimitExceeded() <NEW_LINE> <DEDENT> return get() <NEW_LINE> <DEDENT> def get_nowait(self): <NEW_LINE> <INDENT> return self.get(block=False) <NEW_LINE> <DEDENT> def qsize(self): <NEW_LINE> <INDENT> return self.queue.qsize() <NEW_LINE> <DEDENT> def empty(self): <NEW_LINE> <INDENT> return self.queue.empty() <NEW_LINE> <DEDENT> def clear(self): <NEW_LINE> <INDENT> return self.items.clear() <NEW_LINE> <DEDENT> def wait(self, block=False): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> remaining = self.expected_time() <NEW_LINE> if not remaining: <NEW_LINE> <INDENT> return self.get(block=block) <NEW_LINE> <DEDENT> sleep(remaining) <NEW_LINE> <DEDENT> <DEDENT> def expected_time(self, tokens=1): <NEW_LINE> <INDENT> return self._bucket.expected_time(tokens) <NEW_LINE> <DEDENT> @property <NEW_LINE> def items(self): <NEW_LINE> <INDENT> return self.queue.queue
Queue with rate limited get operations. This uses the token bucket algorithm to rate limit the queue on get operations. :param fill_rate: The rate in tokens/second that the bucket will be refilled. :keyword capacity: Maximum number of tokens in the bucket. Default is 1.
6259906a9c8ee82313040d7c
@dataclass <NEW_LINE> class SearchResultId(BaseModel): <NEW_LINE> <INDENT> kind: Optional[str] = field(default=None) <NEW_LINE> videoId: Optional[str] = field(default=None, repr=False) <NEW_LINE> channelId: Optional[str] = field(default=None, repr=False) <NEW_LINE> playlistId: Optional[str] = field(default=None, repr=False)
A class representing the search result id info. Refer: https://developers.google.com/youtube/v3/docs/search#id
6259906a01c39578d7f14329
@implementer(interfaces.IPushProducer) <NEW_LINE> class _NoPushProducer(object): <NEW_LINE> <INDENT> def pauseProducing(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def resumeProducing(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def registerProducer(self, producer, streaming): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def unregisterProducer(self): <NEW_LINE> <INDENT> pass
A no-op version of L{interfaces.IPushProducer}, used to abstract over the possibility that a L{HTTPChannel} transport does not provide L{IPushProducer}.
6259906aadb09d7d5dc0bd53
class UnexpectedParameters(TraversalError): <NEW_LINE> <INDENT> pass
Unexpected namespace parameters were provided.
6259906a435de62698e9d5f4
class ListFlavor(lister.Lister): <NEW_LINE> <INDENT> log = logging.getLogger(__name__ + ".ListFlavor") <NEW_LINE> def take_action(self, parsed_args): <NEW_LINE> <INDENT> self.log.debug("take_action(%s)" % parsed_args) <NEW_LINE> compute_client = self.app.client_manager.compute <NEW_LINE> columns = ( "ID", "Name", "RAM", "Disk", "Ephemeral", "Swap", "VCPUs", "RXTX Factor", "Is Public", "Extra Specs" ) <NEW_LINE> data = compute_client.flavors.list() <NEW_LINE> return (columns, (utils.get_item_properties( s, columns, ) for s in data))
List flavor command
6259906a7c178a314d78e7e0
class EmploymentVerificationGetRequest(ModelNormal): <NEW_LINE> <INDENT> allowed_values = { } <NEW_LINE> validations = { } <NEW_LINE> additional_properties_type = None <NEW_LINE> _nullable = False <NEW_LINE> @cached_property <NEW_LINE> def openapi_types(): <NEW_LINE> <INDENT> return { 'access_token': (str,), 'client_id': (str,), 'secret': (str,), } <NEW_LINE> <DEDENT> @cached_property <NEW_LINE> def discriminator(): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> attribute_map = { 'access_token': 'access_token', 'client_id': 'client_id', 'secret': 'secret', } <NEW_LINE> _composed_schemas = {} <NEW_LINE> required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) <NEW_LINE> @convert_js_args_to_python_args <NEW_LINE> def __init__(self, access_token, *args, **kwargs): <NEW_LINE> <INDENT> _check_type = kwargs.pop('_check_type', True) <NEW_LINE> _spec_property_naming = kwargs.pop('_spec_property_naming', False) <NEW_LINE> _path_to_item = kwargs.pop('_path_to_item', ()) <NEW_LINE> _configuration = kwargs.pop('_configuration', None) <NEW_LINE> _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) <NEW_LINE> if args: <NEW_LINE> <INDENT> raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) <NEW_LINE> <DEDENT> self._data_store = {} <NEW_LINE> self._check_type = _check_type <NEW_LINE> self._spec_property_naming = _spec_property_naming <NEW_LINE> self._path_to_item = _path_to_item <NEW_LINE> self._configuration = _configuration <NEW_LINE> self._visited_composed_classes = _visited_composed_classes + (self.__class__,) <NEW_LINE> self.access_token = access_token <NEW_LINE> for var_name, var_value in kwargs.items(): <NEW_LINE> <INDENT> if var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys and self.additional_properties_type is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> setattr(self, var_name, var_value)
NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values.
6259906a3d592f4c4edbc6c9
class Stack: <NEW_LINE> <INDENT> def __init__(self, e): <NEW_LINE> <INDENT> print("Initialized Stack") <NEW_LINE> self.elements = [] <NEW_LINE> self.elements = e <NEW_LINE> <DEDENT> def push(self, element): <NEW_LINE> <INDENT> self.elements.append(element) <NEW_LINE> <DEDENT> def pop(self): <NEW_LINE> <INDENT> return self.elements.pop(-1)
a simple implementation of the stack data structure
6259906a44b2445a339b7554
class ReportTo: <NEW_LINE> <INDENT> def __init__( self, max_age: int, include_subdomains: bool = False, group: Optional[str] = None, *endpoints: List[Dict[str, Union[str, int]]], ) -> None: <NEW_LINE> <INDENT> self.header = "Report-To" <NEW_LINE> report_to_endpoints = json.dumps(endpoints) <NEW_LINE> report_to_object: Dict[str, Union[str, int]] = { "max_age": max_age, "endpoints": report_to_endpoints, } <NEW_LINE> if group: <NEW_LINE> <INDENT> report_to_object["group"] = group <NEW_LINE> <DEDENT> if include_subdomains: <NEW_LINE> <INDENT> report_to_object["include_subdomains"] = include_subdomains <NEW_LINE> <DEDENT> self.value = json.dumps(report_to_object) <NEW_LINE> <DEDENT> def set(self, value: str) -> "ReportTo": <NEW_LINE> <INDENT> self.value = value <NEW_LINE> return self
Configure reporting endpoints Resources: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/report-to https://developers.google.com/web/updates/2018/09/reportingapi :param max_age: endpoint TIL in seconds :type max_age: int :param include_subdomains: enable for subdomains, defaults to False :type include_subdomains: bool, optional :param group: endpoint name, defaults to None :type group: Optional[str], optional :param endpoints: variable number of endpoints :type endpoints: List[Dict[str, Union[str, int]]]
6259906a66673b3332c31be7
class DistAbstraction(object): <NEW_LINE> <INDENT> def __init__(self, req): <NEW_LINE> <INDENT> self.req = req <NEW_LINE> <DEDENT> def dist(self, finder): <NEW_LINE> <INDENT> raise NotImplementedError(self.dist) <NEW_LINE> <DEDENT> def prep_for_dist(self, finder): <NEW_LINE> <INDENT> raise NotImplementedError(self.dist)
Abstracts out the wheel vs non-wheel Resolver.resolve() logic. The requirements for anything installable are as follows: - we must be able to determine the requirement name (or we can't correctly handle the non-upgrade case). - we must be able to generate a list of run-time dependencies without installing any additional packages (or we would have to either burn time by doing temporary isolated installs or alternatively violate pips 'don't start installing unless all requirements are available' rule - neither of which are desirable). - for packages with setup requirements, we must also be able to determine their requirements without installing additional packages (for the same reason as run-time dependencies) - we must be able to create a Distribution object exposing the above metadata.
6259906aa8370b77170f1baf
class SetterCommand(Command): <NEW_LINE> <INDENT> def __init__(self,c): <NEW_LINE> <INDENT> Command.__init__(self,c) <NEW_LINE> self.value=c.getAttribute("value") <NEW_LINE> <DEDENT> def execute(self,para,log): <NEW_LINE> <INDENT> f=replaceValues(self.filename,para) <NEW_LINE> v=replaceValues(self.value,para) <NEW_LINE> s=replaceValues(self.subexpression,para) <NEW_LINE> k=replaceValues(self.key,para) <NEW_LINE> try: <NEW_LINE> <INDENT> dictFile=ParsedParameterFile(f,backup=True) <NEW_LINE> val=dictFile[k] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> self.error("Key: ",k,"not existing in File",f) <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> e = sys.exc_info()[1] <NEW_LINE> self.error("Problem with file",k,":",e) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> exec_("dictFile[k]"+s+"=v") <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> e = sys.exc_info()[1] <NEW_LINE> error("Problem with subexpression:",sys.exc_info()[0],":",e) <NEW_LINE> <DEDENT> dictFile.writeFile() <NEW_LINE> return True,None
Common class for commands that operate on dictionaries
6259906ae76e3b2f99fda1eb
@collection( name='summary-statistics', properties={ 'title': 'Summary Statistics', 'description': 'Listing of summary statistics', }) <NEW_LINE> class SummaryStatistic(Item): <NEW_LINE> <INDENT> item_type = 'summary_statistic' <NEW_LINE> schema = load_schema('encoded:schemas/summary_statistic.json') <NEW_LINE> embedded_list = Item.embedded_list + lab_award_attribution_embed_list
Summary statistics class.
6259906af548e778e596cd76
class SimpleDatasetPredictor(DatasetPredictorBase): <NEW_LINE> <INDENT> def __init__(self, config, dataset): <NEW_LINE> <INDENT> super(SimpleDatasetPredictor, self).__init__(config, dataset) <NEW_LINE> self.predictor = OfflinePredictor(config) <NEW_LINE> <DEDENT> @HIDE_DOC <NEW_LINE> def get_result(self): <NEW_LINE> <INDENT> self.dataset.reset_state() <NEW_LINE> try: <NEW_LINE> <INDENT> sz = len(self.dataset) <NEW_LINE> <DEDENT> except NotImplementedError: <NEW_LINE> <INDENT> sz = 0 <NEW_LINE> <DEDENT> with get_tqdm(total=sz, disable=(sz == 0)) as pbar: <NEW_LINE> <INDENT> for dp in self.dataset: <NEW_LINE> <INDENT> res = self.predictor(*dp) <NEW_LINE> yield res <NEW_LINE> pbar.update()
Simply create one predictor and run it on the DataFlow.
6259906a4e4d562566373bf0
class SourceControlConfigurationClient(object): <NEW_LINE> <INDENT> def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, base_url: Optional[str] = None, **kwargs: Any ) -> None: <NEW_LINE> <INDENT> if not base_url: <NEW_LINE> <INDENT> base_url = 'https://management.azure.com' <NEW_LINE> <DEDENT> self._config = SourceControlConfigurationClientConfiguration(credential, subscription_id, **kwargs) <NEW_LINE> self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) <NEW_LINE> client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} <NEW_LINE> self._serialize = Serializer(client_models) <NEW_LINE> self._serialize.client_side_validation = False <NEW_LINE> self._deserialize = Deserializer(client_models) <NEW_LINE> self.source_control_configurations = SourceControlConfigurationsOperations( self._client, self._config, self._serialize, self._deserialize) <NEW_LINE> self.operations = Operations( self._client, self._config, self._serialize, self._deserialize) <NEW_LINE> <DEDENT> async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: <NEW_LINE> <INDENT> path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> http_request.url = self._client.format_url(http_request.url, **path_format_arguments) <NEW_LINE> stream = kwargs.pop("stream", True) <NEW_LINE> pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) <NEW_LINE> return pipeline_response.http_response <NEW_LINE> <DEDENT> async def close(self) -> None: <NEW_LINE> <INDENT> await self._client.close() <NEW_LINE> <DEDENT> async def __aenter__(self) -> "SourceControlConfigurationClient": <NEW_LINE> <INDENT> await self._client.__aenter__() <NEW_LINE> return self <NEW_LINE> <DEDENT> async def __aexit__(self, *exc_details) -> None: <NEW_LINE> <INDENT> await self._client.__aexit__(*exc_details)
KubernetesConfiguration Client. :ivar source_control_configurations: SourceControlConfigurationsOperations operations :vartype source_control_configurations: azure.mgmt.kubernetesconfiguration.v2021_03_01.aio.operations.SourceControlConfigurationsOperations :ivar operations: Operations operations :vartype operations: azure.mgmt.kubernetesconfiguration.v2021_03_01.aio.operations.Operations :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000). :type subscription_id: str :param str base_url: Service URL :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
6259906a2c8b7c6e89bd4fcf
class BaseModel(models.Model): <NEW_LINE> <INDENT> created = models.DateTimeField( 'created at', auto_now_add=True, help_text='Date time on which the object was created.', ) <NEW_LINE> modified = models.DateTimeField( 'modified at', auto_now=True, help_text='Date time on which the object was last modified.', ) <NEW_LINE> class Meta: <NEW_LINE> <INDENT> abstract = True <NEW_LINE> get_latest_by = 'created' <NEW_LINE> ordering = ['-created', '-modified']
BaseModel acts as an abstract base class from which every other model in the project will inherit. This class provides every table with the following attributes: + created (DateTime): Store the datetime the object was created. + modified (DateTime): Store the last datetime the object was modified.
6259906a2ae34c7f260ac8d2
class TimeLabel(Gtk.Label): <NEW_LINE> <INDENT> def __init__(self, time_=0): <NEW_LINE> <INDENT> Gtk.Label.__init__(self) <NEW_LINE> self.__widths = {} <NEW_LINE> self._disabled = False <NEW_LINE> self.set_time(time_) <NEW_LINE> <DEDENT> def do_get_preferred_width(self): <NEW_LINE> <INDENT> widths = Gtk.Label.do_get_preferred_width(self) <NEW_LINE> num_chars = len(gdecode(self.get_text())) <NEW_LINE> max_widths = self.__widths.get(num_chars, widths) <NEW_LINE> widths = max(widths[0], max_widths[0]), max(widths[1], max_widths[1]) <NEW_LINE> self.__widths[num_chars] = widths <NEW_LINE> return widths <NEW_LINE> <DEDENT> def set_time(self, time_): <NEW_LINE> <INDENT> self._last_time = time_ <NEW_LINE> if self._disabled: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.set_text(util.format_time_display(time_)) <NEW_LINE> <DEDENT> def set_disabled(self, disabled): <NEW_LINE> <INDENT> self._disabled = disabled <NEW_LINE> if disabled: <NEW_LINE> <INDENT> self.set_text(u"‒\u2236‒‒") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.set_time(self._last_time)
A label for displaying the running time It tries to minimize size changes due to unequal character widths with the same number of characters. e.g. a time display -> 04:20
6259906afff4ab517ebcf005
class SampleFrac(PartitionMethod): <NEW_LINE> <INDENT> def __init__(self, frac): <NEW_LINE> <INDENT> self.fraction = frac <NEW_LINE> <DEDENT> def __call__(self, udf): <NEW_LINE> <INDENT> return udf.sample(frac=self.fraction)
Randomly select a fraction of test rows per user/item. :param frac: the fraction of items to select for testing. :paramtype frac: double
6259906a91f36d47f2231a84
class Solution: <NEW_LINE> <INDENT> def FindElements(self, Matrix): <NEW_LINE> <INDENT> m = len(Matrix) <NEW_LINE> s = set(Matrix[0]) <NEW_LINE> new_s = set() <NEW_LINE> for i in range(1, m): <NEW_LINE> <INDENT> new_s.clear() <NEW_LINE> for j in Matrix[i]: <NEW_LINE> <INDENT> if j in s: <NEW_LINE> <INDENT> new_s.add(j) <NEW_LINE> <DEDENT> <DEDENT> s = set(new_s) <NEW_LINE> <DEDENT> return new_s.pop()
@param Matrix: the input @return: the element which appears every row
6259906a55399d3f05627d0c
class SelflinkBot(MultipleSitesBot, BaseUnlinkBot): <NEW_LINE> <INDENT> summary_key = 'selflink-remove' <NEW_LINE> def __init__(self, generator, **kwargs): <NEW_LINE> <INDENT> super().__init__(**kwargs) <NEW_LINE> self.generator = generator <NEW_LINE> <DEDENT> def _create_callback(self): <NEW_LINE> <INDENT> callback = super()._create_callback() <NEW_LINE> callback.additional_choices += [_BoldChoice(self.current_page, callback)] <NEW_LINE> return callback <NEW_LINE> <DEDENT> def treat_page(self): <NEW_LINE> <INDENT> if '<imagemap>' in self.current_page.text: <NEW_LINE> <INDENT> pywikibot.output( 'Skipping page {} because it contains an image map.' .format(self.current_page.title(as_link=True))) <NEW_LINE> return <NEW_LINE> <DEDENT> self.unlink(self.current_page)
Self-link removal bot.
6259906a99fddb7c1ca639c5
class Tests(IMP.test.TestCase): <NEW_LINE> <INDENT> def test_wlc(self): <NEW_LINE> <INDENT> wlc = IMP.misc.WormLikeChain(200, 3.4) <NEW_LINE> self.check_unary_function_min(wlc, 0, 250, .5, 0) <NEW_LINE> self.check_unary_function_deriv(wlc, 0, 250, .5) <NEW_LINE> self.assertGreater(wlc.evaluate_with_derivative(180)[1], 4.2)
Tests for WLC unary function
6259906a67a9b606de547697
class TemplateResult(DictMixin, object): <NEW_LINE> <INDENT> def __init__(self, *a, **kw): <NEW_LINE> <INDENT> self.__dict__["_d"] = dict(*a, **kw) <NEW_LINE> self._d.setdefault("__body__", u'') <NEW_LINE> self.__dict__['_parts'] = [] <NEW_LINE> self.__dict__["extend"] = self._parts.extend <NEW_LINE> self._d.setdefault("__body__", None) <NEW_LINE> <DEDENT> def keys(self): <NEW_LINE> <INDENT> return self._d.keys() <NEW_LINE> <DEDENT> def _prepare_body(self): <NEW_LINE> <INDENT> if self._parts: <NEW_LINE> <INDENT> value = u"".join(self._parts) <NEW_LINE> self._parts[:] = [] <NEW_LINE> body = self._d.get('__body__') <NEW_LINE> if body: <NEW_LINE> <INDENT> self._d['__body__'] = body + value <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._d['__body__'] = value <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def __getitem__(self, name): <NEW_LINE> <INDENT> if name == "__body__": <NEW_LINE> <INDENT> self._prepare_body() <NEW_LINE> <DEDENT> return self._d[name] <NEW_LINE> <DEDENT> def __setitem__(self, name, value): <NEW_LINE> <INDENT> if name == "__body__": <NEW_LINE> <INDENT> self._prepare_body() <NEW_LINE> <DEDENT> return self._d.__setitem__(name, value) <NEW_LINE> <DEDENT> def __delitem__(self, name): <NEW_LINE> <INDENT> if name == "__body__": <NEW_LINE> <INDENT> self._prepare_body() <NEW_LINE> <DEDENT> return self._d.__delitem__(name) <NEW_LINE> <DEDENT> def __getattr__(self, key): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self[key] <NEW_LINE> <DEDENT> except KeyError as k: <NEW_LINE> <INDENT> raise AttributeError(k) <NEW_LINE> <DEDENT> <DEDENT> def __setattr__(self, key, value): <NEW_LINE> <INDENT> self[key] = value <NEW_LINE> <DEDENT> def __delattr__(self, key): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> del self[key] <NEW_LINE> <DEDENT> except KeyError as k: <NEW_LINE> <INDENT> raise AttributeError(k) <NEW_LINE> <DEDENT> <DEDENT> def __unicode__(self): <NEW_LINE> <INDENT> self._prepare_body() <NEW_LINE> return self["__body__"] <NEW_LINE> <DEDENT> def 
__str__(self): <NEW_LINE> <INDENT> self._prepare_body() <NEW_LINE> return self["__body__"].encode('utf-8') <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> self._prepare_body() <NEW_LINE> return "<TemplateResult: %s>" % self._d <NEW_LINE> <DEDENT> def __iter__(self): <NEW_LINE> <INDENT> for k in self._d: <NEW_LINE> <INDENT> yield k <NEW_LINE> <DEDENT> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return len(self._d)
Dictionary like object for storing template output. The result of a template execution is usally a string, but sometimes it contains attributes set using $var. This class provides a simple dictionary like interface for storing the output of the template and the attributes. The output is stored with a special key __body__. Convering the the TemplateResult to string or unicode returns the value of __body__. When the template is in execution, the output is generated part by part and those parts are combined at the end. Parts are added to the TemplateResult by calling the `extend` method and the parts are combined seemlessly when __body__ is accessed. >>> d = TemplateResult(__body__='hello, world', x='foo') >>> d <TemplateResult: {'__body__': 'hello, world', 'x': 'foo'}> >>> print d hello, world >>> d.x 'foo' >>> d = TemplateResult() >>> d.extend([u'hello', u'world']) >>> d <TemplateResult: {'__body__': u'helloworld'}>
6259906a2ae34c7f260ac8d3
class SingleFileProgram(CLProgram): <NEW_LINE> <INDENT> def __init__(self, mode, filedesc, ext="", *args, **kwargs): <NEW_LINE> <INDENT> super().__init__(*args, **kwargs) <NEW_LINE> self.add_argument("file", metavar="file" + ext, nargs="?", type=FileType(mode), default="-", help=filedesc) <NEW_LINE> <DEDENT> def process(self, file, **kwargs): <NEW_LINE> <INDENT> with file: <NEW_LINE> <INDENT> super().process(file=file, **kwargs)
Program working with one file or standard streams.
6259906a23849d37ff8528a0
class Transaction: <NEW_LINE> <INDENT> def __init__(self, response): <NEW_LINE> <INDENT> self._response = response <NEW_LINE> <DEDENT> @property <NEW_LINE> def amount(self): <NEW_LINE> <INDENT> return self._response['amount'] <NEW_LINE> <DEDENT> @property <NEW_LINE> def id(self): <NEW_LINE> <INDENT> if 'transactionId' in self._response: <NEW_LINE> <INDENT> return self._response['transactionId'] <NEW_LINE> <DEDENT> return None
Transfer transaction
6259906aadb09d7d5dc0bd55
class TestConfirmQuit: <NEW_LINE> <INDENT> TESTS = { '': None, 'always': ['always'], 'never': ['never'], 'multiple-tabs,downloads': ['multiple-tabs', 'downloads'], 'downloads,multiple-tabs': ['downloads', 'multiple-tabs'], 'downloads,,multiple-tabs': ['downloads', None, 'multiple-tabs'], } <NEW_LINE> @pytest.fixture <NEW_LINE> def klass(self): <NEW_LINE> <INDENT> return configtypes.ConfirmQuit <NEW_LINE> <DEDENT> @pytest.mark.parametrize('val', TESTS.keys()) <NEW_LINE> def test_validate_valid(self, klass, val): <NEW_LINE> <INDENT> klass(none_ok=True).validate(val) <NEW_LINE> <DEDENT> @pytest.mark.parametrize('val', [ '', 'foo', 'downloads,foo', 'downloads,,multiple-tabs', 'downloads,multiple-tabs,downloads', 'always,downloads', 'never,downloads', ]) <NEW_LINE> def test_validate_invalid(self, klass, val): <NEW_LINE> <INDENT> with pytest.raises(configexc.ValidationError): <NEW_LINE> <INDENT> klass().validate(val) <NEW_LINE> <DEDENT> <DEDENT> @pytest.mark.parametrize('val, expected', TESTS.items()) <NEW_LINE> def test_transform(self, klass, val, expected): <NEW_LINE> <INDENT> assert klass().transform(val) == expected <NEW_LINE> <DEDENT> def test_complete(self, klass): <NEW_LINE> <INDENT> completions = [e[0] for e in klass().complete()] <NEW_LINE> assert 'always' in completions <NEW_LINE> assert 'never' in completions <NEW_LINE> assert 'multiple-tabs,downloads' in completions <NEW_LINE> for val in completions: <NEW_LINE> <INDENT> assert not 'always,' in val <NEW_LINE> assert not ',always' in val <NEW_LINE> assert not 'never,' in val <NEW_LINE> assert not ',never' in val
Test ConfirmQuit.
6259906abaa26c4b54d50a92
class DoAPI: <NEW_LINE> <INDENT> def __init__(self, token): <NEW_LINE> <INDENT> self.token = token <NEW_LINE> self.manager = digitalocean.Manager(token=token) <NEW_LINE> self.ssh_keys = self.manager.get_all_sshkeys() <NEW_LINE> self.droplets: List[Droplet] = [] <NEW_LINE> self.ip_addresses: List[str] = [] <NEW_LINE> self.tag = config.DROPLET_TAG <NEW_LINE> <DEDENT> def create_droplet(self, region: str): <NEW_LINE> <INDENT> name = "Temp-" + "".join(random.choices(string.digits, k=10)) <NEW_LINE> droplet = digitalocean.Droplet( token=self.token, name=name, region=region, image=config.DROPLET_IMAGE, size_slug=config.DROPLET_SIZE_SLUG, ssh_keys=self.ssh_keys, backups=False, tags=[self.tag], ) <NEW_LINE> droplet.create() <NEW_LINE> self.droplets.append(Droplet(droplet)) <NEW_LINE> <DEDENT> def destroy_batch(self): <NEW_LINE> <INDENT> test_droplets = self.manager.get_all_droplets(tag_name=self.tag) <NEW_LINE> for droplet in test_droplets: <NEW_LINE> <INDENT> droplet.destroy() <NEW_LINE> <DEDENT> <DEDENT> def create_batch(self, quantity): <NEW_LINE> <INDENT> for _ in range(quantity): <NEW_LINE> <INDENT> self.create_droplet(region=config.REGION) <NEW_LINE> <DEDENT> while not all(droplet.is_ready for droplet in self.droplets): <NEW_LINE> <INDENT> time.sleep(1) <NEW_LINE> <DEDENT> for droplet in self.droplets: <NEW_LINE> <INDENT> self.ip_addresses.append(droplet.ip_address)
Class for working with DigitalOcean's API
6259906ad486a94d0ba2d7aa
class ImageDataDisk(msrest.serialization.Model): <NEW_LINE> <INDENT> _validation = { 'lun': {'required': True}, } <NEW_LINE> _attribute_map = { 'lun': {'key': 'lun', 'type': 'int'}, 'snapshot': {'key': 'snapshot', 'type': 'SubResource'}, 'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'}, 'blob_uri': {'key': 'blobUri', 'type': 'str'}, 'caching': {'key': 'caching', 'type': 'str'}, 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, 'storage_account_type': {'key': 'storageAccountType', 'type': 'str'}, } <NEW_LINE> def __init__( self, *, lun: int, snapshot: Optional["SubResource"] = None, managed_disk: Optional["SubResource"] = None, blob_uri: Optional[str] = None, caching: Optional[Union[str, "CachingTypes"]] = None, disk_size_gb: Optional[int] = None, storage_account_type: Optional[Union[str, "StorageAccountTypes"]] = None, **kwargs ): <NEW_LINE> <INDENT> super(ImageDataDisk, self).__init__(**kwargs) <NEW_LINE> self.lun = lun <NEW_LINE> self.snapshot = snapshot <NEW_LINE> self.managed_disk = managed_disk <NEW_LINE> self.blob_uri = blob_uri <NEW_LINE> self.caching = caching <NEW_LINE> self.disk_size_gb = disk_size_gb <NEW_LINE> self.storage_account_type = storage_account_type
Describes a data disk. All required parameters must be populated in order to send to Azure. :ivar lun: Required. Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. :vartype lun: int :ivar snapshot: The snapshot. :vartype snapshot: ~azure.mgmt.compute.v2017_12_01.models.SubResource :ivar managed_disk: The managedDisk. :vartype managed_disk: ~azure.mgmt.compute.v2017_12_01.models.SubResource :ivar blob_uri: The Virtual Hard Disk. :vartype blob_uri: str :ivar caching: Specifies the caching requirements. :code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **None** :code:`<br>`:code:`<br>` **ReadOnly** :code:`<br>`:code:`<br>` **ReadWrite** :code:`<br>`:code:`<br>` Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: "None", "ReadOnly", "ReadWrite". :vartype caching: str or ~azure.mgmt.compute.v2017_12_01.models.CachingTypes :ivar disk_size_gb: Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image. :code:`<br>`:code:`<br>` This value cannot be larger than 1023 GB. :vartype disk_size_gb: int :ivar storage_account_type: Specifies the storage account type for the managed disk. Possible values are: Standard_LRS or Premium_LRS. Possible values include: "Standard_LRS", "Premium_LRS". :vartype storage_account_type: str or ~azure.mgmt.compute.v2017_12_01.models.StorageAccountTypes
6259906a45492302aabfdcc3
class NotesListHandler(ListHandler): <NEW_LINE> <INDENT> def post(self, cont_name, list_name, **kwargs): <NEW_LINE> <INDENT> _id = kwargs.pop('cid') <NEW_LINE> container, permchecker, storage, mongo_validator, input_validator, keycheck = self._initialize_request(cont_name, list_name, _id) <NEW_LINE> payload = self.request.json_body <NEW_LINE> input_validator(payload, 'POST') <NEW_LINE> payload['_id'] = payload.get('_id') or str(util.ObjectId()) <NEW_LINE> payload['user'] = payload.get('user', self.uid) <NEW_LINE> payload['created'] = payload['modified'] = datetime.datetime.utcnow() <NEW_LINE> if payload.get('timestamp'): <NEW_LINE> <INDENT> payload['timestamp'] = dateutil.parser.parse(payload['timestamp']) <NEW_LINE> <DEDENT> result = keycheck(mongo_validator(permchecker(storage.exec_op)))('POST', _id=_id, payload=payload) <NEW_LINE> if result.modified_count == 1: <NEW_LINE> <INDENT> return {'modified':result.modified_count} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id)) <NEW_LINE> <DEDENT> <DEDENT> def put(self, cont_name, list_name, **kwargs): <NEW_LINE> <INDENT> _id = kwargs.pop('cid') <NEW_LINE> container, permchecker, storage, mongo_validator, input_validator, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs) <NEW_LINE> payload = self.request.json_body <NEW_LINE> input_validator(payload, 'PUT') <NEW_LINE> payload['modified'] = datetime.datetime.utcnow() <NEW_LINE> if payload.get('timestamp'): <NEW_LINE> <INDENT> payload['timestamp'] = dateutil.parser.parse(payload['timestamp']) <NEW_LINE> <DEDENT> result = keycheck(mongo_validator(permchecker(storage.exec_op)))('PUT', _id=_id, query_params=kwargs, payload=payload) <NEW_LINE> if result.matched_count == 0: <NEW_LINE> <INDENT> self.abort(404, 'Element not updated in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id)) <NEW_LINE> <DEDENT> 
else: <NEW_LINE> <INDENT> return {'modified':result.modified_count}
NotesListHandler overrides post, put methods of ListHandler to add custom fields to the payload. e.g. _id, user, created, etc.
6259906abe8e80087fbc0877
class Webhook(pulumi.CustomResource): <NEW_LINE> <INDENT> def __init__(__self__, __name__, __opts__=None, branch_filter=None, project_name=None): <NEW_LINE> <INDENT> if not __name__: <NEW_LINE> <INDENT> raise TypeError('Missing resource name argument (for URN creation)') <NEW_LINE> <DEDENT> if not isinstance(__name__, basestring): <NEW_LINE> <INDENT> raise TypeError('Expected resource name to be a string') <NEW_LINE> <DEDENT> if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): <NEW_LINE> <INDENT> raise TypeError('Expected resource options to be a ResourceOptions instance') <NEW_LINE> <DEDENT> __props__ = dict() <NEW_LINE> if branch_filter and not isinstance(branch_filter, basestring): <NEW_LINE> <INDENT> raise TypeError('Expected property branch_filter to be a basestring') <NEW_LINE> <DEDENT> __self__.branch_filter = branch_filter <NEW_LINE> __props__['branchFilter'] = branch_filter <NEW_LINE> if not project_name: <NEW_LINE> <INDENT> raise TypeError('Missing required property project_name') <NEW_LINE> <DEDENT> elif not isinstance(project_name, basestring): <NEW_LINE> <INDENT> raise TypeError('Expected property project_name to be a basestring') <NEW_LINE> <DEDENT> __self__.project_name = project_name <NEW_LINE> __props__['projectName'] = project_name <NEW_LINE> __self__.payload_url = pulumi.runtime.UNKNOWN <NEW_LINE> __self__.secret = pulumi.runtime.UNKNOWN <NEW_LINE> __self__.url = pulumi.runtime.UNKNOWN <NEW_LINE> super(Webhook, __self__).__init__( 'aws:codebuild/webhook:Webhook', __name__, __props__, __opts__) <NEW_LINE> <DEDENT> def set_outputs(self, outs): <NEW_LINE> <INDENT> if 'branchFilter' in outs: <NEW_LINE> <INDENT> self.branch_filter = outs['branchFilter'] <NEW_LINE> <DEDENT> if 'payloadUrl' in outs: <NEW_LINE> <INDENT> self.payload_url = outs['payloadUrl'] <NEW_LINE> <DEDENT> if 'projectName' in outs: <NEW_LINE> <INDENT> self.project_name = outs['projectName'] <NEW_LINE> <DEDENT> if 'secret' in outs: <NEW_LINE> <INDENT> self.secret = 
outs['secret'] <NEW_LINE> <DEDENT> if 'url' in outs: <NEW_LINE> <INDENT> self.url = outs['url']
Manages a CodeBuild webhook, which is an endpoint accepted by the CodeBuild service to trigger builds from source code repositories. Depending on the source type of the CodeBuild project, the CodeBuild service may also automatically create and delete the actual repository webhook as well.
6259906acb5e8a47e493cd79
class Resource(GameObject): <NEW_LINE> <INDENT> def __init__(self, world): <NEW_LINE> <INDENT> super(Resource, self).__init__(world) <NEW_LINE> self.radius = 25 <NEW_LINE> self.color = 'cyan'
a tasty clump of resources to be consumed
6259906a5166f23b2e244bbd
class PercentageDiscountBenefit(Benefit): <NEW_LINE> <INDENT> u <NEW_LINE> class Meta: <NEW_LINE> <INDENT> proxy = True <NEW_LINE> <DEDENT> def apply(self, basket, condition=None): <NEW_LINE> <INDENT> discount = Decimal('0.00') <NEW_LINE> affected_items = 0 <NEW_LINE> max_affected_items = self._effective_max_affected_items() <NEW_LINE> for line in basket.all_lines(): <NEW_LINE> <INDENT> if affected_items >= max_affected_items: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if self.range.contains_product(line.product) and line.product.has_stockrecord: <NEW_LINE> <INDENT> price = getattr(line.product.stockrecord, self.price_field) <NEW_LINE> quantity = min(line.quantity_without_discount, max_affected_items - affected_items) <NEW_LINE> discount += self.value/100 * price * int(quantity) <NEW_LINE> affected_items += quantity <NEW_LINE> line.discount(discount, quantity) <NEW_LINE> <DEDENT> <DEDENT> if discount > 0 and condition: <NEW_LINE> <INDENT> condition.consume_items(basket) <NEW_LINE> <DEDENT> return discount
An offer benefit that gives a percentage discount
6259906a76e4537e8c3f0d6e
class Row: <NEW_LINE> <INDENT> def __init__(self, sheet_reader_instance, data_range, values): <NEW_LINE> <INDENT> self.sheet_reader_instance = sheet_reader_instance <NEW_LINE> self.workbook_id = sheet_reader_instance.workbook_id <NEW_LINE> self.sheet_name = sheet_reader_instance.sheet_name <NEW_LINE> self.write_map = sheet_reader_instance.write_map <NEW_LINE> self.data_start_cell = data_range[0] <NEW_LINE> self.current_row_index = get_row_from_cell(self.data_start_cell) <NEW_LINE> self.id = get_row_from_cell(self.data_start_cell) <NEW_LINE> self.header_map = sheet_reader_instance.header_map <NEW_LINE> self.values = values <NEW_LINE> <DEDENT> def __getitem__(self, key): <NEW_LINE> <INDENT> field_index = self.header_map.get(key, '___') <NEW_LINE> if field_index == '___': <NEW_LINE> <INDENT> raise KeyError <NEW_LINE> <DEDENT> return self.values[field_index] <NEW_LINE> <DEDENT> def __setitem__(self, key, value, immediate_update=False): <NEW_LINE> <INDENT> cell_col = self.header_map.get(key, None) <NEW_LINE> if not cell_col: <NEW_LINE> <INDENT> raise KeyError <NEW_LINE> <DEDENT> data_start_col = get_col_from_cell(self.data_start_cell) <NEW_LINE> data_start_col = excel_column_to_number(data_start_col) <NEW_LINE> cell_col = data_start_col + cell_col <NEW_LINE> cell_col = number_to_excel_column(cell_col) <NEW_LINE> destination_cell = cell_col + str(self.current_row_index) <NEW_LINE> if immediate_update: <NEW_LINE> <INDENT> self.sheet_reader_instance.connection.write_range(destination_cell, destination_cell, [[value]]) <NEW_LINE> return <NEW_LINE> <DEDENT> self.write_map[destination_cell] = value <NEW_LINE> if not self.sheet_reader_instance.auto_update: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if len(self.write_map.keys()) >= self.sheet_reader_instance.write_chunk_size: <NEW_LINE> <INDENT> self.sheet_reader_instance.update() <NEW_LINE> <DEDENT> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> pretty_dict = {} <NEW_LINE> for k, v in self.header_map.items(): <NEW_LINE> 
<INDENT> pretty_dict[k] = self.values[v] <NEW_LINE> <DEDENT> return str(pretty_dict)
A dict like object that represent one row in a Google sheet. Read a value: row[column_name] Write a value: row[column_name] = new_value
6259906af548e778e596cd78
class BaseRemovedInRBToolsVersionWarning(DeprecationWarning): <NEW_LINE> <INDENT> @classmethod <NEW_LINE> def warn(cls, message, stacklevel=2): <NEW_LINE> <INDENT> warnings.warn(message, cls, stacklevel=stacklevel + 1)
Base class for a RBTools deprecation warning. All version-specific deprecation warnings inherit from this, allowing callers to check for Review Board deprecations without being tied to a specific version.
6259906a76e4537e8c3f0d6f
class Post(db.Model): <NEW_LINE> <INDENT> __tablename__ = 'posts' <NEW_LINE> __searchable__ = ['body', ] <NEW_LINE> __analyzer__ = ChineseAnalyzer() <NEW_LINE> id = db.Column(db.Integer, primary_key=True) <NEW_LINE> body = db.Column(db.Text) <NEW_LINE> body_html = db.Column(db.Text) <NEW_LINE> timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow) <NEW_LINE> author_id = db.Column(db.Integer, db.ForeignKey('users.id')) <NEW_LINE> comments = db.relationship('Comment', backref='post', lazy='dynamic') <NEW_LINE> @staticmethod <NEW_LINE> def generate_fake(count=100): <NEW_LINE> <INDENT> from random import seed, randint <NEW_LINE> import forgery_py <NEW_LINE> seed() <NEW_LINE> user_count = User.query.count() <NEW_LINE> for i in range(count): <NEW_LINE> <INDENT> u = User.query.offset(randint(0, user_count - 1)).first() <NEW_LINE> p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)), timestamp=forgery_py.date.date(True), author=u) <NEW_LINE> db.session.add(p) <NEW_LINE> db.session.commit() <NEW_LINE> <DEDENT> <DEDENT> @staticmethod <NEW_LINE> def on_changed_body(target, value, oldvalue, initator): <NEW_LINE> <INDENT> allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul', 'h1', 'h2', 'h3', 'p'] <NEW_LINE> target.body_html = bleach.linkify(bleach.clean( markdown(value, output_format='html'), tags=allowed_tags, strip=True)) <NEW_LINE> <DEDENT> def to_json(self): <NEW_LINE> <INDENT> json_post = { 'url': url_for('api.get_post', id=self.id, _external=True), 'body': self.body, 'body_html': self.body_html, 'timestamp': self.timestamp, 'author': url_for('api.get_user', id=self.author_id, _external=True), 'comments': url_for('api.get_post_comments', id=self.id, _external=True), 'comment_count': self.comments.count() } <NEW_LINE> return json_post <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def from_json(json_post): <NEW_LINE> <INDENT> body = json_post.get('body') <NEW_LINE> if body is None or body == 
'': <NEW_LINE> <INDENT> raise ValidationError('post does not have a body') <NEW_LINE> <DEDENT> return Post(body=body) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return '<Post %r>' % self.body
用户发表的博客文章; 博客文章的html代码缓存在body_html字段中,避免重复转换
6259906af7d966606f7494b1
class Rule: <NEW_LINE> <INDENT> def action(self, block, handler): <NEW_LINE> <INDENT> handler.start(self.type) <NEW_LINE> handler.feed(block) <NEW_LINE> handler.end(self.type) <NEW_LINE> return True
すべてのルールの基底クラス 全てのサブクラスがtypeという属性を持ち、文字列でタイプ名を格納している前提
6259906a4a966d76dd5f06e1
class TransformerLayer(nn.Module): <NEW_LINE> <INDENT> def __init__(self, size, num_heads, dropout=0.3): <NEW_LINE> <INDENT> super(TransformerLayer, self).__init__() <NEW_LINE> self.output_linear = nn.Linear(size, size) <NEW_LINE> self.norm = nn.LayerNorm(size) <NEW_LINE> self.dropout = nn.Dropout(dropout) <NEW_LINE> self.atten = MultiHeadedAttention(num_heads, size, dropout=dropout) <NEW_LINE> self.ffn = PositionwiseFeedForward(size, dropout=dropout) <NEW_LINE> <DEDENT> def forward(self, input, mask=None, batch_first=False): <NEW_LINE> <INDENT> atten_output = self.atten(input, input, input, mask, batch_first) <NEW_LINE> atten_output = self.output_linear(atten_output) <NEW_LINE> return self.ffn(self.norm(input + self.dropout(atten_output)))
attention -> add & norm -> PositionwiseFeedForward Note for code simplicity the norm is first as opposed to last.
6259906aa219f33f346c7ff4