Dataset columns: function (string, length 11 to 56k), repo_name (string, length 5 to 60), features (sequence).
def estimStereoSIMMParams(self):
    self.computeStereoX()
    SXR = np.abs(self.XR) ** 2
    SXL = np.abs(self.XL) ** 2
    alphaR, alphaL, HGAMMA, HPHI, HF0, \
        betaR, betaL, HM, WM, recoError2 = SIMM.Stereo_SIMM(
            # the data to be fitted to:
            SXR, SXL,
            # the basis matrices for the spectral combs
            WF0=self.SIMMParams['WF0'],
            # and for the elementary filters:
            WGAMMA=self.SIMMParams['WGAMMA'],
            # number of desired filters, accompaniment spectra:
            numberOfFilters=self.SIMMParams['K'],
            numberOfAccompanimentSpectralShapes=self.SIMMParams['R'],
            # if any, initial amplitude matrices for
            HGAMMA0=None, HPHI0=None,
            HF00=self.SIMMParams['HF00'],
            WM0=None, HM0=None,
            # some more optional arguments, to control the "convergence"
            # of the algorithm
            numberOfIterations=self.SIMMParams['niter'],
            updateRulePower=1.0,
            stepNotes=self.SIMMParams['stepNotes'],
            lambdaHF0=0.0 / (1.0 * SXR.max()),
            alphaHF0=0.9,
            verbose=self.verbose,
            displayEvolution=False)
wslihgt/pyfasst
[ 87, 21, 87, 6, 1375254776 ]
def estimStereoSUIMMParams(self):
wslihgt/pyfasst
[ 87, 21, 87, 6, 1375254776 ]
def writeSeparatedSignals(self, suffix='.wav'):
    """Writes the separated signals to the files in self.files.
    If suffix contains 'VUIMM', then this method will take the WF0
    and HF0 that contain the estimated unvoiced elements.
    """
    if 'VUIMM' in suffix:
        WF0 = self.SIMMParams['WUF0']
        HF0 = self.SIMMParams['HUF0']
    else:
        WF0 = self.SIMMParams['WF0']
        HF0 = self.SIMMParams['HF0']
wslihgt/pyfasst
[ 87, 21, 87, 6, 1375254776 ]
def writeSeparatedSignalsWithUnvoice(self):
    """A wrapper to give a decent name to the function: simply calling
    self.writeSeparatedSignals with the '_VUIMM.wav' suffix.
    """
    self.writeSeparatedSignals(suffix='_VUIMM.wav')
wslihgt/pyfasst
[ 87, 21, 87, 6, 1375254776 ]
def checkChunkSize(self, maxFrames):
    """Computes the number of chunks of size maxFrames, and changes
    maxFrames in case it does not provide long enough chunks
    (especially the last chunk).
    """
    totFrames = np.int32(self.computeNFrames())
    nChunks = totFrames / maxFrames + 1
    # checking size of last chunk; if "small", then making it
    # more even sized chunks
    if (totFrames - (nChunks - 1) * maxFrames <
            self.stftParams['windowSizeInSamples'] /
            self.stftParams['hopsize']):
        print "Modifying the maxframes, such that chunks not too small"
        maxFrames = np.int(np.ceil(np.double(totFrames) / nChunks))
        nChunks = totFrames / maxFrames
    print "The chunks are then maximum", maxFrames
wslihgt/pyfasst
[ 87, 21, 87, 6, 1375254776 ]
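The chunk-size arithmetic in checkChunkSize above is easy to verify in isolation. Below is a minimal, self-contained sketch of the same rule in plain Python 3 (no numpy; `tot_frames`, `max_frames` and the minimum chunk length are made-up example values, not values from pyfasst):

def check_chunk_size(tot_frames, max_frames, min_chunk):
    """Standalone version of the chunking rule above (assumed semantics)."""
    n_chunks = tot_frames // max_frames + 1
    # if the last chunk would be shorter than min_chunk, even out the sizes
    if tot_frames - (n_chunks - 1) * max_frames < min_chunk:
        max_frames = -(-tot_frames // n_chunks)  # ceiling division
        n_chunks = tot_frames // max_frames
    return n_chunks, max_frames

# 1003 frames in chunks of 100 would leave a 3-frame last chunk;
# the rule evens this out to chunks of at most 92 frames.
print(check_chunk_size(1003, 100, 10))  # (10, 92) with these example values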
def __init__(self, name, identifier, anchor_token, token_to_move,
             between_tokens):
    structure.Rule.__init__(self, name, identifier)
    self.subphase = 2
    self.anchor_token = anchor_token
    self.token_to_move = token_to_move
    self.between_tokens = between_tokens
jeremiah-c-leary/vhdl-style-guide
[ 129, 31, 129, 57, 1499106283 ]
def _analyze(self, lToi):
    for oToi in lToi:
        lTokens = oToi.get_tokens()
        for iToken, oToken in enumerate(lTokens):
            if isinstance(oToken, self.anchor_token):
                iStartIndex = iToken
            if isinstance(oToken, self.token_to_move):
                iMoveIndex = iToken
        if not (iStartIndex + 2 == iMoveIndex and
                isinstance(lTokens[iStartIndex + 1], parser.whitespace)):
            oViolation = violation.New(oToi.get_line_number(), oToi,
                                       self.solution)
            dAction = {}
            dAction['insertIndex'] = iStartIndex + 1
            dAction['moveIndex'] = iMoveIndex
            oViolation.set_action(dAction)
            oViolation.set_remap()
            oViolation.fix_blank_lines = True
            self.add_violation(oViolation)
jeremiah-c-leary/vhdl-style-guide
[ 129, 31, 129, 57, 1499106283 ]
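For readers unfamiliar with vhdl-style-guide internals, the condition checked in _analyze above amounts to: the token to move must sit exactly one whitespace token after the anchor. A tiny sketch of that invariant with stand-in token classes (all names here are hypothetical, not vsg's API):

class Anchor: pass
class Whitespace: pass
class Moved: pass

def is_compliant(tokens):
    # mirror of the check in _analyze: anchor, one whitespace, then the token
    i_start = next(i for i, t in enumerate(tokens) if isinstance(t, Anchor))
    i_move = next(i for i, t in enumerate(tokens) if isinstance(t, Moved))
    return (i_start + 2 == i_move
            and isinstance(tokens[i_start + 1], Whitespace))

print(is_compliant([Anchor(), Whitespace(), Moved()]))                # True
print(is_compliant([Anchor(), Whitespace(), Whitespace(), Moved()]))  # False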
def __init__(self):
    self.word_to_cluster_dict = {}
    self.cluster_dict = {}
texta-tk/texta
[ 31, 7, 31, 2, 1459876722 ]
def cluster(self, embedding, n_clusters=None):
    vocab = list(embedding.wv.vocab.keys())
    vocab_vectors = np.array([embedding[word] for word in vocab])
texta-tk/texta
[ 31, 7, 31, 2, 1459876722 ]
def query(self, word):
    try:
        return self.cluster_dict[self.word_to_cluster_dict[word]]
    except KeyError:
        # unknown word or cluster: return an empty cluster
        return []
texta-tk/texta
[ 31, 7, 31, 2, 1459876722 ]
def text_to_clusters(self, text):
    text = [str(self.word_to_cluster_dict[word])
            for word in text if word in self.word_to_cluster_dict]
    return ' '.join(text)
texta-tk/texta
[ 31, 7, 31, 2, 1459876722 ]
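A small usage sketch of the clustering interface above, with hand-filled dictionaries standing in for a trained embedding (the cluster assignments are invented for illustration):

class FakeClustering:
    """Stand-in for an instance whose cluster() has already been run."""
    def __init__(self):
        self.word_to_cluster_dict = {'cat': 0, 'dog': 0, 'car': 1}
        self.cluster_dict = {0: ['cat', 'dog'], 1: ['car']}

    # same lookup logic as query()/text_to_clusters() above
    def query(self, word):
        try:
            return self.cluster_dict[self.word_to_cluster_dict[word]]
        except KeyError:
            return []

    def text_to_clusters(self, text):
        return ' '.join(str(self.word_to_cluster_dict[w])
                        for w in text if w in self.word_to_cluster_dict)

c = FakeClustering()
print(c.query('cat'))                           # ['cat', 'dog']
print(c.query('plane'))                         # []
print(c.text_to_clusters(['cat', 'car', 'x']))  # '0 1'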
def test_intspansproduct_errors():
    """Check instantiation errors of IntspansProduct."""
    with pytest.raises(TypeError) as excinfo:
        IntspansProduct('4-9,10', 'a')
    assert str(excinfo.value) == 'elt_nb must be an int, found '\
        '<class \'str\'> instead.'
    with pytest.raises(TypeError) as excinfo:
        IntspansProduct(10, 3)
    assert str(excinfo.value) == 'cartesianpower_spans must be a str, found '\
        '<class \'int\'> instead.'
    with pytest.raises(RuntimeError) as excinfo:
        IntspansProduct('1×2×3', 4)
    assert str(excinfo.value) == 'Found 3 elements in this spans product: '\
        '1×2×3, but 4 were expected.'
    with pytest.raises(ValueError) as excinfo:
        IntspansProduct('1×2×a')
    assert str(excinfo.value) == 'Syntax error found in this integers\' '\
        'span: a, what should complain with intspan syntax. See '\
        'http://intspan.readthedocs.io/en/latest/index.html'
nicolashainaux/mathmaker
[ 4, 1, 4, 34, 1468416792 ]
def test_intspansproduct_group_by_packs():
    r = IntspansProduct('1,2×1,2×3,4×5,6')
    assert r._group_by_packs(r.spans, '2_2') == \
        [[[intspan('1-2'), intspan('1-2')], [intspan('3-4'), intspan('5-6')]],
         [[intspan('1-2'), intspan('3-4')], [intspan('1-2'), intspan('5-6')]],
         [[intspan('1-2'), intspan('5-6')], [intspan('1-2'), intspan('3-4')]]]
    assert r._group_by_packs(r.spans, '3_1') == \
        [[[intspan('1-2'), intspan('1-2'), intspan('3-4')], [intspan('5-6')]],
         [[intspan('1-2'), intspan('1-2'), intspan('5-6')], [intspan('3-4')]],
         [[intspan('1-2'), intspan('3-4'), intspan('5-6')], [intspan('1-2')]]]
    r = IntspansProduct('1,5×1,2×1,3×3,4')
    assert r._group_by_packs(r.spans, '2_2') == \
        [[[intspan('1-2'), intspan('1,5')], [intspan('1,3'), intspan('3-4')]],
         [[intspan('1-2'), intspan('3-4')], [intspan('1,3'), intspan('1,5')]],
         [[intspan('1-2'), intspan('1,3')], [intspan('1,5'), intspan('3-4')]]]
    assert r._group_by_packs(r.spans, '3_1') == \
        [[[intspan('1-2'), intspan('1,3'), intspan('1,5')], [intspan('3-4')]],
         [[intspan('1-2'), intspan('1,5'), intspan('3-4')], [intspan('1,3')]],
         [[intspan('1-2'), intspan('1,3'), intspan('3-4')], [intspan('1,5')]],
         [[intspan('1,3'), intspan('1,5'), intspan('3-4')], [intspan('1-2')]]]
    assert r._group_by_packs(r.spans, '1_1_1_1') == \
        [[[intspan('1-2')], [intspan('1,3')], [intspan('1,5')],
          [intspan('3-4')]]]
    assert r._group_by_packs(r.spans, '4') == \
        [[[intspan('1-2'), intspan('1,3'), intspan('1,5'), intspan('3-4')]]]
    r = IntspansProduct('1×2,3×2,4')
    with pytest.raises(ValueError) as excinfo:
        r._group_by_packs(r.spans, '3_2_1')
    assert str(excinfo.value) == "dist_code '3_2_1' cannot be used for a "\
        'list of 3 intspans.'
    assert r._group_by_packs(r.spans, '2_1') == \
        [[[intspan('1'), intspan('2-3')], [intspan('2,4')]],
         [[intspan('1'), intspan('2,4')], [intspan('2-3')]],
         [[intspan('2-3'), intspan('2,4')], [intspan('1')]]]
    r = IntspansProduct('20-30×20-40×20-50×20-60×20-90')
    assert r._group_by_packs(r.spans, '3_2') == \
        [[[intspan('20-30'), intspan('20-40'), intspan('20-50')],
          [intspan('20-60'), intspan('20-90')]],
         [[intspan('20-30'), intspan('20-40'), intspan('20-60')],
          [intspan('20-50'), intspan('20-90')]],
         [[intspan('20-30'), intspan('20-40'), intspan('20-90')],
          [intspan('20-50'), intspan('20-60')]],
         [[intspan('20-30'), intspan('20-50'), intspan('20-60')],
          [intspan('20-40'), intspan('20-90')]],
         [[intspan('20-30'), intspan('20-50'), intspan('20-90')],
          [intspan('20-40'), intspan('20-60')]],
         [[intspan('20-30'), intspan('20-60'), intspan('20-90')],
          [intspan('20-40'), intspan('20-50')]],
         [[intspan('20-40'), intspan('20-50'), intspan('20-60')],
          [intspan('20-30'), intspan('20-90')]],
         [[intspan('20-40'), intspan('20-50'), intspan('20-90')],
          [intspan('20-30'), intspan('20-60')]],
         [[intspan('20-40'), intspan('20-60'), intspan('20-90')],
          [intspan('20-30'), intspan('20-50')]],
         [[intspan('20-50'), intspan('20-60'), intspan('20-90')],
          [intspan('20-30'), intspan('20-40')]]]
nicolashainaux/mathmaker
[ 4, 1, 4, 34, 1468416792 ]
def test_intspansproduct_rebuild_spans_from_packs():
    filtered_packs = [[intspan('1'), intspan('3')]]
    assert IntspansProduct._rebuild_spans_from_packs(filtered_packs, '3_2') \
        == [[intspan('1'), intspan('1'), intspan('1'), intspan('3'),
             intspan('3')]]
    assert IntspansProduct._rebuild_spans_from_packs(filtered_packs, '2_2') \
        == [[intspan('1'), intspan('1'), intspan('3'), intspan('3')]]
    assert IntspansProduct._rebuild_spans_from_packs(filtered_packs, '1_1') \
        == [[intspan('1'), intspan('3')]]
    filtered_packs = [[intspan('1'), intspan('3')],
                      [intspan('4'), intspan('5-6')]]
    assert IntspansProduct._rebuild_spans_from_packs(filtered_packs, '3_1') \
        == [[intspan('1'), intspan('1'), intspan('1'), intspan('3')],
            [intspan('4'), intspan('4'), intspan('4'), intspan('5-6')]]
nicolashainaux/mathmaker
[ 4, 1, 4, 34, 1468416792 ]
def test_intspansproduct_random_draw_constructible():
    r = IntspansProduct('3×4×6,7')
    d = r.random_draw(constructible=True)
    assert d == (3, 4, 6)
    d = r.random_draw(constructible=False)
    assert d == (3, 4, 7)
    r = IntspansProduct('3×4×7-10')
    with pytest.raises(RuntimeError) as excinfo:
        r.random_draw(constructible=True)
    assert str(excinfo.value) == 'Impossible to draw a constructible int '\
        "tuple from ['3', '4', '7-10'].\n"
    r = IntspansProduct('4-5×8×12')
    d = r.random_draw(constructible=True)
    assert d == (5, 8, 12)
    d = r.random_draw(constructible=False)
    assert d == (4, 8, 12)
    r = IntspansProduct('4×8-9×12-16')
    d = r.random_draw(constructible=True)
    assert d[0:2] == (4, 9)
    r = IntspansProduct('4×12-16×8-9')
    d = r.random_draw(constructible=True)
    assert d[0:2] == (4, 9)
    r = IntspansProduct('4×1-1002×1006-2006')
    with pytest.raises(RuntimeError) as excinfo:
        r.random_draw(constructible=True)
    assert str(excinfo.value) == 'Impossible to draw a constructible int '\
        "tuple from ['4', '1006-2006', '1-1002'].\n"
    r = IntspansProduct('4×4×4')
    d = r.random_draw(constructible=True)
    assert d == (4, 4, 4)
    r = IntspansProduct('3-5×3-5×3-5')
    with pytest.raises(RuntimeError) as excinfo:
        r.random_draw(constructible=False)
    assert str(excinfo.value) == 'Impossible to draw a not constructible int '\
        "tuple from ['3-5', '3-5', '3-5'].\n"
    r = IntspansProduct('5-6×3-5×3-5')
    d = r.random_draw(constructible=False)
    assert d == (3, 3, 6)
    r = IntspansProduct('2-7×5×4-5')
    with pytest.raises(RuntimeError) as excinfo:
        r.random_draw(constructible=False)
    assert str(excinfo.value) == 'Impossible to draw a not constructible int '\
        "tuple from ['5', '4-5', '2-7'].\n"
nicolashainaux/mathmaker
[ 4, 1, 4, 34, 1468416792 ]
def test_parse_sql_creation_query():
    """Check if parse_sql_creation_query parses correctly."""
    assert parse_sql_creation_query('''CREATE TABLE w3l
        (id INTEGER PRIMARY KEY, language TEXT, word TEXT,
         drawDate INTEGER)''') == \
        ('w3l', ['id', 'language', 'word', 'drawDate'])
    assert parse_sql_creation_query('''CREATE TABLE int_pairs
        (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER,
         lock_equal_products INTEGER, drawDate INTEGER, clever INTEGER,
         suits_for_deci1 INTEGER, suits_for_deci2 INTEGER)''') == \
        ('int_pairs', ['id', 'nb1', 'nb2', 'lock_equal_products', 'drawDate',
                       'clever', 'suits_for_deci1', 'suits_for_deci2'])
    assert parse_sql_creation_query('''CREATE TABLE digits_places
        (id INTEGER PRIMARY KEY, place DECIMAL(4, 3),
         drawDate INTEGER)''') == \
        ('digits_places', ['id', 'place', 'drawDate'])
nicolashainaux/mathmaker
[ 4, 1, 4, 34, 1468416792 ]
def _get_room_mapping():
    return {(r.location.name, r.name): r
            for r in Room.query.options(lazyload(Room.owner),
                                        joinedload(Room.location))}
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def __init__(self, importer, old_event, event):
    self.importer = importer
    self.old_event = old_event
    self.event = event
    self.event_person_map = {}
    self.legacy_session_map = {}
    self.legacy_session_ids_used = set()
    self.legacy_contribution_map = {}
    self.legacy_contribution_type_map = {}
    self.legacy_contribution_field_map = {}
    self.legacy_field_option_id_map = {}
    # we know some relationships are empty; prevent SA from loading them
    set_committed_value(self.event, 'references', [])
    set_committed_value(self.event, 'person_links', [])
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def run(self):
    self.importer.print_success('Importing {}'.format(self.old_event),
                                event_id=self.event.id)
    self.event.references = list(self._process_references(EventReference,
                                                          self.old_event))
    self._migrate_event_persons()
    self._migrate_event_persons_links()
    self._migrate_contribution_types()
    self._migrate_contribution_fields()
    self._migrate_sessions()
    self._migrate_contributions()
    self._migrate_abstracts()
    self._migrate_timetable()
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _process_principal(self, principal_cls, principals, legacy_principal,
                       name, read_access=None, full_access=None, roles=None,
                       allow_emails=True):
    if legacy_principal is None:
        return
    elif isinstance(legacy_principal, basestring):
        user = self.importer.all_users_by_email.get(legacy_principal)
        principal = user or EmailPrincipal(legacy_principal)
    else:
        principal = self._convert_principal(legacy_principal)
    if principal is None:
        self.importer.print_warning(
            cformat('%{yellow}{} does not exist:%{reset} {}')
            .format(name, legacy_principal),
            always=False, event_id=self.event.id)
        return
    elif not allow_emails and isinstance(principal, EmailPrincipal):
        self.importer.print_warning(
            cformat('%{yellow}{} cannot be an email principal:%{reset} {}')
            .format(name, legacy_principal),
            always=False, event_id=self.event.id)
        return
    try:
        entry = principals[principal]
    except KeyError:
        entry = principal_cls(principal=principal, full_access=False,
                              roles=[])
        principals[principal] = entry
    if read_access:
        entry.read_access = True
    if full_access:
        entry.full_access = True
    if roles:
        entry.roles = sorted(set(entry.roles) | set(roles))
    if not self.importer.quiet:
        self.importer.print_info(' - [{}] {}'.format(name.lower(), principal))
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _process_ac(self, principal_cls, principals, ac, allow_emails=True):
    # read access
    for principal in ac.allowed:
        self._process_principal(principal_cls, principals, principal,
                                'Access', read_access=True,
                                allow_emails=allow_emails)
    # email-based read access
    emails = getattr(ac, 'allowedEmail', [])
    self._process_principal_emails(principal_cls, principals, emails,
                                   'Access', read_access=True,
                                   allow_emails=allow_emails)
    # managers
    for manager in ac.managers:
        self._process_principal(principal_cls, principals, manager,
                                'Manager', full_access=True,
                                allow_emails=allow_emails)
    # email-based managers
    emails = getattr(ac, 'managersEmail', [])
    self._process_principal_emails(principal_cls, principals, emails,
                                   'Manager', full_access=True,
                                   allow_emails=allow_emails)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _process_keywords(self, keywords):
    return map(convert_to_unicode, keywords.splitlines())
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _get_person(self, old_person):
    email = (getattr(old_person, '_email', None) or
             getattr(old_person, 'email', None))
    email = sanitize_email(convert_to_unicode(email).lower()) if email else email
    if not is_valid_mail(email, False):
        email = None
    return (self.event_person_map.get(email) if email
            else self._create_person(old_person, with_event=True))
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _update_link_data(self, link, data_list):
    for attr in PERSON_INFO_MAP.itervalues():
        value = most_common(data_list, key=itemgetter(attr))
        if value and value != getattr(link, attr):
            setattr(link, attr, value)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_event_persons_links(self):
    person_link_map = {}
    for chair in getattr(self.old_event, '_chairs', []):
        person = self._get_person(chair)
        if not person:
            continue
        link = person_link_map.get(person)
        if link:
            self.importer.print_warning(
                cformat('%{yellow!}Duplicated chair "{}" for event')
                .format(person.full_name),
                event_id=self.event.id
            )
        else:
            link = EventPersonLink(person=person,
                                   **self._get_person_data(chair))
            person_link_map[person] = link
            self.event.person_links.append(link)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_sessions(self):
    sessions = []
    friendly_id_map = {}
    friendly_ids_used = set()
    skipped = []
    for id_, session in sorted(self.old_event.sessions.items(),
                               key=lambda x: (x[0].isdigit(),
                                              int(x[0]) if x[0].isdigit()
                                              else x[0])):
        id_ = int(id_.lstrip('s'))  # legacy: s123
        if id_ in friendly_ids_used:
            skipped.append(session)
            continue
        friendly_id_map[session] = id_
        friendly_ids_used.add(id_)
    for i, session in enumerate(skipped,
                                (max(friendly_ids_used)
                                 if friendly_ids_used else 0) + 1):
        assert i not in friendly_ids_used
        friendly_id_map[session] = i
        friendly_ids_used.add(i)
    for old_session in self.old_event.sessions.itervalues():
        sessions.append(self._migrate_session(old_session,
                                              friendly_id_map[old_session]))
    if sessions:
        self.event._last_friendly_session_id = max(s.friendly_id
                                                   for s in sessions)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_contribution_fields(self):
    try:
        afm = self.old_event.abstractMgr._abstractFieldsMgr
    except AttributeError:
        return
    pos = 0
    content_field = None
    for field in afm._fields:
        # it may happen that there is a second 'content' field (old version
        # schemas); in that case, let's use the first one as description and
        # keep the second one as a field
        if field._id == 'content' and not content_field:
            content_field = field
        else:
            pos += 1
            self._migrate_contribution_field(field, pos)

    if not content_field:
        self.importer.print_warning(
            cformat('%{yellow!}Event has no content field!%{reset}'),
            event_id=self.event.id)
        return

    def _positive_or_none(value):
        try:
            value = int(value)
        except (TypeError, ValueError):
            return None
        return value if value > 0 else None

    limitation = getattr(content_field, '_limitation', 'chars')
    settings = {
        'is_active': bool(content_field._active),
        'is_required': bool(content_field._isMandatory),
        'max_words': (_positive_or_none(content_field._maxLength)
                      if limitation == 'words' else None),
        'max_length': (_positive_or_none(content_field._maxLength)
                       if limitation == 'chars' else None)
    }
    if settings != abstracts_settings.defaults['description_settings']:
        abstracts_settings.set(self.event, 'description_settings', settings)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_contributions(self):
    contribs = []
    friendly_id_map = {}
    friendly_ids_used = set()
    skipped = []
    for id_, contrib in sorted(self.old_event.contributions.items(),
                               key=lambda x: (not x[0].isdigit(),
                                              int(x[0]) if x[0].isdigit()
                                              else x[0])):
        try:
            id_ = int(id_)  # legacy: s1t2
        except ValueError:
            skipped.append(contrib)
            continue
        if id_ in friendly_ids_used:
            skipped.append(contrib)
            continue
        friendly_id_map[contrib] = id_
        friendly_ids_used.add(id_)
    for i, contrib in enumerate(skipped,
                                (max(friendly_ids_used)
                                 if friendly_ids_used else 0) + 1):
        assert i not in friendly_ids_used
        friendly_id_map[contrib] = i
        friendly_ids_used.add(i)
    for old_contrib in self.old_event.contributions.itervalues():
        contribs.append(self._migrate_contribution(old_contrib,
                                                   friendly_id_map[old_contrib]))
    if contribs:
        self.event._last_friendly_contribution_id = max(c.friendly_id
                                                        for c in contribs)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_contribution(self, old_contrib, friendly_id):
    ac = old_contrib._Contribution__ac
    description = old_contrib._fields.get('content', '')
    # str or AbstractFieldContent
    description = convert_to_unicode(getattr(description, 'value', description))
    status = getattr(old_contrib, '_status', None)
    status_class = status.__class__.__name__ if status else None

    contrib = Contribution(event_new=self.event, friendly_id=friendly_id,
                           title=convert_to_unicode(old_contrib.title),
                           description=description,
                           duration=old_contrib.duration,
                           protection_mode=PROTECTION_MODE_MAP[ac._accessProtection],
                           board_number=convert_to_unicode(getattr(old_contrib, '_boardNumber', '')),
                           keywords=self._process_keywords(old_contrib._keywords),
                           is_deleted=(status_class == 'ContribStatusWithdrawn'))
    if old_contrib._track is not None:
        contrib.track_id = int(old_contrib._track.id)
    if not self.importer.quiet:
        self.importer.print_info(cformat('%{cyan}Contribution%{reset} {}')
                                 .format(contrib.title))
    self.legacy_contribution_map[old_contrib] = contrib
    contrib.legacy_mapping = LegacyContributionMapping(
        event_new=self.event, legacy_contribution_id=old_contrib.id)
    # contribution type
    if old_contrib._type is not None:
        try:
            contrib.type = self.legacy_contribution_type_map[old_contrib._type]
        except AttributeError:
            self.importer.print_warning(
                cformat('%{yellow!}Invalid contrib type {}')
                .format(convert_to_unicode(old_contrib._type._name)),
                event_id=self.event.id)
    # ACLs (managers, read access, submitters)
    principals = {}
    self._process_ac(ContributionPrincipal, principals, ac)
    for submitter in old_contrib._submitters:
        self._process_principal(ContributionPrincipal, principals, submitter,
                                'Submitter', roles={'submit'})
    self._process_principal_emails(ContributionPrincipal, principals,
                                   getattr(old_contrib, '_submittersEmail', []),
                                   'Submitter', roles={'submit'})
    contrib.acl_entries = set(principals.itervalues())
    # speakers, authors and co-authors
    contrib.person_links = list(self._migrate_contribution_person_links(old_contrib))
    # references ("report numbers")
    contrib.references = list(self._process_references(ContributionReference,
                                                       old_contrib))
    # contribution/abstract fields
    contrib.field_values = list(self._migrate_contribution_field_values(old_contrib))
    contrib.subcontributions = [
        self._migrate_subcontribution(old_contrib, old_subcontrib, pos)
        for pos, old_subcontrib in enumerate(old_contrib._subConts, 1)]
    contrib._last_friendly_subcontribution_id = len(contrib.subcontributions)
    return contrib
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_abstract_judgments(self, old_abstract):
    if not hasattr(old_abstract, '_trackJudgementsHistorical'):
        self.importer.print_warning(
            cformat('%{blue!}Abstract {} %{yellow}had no judgment history!%{reset}')
            .format(old_abstract._id),
            event_id=self.event.id)
        return

    history = old_abstract._trackJudgementsHistorical
    if not hasattr(history, 'iteritems'):
        self.importer.print_warning(
            'Abstract {} had corrupt judgment history ({}).'
            .format(old_abstract._id, history),
            event_id=self.event.id)
        return

    for track_id, judgments in history.iteritems():
        seen_judges = set()
        for old_judgment in judgments:
            judge = old_judgment._responsible.user if old_judgment._responsible else None
            if not judge:
                self.importer.print_warning(
                    cformat('%{blue!}Abstract {} %{yellow}had an empty judge ({})!%{reset}')
                    .format(old_abstract._id, old_judgment),
                    event_id=self.event.id)
                continue
            elif judge in seen_judges:
                self.importer.print_warning(
                    cformat("%{blue!}Abstract {}: %{yellow}judge '{}' seen more than once ({})!%{reset}")
                    .format(old_abstract._id, judge, old_judgment),
                    event_id=self.event.id)
                continue

            new_judgment = Judgment(creation_dt=as_utc(old_judgment._date),
                                    track_id=old_judgment._track.id,
                                    judge=judge)
            seen_judges.add(judge)
            if old_judgment.__class__.__name__ == 'AbstractAcceptance' \
                    and old_judgment._contribType:
                contrib_type = old_judgment._contribType
                try:
                    new_judgment.accepted_type = \
                        self.legacy_contribution_type_map[contrib_type]
                except KeyError:
                    self.importer.print_warning(
                        cformat("%{blue!}Abstract {}: %{yellow}contribution type '{}' unknown!%{reset}")
                        .format(old_abstract._id,
                                getattr(contrib_type, '_name', contrib_type)),
                        event_id=self.event.id)
            yield new_judgment
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_abstract_field_values(self, old_abstract):
    fields = dict(getattr(old_abstract, '_fields', {}))
    fields.pop('content', None)
    for field_id, field_content in fields.iteritems():
        value = convert_to_unicode(getattr(field_content, 'value', field_content))
        if not value:
            continue
        try:
            new_field = self.legacy_contribution_field_map[field_id]
        except KeyError:
            self.importer.print_warning(
                cformat('%{yellow!}Contribution field "{}" does not exist')
                .format(field_id),
                event_id=self.event.id)
            continue
        new_value = self._process_contribution_field_value(
            field_id, value, new_field, AbstractFieldValue)
        if new_value:
            if not self.importer.quiet:
                self.importer.print_info(
                    cformat('%{green} - [field]%{reset} {}: {}')
                    .format(new_field.title, new_value.data))
            yield new_value
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_subcontribution(self, old_contrib, old_subcontrib, position):
    subcontrib = SubContribution(
        position=position, friendly_id=position,
        duration=old_subcontrib.duration,
        title=convert_to_unicode(old_subcontrib.title),
        description=convert_to_unicode(old_subcontrib.description))
    if not self.importer.quiet:
        self.importer.print_info(
            cformat('  %{cyan!}SubContribution%{reset} {}')
            .format(subcontrib.title))
    subcontrib.legacy_mapping = LegacySubContributionMapping(
        event_new=self.event,
        legacy_contribution_id=old_contrib.id,
        legacy_subcontribution_id=old_subcontrib.id)
    subcontrib.references = list(
        self._process_references(SubContributionReference, old_subcontrib))
    subcontrib.person_links = list(
        self._migrate_subcontribution_person_links(old_subcontrib))
    return subcontrib
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_subcontribution_person_links(self, old_entry):
    person_link_map = {}
    person_link_data_map = defaultdict(list)
    for speaker in getattr(old_entry, 'speakers', []):
        person = self._get_person(speaker)
        if not person:
            continue
        person_link_data = self._get_person_data(speaker)
        person_link_data_map[person].append(person_link_data)
        link = person_link_map.get(person)
        if link:
            self._update_link_data(link, person_link_data_map[person])
            self.importer.print_warning(
                cformat('%{yellow!}Duplicated speaker "{}" for sub-contribution')
                .format(person.full_name),
                event_id=self.event.id
            )
        else:
            link = SubContributionPersonLink(person=person,
                                             **person_link_data)
            person_link_map[person] = link
            yield link
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_timetable(self):
    if not self.importer.quiet:
        self.importer.print_info(cformat('%{green}Timetable...'))
    self._migrate_timetable_entries(self.old_event._Conference__schedule._entries)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_contribution_timetable_entry(self, old_entry, session_block=None):
    old_contrib = old_entry._LinkedTimeSchEntry__owner
    contrib = self.legacy_contribution_map[old_contrib]
    contrib.timetable_entry = TimetableEntry(event_new=self.event,
                                             start_dt=old_contrib.startDate)
    self._migrate_location(old_contrib, contrib)
    if session_block:
        contrib.session = session_block.session
        contrib.session_block = session_block
        contrib.timetable_entry.parent = session_block.timetable_entry
    return contrib.timetable_entry
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _migrate_block_timetable_entry(self, old_entry):
    old_block = old_entry._LinkedTimeSchEntry__owner
    try:
        session = self.legacy_session_map[old_block.session]
    except KeyError:
        self.importer.print_warning(
            cformat('%{yellow!}Found zombie session {}')
            .format(old_block.session),
            event_id=self.event.id)
        session = self._migrate_session(old_block.session)
    session_block = SessionBlock(session=session,
                                 title=convert_to_unicode(old_block.title),
                                 duration=old_block.duration)
    session_block.timetable_entry = TimetableEntry(event_new=self.event,
                                                   start_dt=old_block.startDate)
    if session.legacy_mapping is not None:
        session_block.legacy_mapping = LegacySessionBlockMapping(
            event_new=self.event,
            legacy_session_id=old_block.session.id,
            legacy_session_block_id=old_block.id)
    self._migrate_location(old_block, session_block)
    session_block.person_links = list(
        self._migrate_session_block_person_links(old_block))
    self._migrate_timetable_entries(old_block._schedule._entries, session_block)
    return session_block.timetable_entry
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _get_parent_location(self, obj, attr):
    type_ = obj.__class__.__name__
    if type_ == 'SessionSlot':
        conf = obj.session.conference
        return getattr(conf, attr)[0] if getattr(conf, attr, None) else None
    elif type_ in ('BreakTimeSchEntry', 'Contribution', 'AcceptedContribution'):
        if type_ == 'AcceptedContribution':
            contrib_parent = obj._session
            if getattr(contrib_parent, attr, None):
                return getattr(contrib_parent, attr)[0]
            else:
                owner = contrib_parent.conference
        elif type_ == 'Contribution':
            contrib_parent = obj.parent
            if attr == 'places' and contrib_parent:
                places = getattr(contrib_parent, attr, None)
                return (getattr(contrib_parent, 'place', None)
                        if not places else places[0])
            if attr == 'rooms' and contrib_parent:
                rooms = getattr(contrib_parent, attr, None)
                return (getattr(contrib_parent, 'room', None)
                        if not rooms else rooms[0])
        elif type_ == 'BreakTimeSchEntry':
            owner = obj._sch._owner
        return self._get_parent_location(owner, attr)
    elif type_ == 'Conference':
        return getattr(obj, attr)[0] if getattr(obj, attr, None) else None
    elif type_ == 'Session':
        return self._get_parent_location(obj.conference, attr)
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def __init__(self, **kwargs):
    self.default_group_provider = kwargs.pop('default_group_provider')
    self.parallel = kwargs.pop('parallel')
    self.reference_types = kwargs.pop('reference_types')
    super(EventTimetableImporter, self).__init__(**kwargs)
    self.reference_type_map = {}
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def decorate_command(command):
    def _process_parallel(ctx, param, value):
        if value is None:
            return None
        n, i = map(int, value.split(':', 1))
        if n <= 1:
            raise click.BadParameter('N must be >1')
        if i not in range(n):
            raise click.BadParameter('I must be in [0..{})'.format(n))
        return n, i

    command = click.option('--default-group-provider', default='legacy-ldap',
                           help="Name of the default group provider")(command)
    command = click.option('-R', '--reference-type', 'reference_types',
                           multiple=True,
                           help="Reference types ('report numbers'). Can be "
                                "used multiple times to specify multiple "
                                "reference types")(command)
    command = click.option('-P', '--parallel', metavar='N:I',
                           callback=_process_parallel,
                           help='Parallel mode - migrates only events with '
                                '`ID mod N = I`. When using this, you need '
                                'to run the script N times with I being in '
                                '[0..N)')(command)
    return command
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
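The `-P N:I` option documented above partitions events by `ID mod N = I`, so N independent runs cover every event exactly once with no overlap. A quick worked example of that partitioning (the event IDs are invented):

event_ids = [100, 101, 102, 103, 104, 105]
n = 3  # three parallel runs
for i in range(n):
    # run i migrates only events with ID mod N == I
    print(i, [e for e in event_ids if e % n == i])
# 0 [102, 105]
# 1 [100, 103]
# 2 [101, 104]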
def _load_data(self):
    self.print_step("Loading some data")
    self.room_mapping = _get_room_mapping()
    self.venue_mapping = {location.name: location
                          for location in Location.query}
    self.all_users_by_email = {}
    for user in User.query.options(joinedload('_all_emails')):
        if user.is_deleted:
            continue
        for email in user.all_emails:
            self.all_users_by_email[email] = user
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def migrate_reference_types(self):
    if self.parallel and self.parallel[1]:
        self.print_step("Loading reference types")
        self.reference_type_map = {r.name: r for r in ReferenceType.query}
        return
    self.print_step("Migrating reference types")
    for name in self.reference_types:
        self.reference_type_map[name] = reftype = ReferenceType(name=name)
        db.session.add(reftype)
        self.print_success(name)
    db.session.commit()
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def _iter_events(self):
    it = self.zodb_root['conferences'].itervalues()
    total = len(self.zodb_root['conferences'])
    all_events_query = Event.find(is_deleted=False).options(
        undefer('_last_friendly_contribution_id'),
        undefer('_last_friendly_session_id'))
    if self.parallel:
        n, i = self.parallel
        it = (e for e in it if int(e.id) % n == i)
        total = int(ceil(total / n))
        all_events_query = all_events_query.filter(Event.id % n == i)
    if self.quiet:
        it = verbose_iterator(it, total, attrgetter('id'), attrgetter('title'))
    all_events = {e.id: e for e in all_events_query}
    for old_event in self.flushing_iterator(it):
        event = all_events.get(int(old_event.id))
        if event is None:
            self.print_error(cformat('%{red!}Event is only in ZODB but not in SQL'),
                             event_id=old_event.id)
            continue
        yield old_event, event
belokop/indico_bare
[ 1, 1, 1, 5, 1465204236 ]
def save_teletries_to_files(telems: List[Dict]):
    for telem in tqdm(telems, desc="Telemetries saved to files", position=3):
        SampleFileParser.save_telemetry_to_file(telem)
guardicore/monkey
[ 6098, 725, 6098, 196, 1440919371 ]
def save_telemetry_to_file(telem: Dict):
    telem_filename = telem["name"] + telem["method"]
    for i in range(MAX_SAME_TYPE_TELEM_FILES):
        if not path.exists(path.join(TELEM_DIR_PATH, (str(i) + telem_filename))):
            telem_filename = str(i) + telem_filename
            break
    with open(path.join(TELEM_DIR_PATH, telem_filename), "w") as file:
        file.write(json.dumps(telem))
guardicore/monkey
[ 6098, 725, 6098, 196, 1440919371 ]
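The loop in save_telemetry_to_file avoids filename collisions by prepending the first unused index to the base name. A self-contained simulation of that naming scheme (the set stands in for the files on disk; the telemetry name is invented):

existing = set()  # stands in for the contents of TELEM_DIR_PATH

def next_filename(base, max_files=10):
    # same first-free-index rule as save_telemetry_to_file
    for i in range(max_files):
        candidate = str(i) + base
        if candidate not in existing:
            return candidate
    return base  # all indices taken: the unprefixed name is reused

for _ in range(3):
    name = next_filename('scanPOST')
    existing.add(name)
    print(name)  # 0scanPOST, 1scanPOST, 2scanPOST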
def read_telem_files() -> List[str]:
    telems = []
    try:
        file_paths = [
            path.join(TELEM_DIR_PATH, f)
            for f in listdir(TELEM_DIR_PATH)
            if path.isfile(path.join(TELEM_DIR_PATH, f))
        ]
    except FileNotFoundError:
        raise FileNotFoundError(
            "Telemetries to send not found. "
            "Refer to readme to figure out how to generate telemetries and "
            "where to put them."
        )
    for file_path in file_paths:
        with open(file_path, "r") as telem_file:
            telem_string = "".join(telem_file.readlines()).replace("\n", "")
            telems.append(telem_string)
    return telems
guardicore/monkey
[ 6098, 725, 6098, 196, 1440919371 ]
def n_squares(n):
    return [i**2 for i in range(2, n)]
kylebegovich/ProjectEuler
[ 1, 1, 1, 4, 1490033748 ]
def multiSlice(s, cutpoints):
    k = len(cutpoints)
    if k == 0:
        return [s]
    else:
        multislices = [s[:cutpoints[0]]]
        multislices.extend(s[cutpoints[i]:cutpoints[i+1]]
                           for i in range(k-1))
        multislices.append(s[cutpoints[k-1]:])
        return multislices
kylebegovich/ProjectEuler
[ 1, 1, 1, 4, 1490033748 ]
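A short usage example of multiSlice: the cutpoints are the positions at which the sequence is split.

print(multiSlice([1, 2, 3, 4, 5], [2, 4]))  # [[1, 2], [3, 4], [5]]
print(multiSlice('abcdef', [3]))            # ['abc', 'def']
print(multiSlice('abc', []))                # ['abc']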
def list_sum(num_list):
    outer_sum = 0
    for sub_list in num_list:
        inner_sum = 0
        power = 1
        for digit in sub_list[::-1]:
            inner_sum += power * digit
            power *= 10
        outer_sum += inner_sum
    return outer_sum
kylebegovich/ProjectEuler
[ 1, 1, 1, 4, 1490033748 ]
def is_s_num(num):
    sqrt = num**0.5
    for part in allPartitions([int(i) for i in str(num)]):
        if sqrt == list_sum(part):
            return True
    return False
kylebegovich/ProjectEuler
[ 1, 1, 1, 4, 1490033748 ]
def T(N):
    squares = n_squares(N)
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for n in squares:
        if is_s_num(n):
            print(n, "is true")
            total += n
    return total
kylebegovich/ProjectEuler
[ 1, 1, 1, 4, 1490033748 ]
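To make the intent of is_s_num and T concrete: a square n is counted when its digits can be split into contiguous groups whose recombined values sum to sqrt(n) (this appears to be the Project Euler "S-number" definition, assuming the undefined allPartitions enumerates contiguous digit groupings). A worked example of the arithmetic, using list_sum directly:

# Worked example (assuming allPartitions yields contiguous digit groupings):
#   81  -> sqrt = 9;  digits [8, 1]    -> 8 + 1  = 9   -> S-number
#   100 -> sqrt = 10; digits [1, 0, 0] -> 10 + 0 = 10  -> S-number
#   25  -> sqrt = 5;  splits give 2 + 5 = 7 or 25      -> not an S-number
# list_sum recombines each group back into a number before summing:
print(list_sum([[8], [1]]))     # 9
print(list_sum([[1, 0], [0]]))  # 10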
def __init__(self, mainWindows):
    QWidget.__init__(self)
    uic.loadUi(directory + "/../ui/stats_tipster.ui", self)

    gettext.textdomain("betcon")
    gettext.bindtextdomain("betcon", "../lang/mo" + mainWindows.lang)
    gettext.bindtextdomain("betcon", "/usr/share/locale" + mainWindows.lang)

    self.mainWindows = mainWindows
    self.mainWindows.setWindowTitle(_("Stats Tipsters") + " | Betcon v"
                                    + mainWindows.version)
    self.translate()
    try:
        self.initData()
    except Exception:
        print(_("Error trying to load the data."))
        self.setEnabled(False)
    self.cmbYear.activated.connect(self.updateMonths)
    self.cmbMonth.activated.connect(self.updateTree)
soker90/betcon
[ 7, 2, 7, 2, 1499643026 ]
def initData(self):
    self.years, self.months = LibStats.getYears()
    self.cmbYear.addItems(self.years.keys())
    firstKey = next(iter(self.years))
    self.cmbMonth.addItems(self.getMonths(firstKey))

    data = LibStats.getTipster()
    items = []
    for i in data:
        item = QTreeWidgetItem(i)
        item = paint_row(item, i[5])
        items.append(item)
    self.treeTotal.addTopLevelItems(items)
    self.updateMonths()
soker90/betcon
[ 7, 2, 7, 2, 1499643026 ]
def updateTree(self):
    year = self.cmbYear.currentText()
    sMonth = self.cmbMonth.currentText()
    month = key_from_value(self.months, sMonth)

    data = LibStats.getTipster(year, month)
    self.treeMonth.clear()
    items = []
    for i in data:
        item = QTreeWidgetItem(i)
        item = paint_row(item, i[5])
        items.append(item)
    self.treeMonth.addTopLevelItems(items)
soker90/betcon
[ 7, 2, 7, 2, 1499643026 ]
def __init__(self, name=None, **kwargs):
    self.name = name
ValyrianTech/BitcoinSpellbook-v0.3
[ 16, 2, 16, 2, 1503599758 ]
def genome_template(self):
    pass
ValyrianTech/BitcoinSpellbook-v0.3
[ 16, 2, 16, 2, 1503599758 ]
def model_to_genome(self, model):
    pass
ValyrianTech/BitcoinSpellbook-v0.3
[ 16, 2, 16, 2, 1503599758 ]
def convert_text(node, context):
    context["top"].append(node.astext())
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_title(node, context):
    level = context["heading-level"]
    if level == 0:
        # The document did not start with a section
        level = 1
    heading = odf_create_heading(level, node.astext(),
                                 style='Heading_20_%s' % level)
    context["body"].append(heading)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_list(node, context, list_type):
    # Reuse template styles
    if list_type == "enumerated":
        style_name = "Numbering_20_1"
    else:
        style_name = "List_20_1"
    odf_list = odf_create_list(style=style_name)
    context["top"].append(odf_list)
    # Save the current top
    old_top = context["top"]
    for item in node:
        if item.tagname != "list_item":
            printwarn("node not supported: %s" % item.tagname)
            continue
        # Create a new item
        odf_item = odf_create_list_item()
        odf_list.append(odf_item)
        # A new top
        context["top"] = odf_item
        for child in item:
            convert_node(child, context)
    # And restore the top
    context["top"] = old_top
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_list_bullet(node, context):
    return convert_list(node, context, "bullet")
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_footnote(node, context):
    # XXX ids is a list ??
    refid = node.get("ids")[0]
    # Find the footnote
    footnotes = context["footnotes"]
    if refid not in footnotes:
        printwarn('unknown footnote "%s"' % refid)
        return
    footnote_body = footnotes[refid].get_element("text:note-body")
    # Save the current top
    old_top = context["top"]
    # Fill the note
    context["top"] = footnote_body
    for child in node:
        # We skip the label (already added)
        if child.tagname == "label":
            continue
        convert_node(child, context)
    # And restore the top
    context["top"] = old_top
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def _convert_style_like(node, context, style_name):
    # Create the span
    span = odf_create_span(style=style_name)
    context["top"].append(span)
    # Save the current top
    old_top = context["top"]
    # Convert
    context["top"] = span
    for child in node:
        convert_node(child, context)
    # And restore the top
    context["top"] = old_top
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_emphasis(node, context):
    emphasis_style = _get_emphasis_style(context).get_style_name()
    # Convert
    _convert_style_like(node, context, emphasis_style)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_strong(node, context):
    strong_style = _get_strong_style(context).get_style_name()
    # Convert
    _convert_style_like(node, context, strong_style)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_literal_block(node, context):
    paragraph = odf_create_paragraph(style="Preformatted_20_Text")
    context["top"].append(paragraph)
    # Convert
    for child in node:
        # Only text
        if child.tagname != "#text":
            printwarn('node "%s" not supported in literal block' % (
                child.tagname))
            continue
        text = child.astext()
        tmp = []
        spaces = 0
        for c in text:
            if c == '\n':
                if tmp:
                    tmp = u"".join(tmp)
                    paragraph.append(tmp)
                    tmp = []
                spaces = 0
                paragraph.append(odf_create_line_break())
            elif c == '\r':
                continue
            elif c == ' ':
                spaces += 1
            elif c == '\t':
                # Tab = 4 spaces
                spaces += 4
            else:
                if spaces >= 2:
                    if tmp:
                        tmp = u"".join(tmp)
                        paragraph.append(tmp)
                        tmp = []
                    paragraph.append(
                        odf_create_undividable_space(spaces))
                    spaces = 0
                elif spaces == 1:
                    tmp.append(' ')
                    spaces = 0
                tmp.append(c)
        if tmp:
            tmp = u"".join(tmp)
            paragraph.append(tmp)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
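The character loop in convert_literal_block implements a small whitespace protocol: newlines become ODF line breaks, a tab counts as four spaces, and any run of two or more spaces becomes a non-breaking ("undividable") space element so preformatted alignment survives in ODF. Below is a standalone Python 3 sketch of just that run-length logic, emitting tuples instead of ODF elements (the event names are invented for illustration):

def whitespace_events(text):
    # mirror of the space/tab handling in convert_literal_block
    events, tmp, spaces = [], [], 0
    for c in text:
        if c == '\n':
            if tmp:
                events.append(('text', ''.join(tmp)))
                tmp = []
            spaces = 0
            events.append(('line-break',))
        elif c == '\r':
            continue
        elif c == ' ':
            spaces += 1
        elif c == '\t':
            spaces += 4  # tab = 4 spaces
        else:
            if spaces >= 2:
                if tmp:
                    events.append(('text', ''.join(tmp)))
                    tmp = []
                events.append(('nbsp', spaces))
                spaces = 0
            elif spaces == 1:
                tmp.append(' ')
                spaces = 0
            tmp.append(c)
    if tmp:
        events.append(('text', ''.join(tmp)))
    return events

print(whitespace_events('a  b\nc d'))
# [('text', 'a'), ('nbsp', 2), ('text', 'b'), ('line-break',), ('text', 'c d')]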
def _get_term_style(context):
    styles = context['styles']
    term_style = styles.get('term')
    if term_style is not None:
        return term_style
    # Reuse template style if any
    doc = context['doc']
    term_style = doc.get_style('paragraph', u"Definition_20_List_20_Term")
    if term_style is None:
        # Create default one
        term_style = odf_create_style('paragraph',
                                      name=u"Definition_20_List_20_Term",
                                      display_name=u"Definition List Term",
                                      parent="Standard",
                                      font_weight=u"bold",
                                      area='text')
        doc.insert_style(term_style, automatic=False)
    styles['term'] = term_style
    return term_style
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_definition_list(node, context):
    """Convert a list of term/definition pairs to styled paragraphs.

    The "Definition List Term" style is looked up for term paragraphs,
    and the "Definition List Definition" style for definition paragraphs.
    """
    styles = context['styles']
    term_style = _get_term_style(context).get_style_name()
    definition_style = _get_definition_style(context).get_style_name()
    for item in node:
        if item.tagname != "definition_list_item":
            printwarn('node "%s" not supported in definition_list' % (
                item.tagname))
            continue
        for child in item:
            tagname = child.tagname
            if tagname == "term":
                paragraph = odf_create_paragraph(text=child.astext(),
                                                 style=term_style)
                context["top"].append(paragraph)
            elif tagname == "definition":
                # Push a style on the stack for next paragraphs to use
                styles['paragraph'] = definition_style
                for subchildren in child:
                    convert_node(subchildren, context)
                # Pop the paragraph style
                del styles['paragraph']
            else:
                printwarn('node "%s" not supported in definition_list_item'
                          % tagname)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def _get_caption_style(context):
    styles = context['styles']
    caption_style = styles.get('caption')
    if caption_style is not None:
        return caption_style
    caption_style = odf_create_style(
        'graphic', parent=u"Frame",
        **{'style:wrap': u"none",
           'style:vertical-pos': u"top",
           'style:vertical-rel': u"paragraph-content",
           'style:horizontal-pos': u"center",
           'style:horizontal-rel': u"paragraph-content",
           'fo:padding': u"0.25cm",
           'fo:border': u"0cm solid #000000"})
    context['doc'].insert_style(caption_style, automatic=True)
    styles['caption'] = caption_style
    return caption_style
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def _add_image(image, caption, context, width=None, height=None):
    # Load the image to find its size
    encoding = stdout.encoding if stdout.encoding is not None else "utf-8"
    try:
        image_file = open(image.encode(encoding), 'rb')
        image_object = Image.open(image_file)
    except (UnicodeEncodeError, IOError, OverflowError), e:
        printwarn('unable to insert the image "%s": %s' % (image, e))
        return
    size = image_object.size
    # Convert pixels to inches
    if width:
        try:
            width = int(width.replace('px', ''))
        except ValueError:
            raise NotImplementedError, 'only pixel units supported'
        if height:
            try:
                height = int(height)
            except ValueError:
                raise NotImplementedError, 'only pixel units supported'
        else:
            height = int(width / (float(size[0]) / float(size[1])))
        size = (width, height)
    elif height:
        try:
            height = int(height.replace('px', ''))
        except ValueError:
            raise NotImplementedError, 'only pixel units supported'
        width = int(height * (float(size[0]) / float(size[1])))
        size = (width, height)
    size = ("%sin" % (float(size[0]) / DPI), "%sin" % (float(size[1]) / DPI))
    # Add the image
    local_uri = context["doc"].add_file(image)
    # Frame style for the caption frame
    caption_style = _get_caption_style(context).get_style_name()
    # Frame style for the image frame
    image_style = _get_image_style(context).get_style_name()
    # In a text application, the image must be inserted in a paragraph
    if context["top"].get_tag() == "office:text":
        container = odf_create_paragraph()
        context["top"].append(container)
    else:
        container = context["top"]
    if caption:
        paragraph = odf_create_paragraph()
        image_frame = odf_create_image_frame(local_uri, size=size,
                                             style=image_style)
        paragraph.append(image_frame)
        paragraph.append(caption)
        # A new frame, we fix only the width
        text_frame = odf_create_text_frame(paragraph,
                                           size=(size[0], None),
                                           style=caption_style)
        container.append(text_frame)
    else:
        image_frame = odf_create_image_frame(local_uri, size=size,
                                             style=image_style)
        container.append(image_frame)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_figure(node, context):
    image = None
    caption = None
    width = None
    height = None
    for child in node:
        tagname = child.tagname
        if tagname == "image":
            if image is not None:
                printwarn("unexpected duplicate image in a figure")
                continue
            image = child.get("uri")
            width = child.get('width')
            height = child.get('height')
        elif tagname == "caption":
            if caption is not None:
                printwarn("unexpected duplicate caption in a figure")
                continue
            caption = child.astext()
    _add_image(image, caption, context, width=width, height=height)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def _get_cell_style(context):
    styles = context['styles']
    cell_style = styles.get('cell')
    if cell_style is not None:
        return cell_style
    # Give borders to cells
    cell_style = odf_create_style('table-cell', u"odf_table.A1",
                                  padding=u"0.049cm",
                                  border=u"0.002cm solid #000000")
    context['doc'].insert_style(cell_style, automatic=True)
    styles['cell'] = cell_style
    return cell_style
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def convert_node(node, context):
    tagname = node.tagname
    convert_method = convert_methods.get(tagname)
    if convert_method is not None:
        convert_method(node, context)
    else:
        printwarn("node not supported: %s" % tagname)
uliss/quneiform
[ 10, 2, 10, 6, 1306885353 ]
def daterange(start_date, end_date):
    for n in range(int((end_date - start_date).days)):
        yield start_date + timedelta(n)
centrofermi/e3monitor
[ 2, 1, 2, 8, 1417606679 ]
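Usage example: daterange yields each day from start_date up to, but not including, end_date. Repeated here with its imports so the example runs standalone (the dates are arbitrary):

from datetime import date, timedelta

def daterange(start_date, end_date):
    for n in range(int((end_date - start_date).days)):
        yield start_date + timedelta(n)

print(list(daterange(date(2014, 12, 1), date(2014, 12, 4))))
# [datetime.date(2014, 12, 1), datetime.date(2014, 12, 2),
#  datetime.date(2014, 12, 3)]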
def flirt(message):
    if len(message) <= 1:
        return ''
    for sep in '.!?':
        s, sepfound, after = message.partition(sep)
        numspace = len(s) - len(s.lstrip())
        s = ' ' * numspace + \
            random.choice(PHRASES).format(s=s.lstrip().lower(), sep=sepfound)
        return s + flirt(after)
    return message
sentriz/steely
[ 21, 14, 21, 6, 1498783471 ]
def __init__(self, bases, pv=None, *, force=False):
    self.bases = list(bases)
    if self.size > self._size_max and not force:
        raise ValueError(
            'Density matrix of the system is going to have {} items. It '
            'is probably too much. If you know what you are doing, '
            'pass `force=True` argument to the constructor.'
            .format(self.size))
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def from_pv(cls, pv, bases, *, force=False):
    return cls(bases, pv, force=force)
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def to_pv(self):
    """Get data in a form of Numpy array"""
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def from_dm(cls, dm, bases, *, force=False):
    if not hasattr(bases, '__iter__'):
        n_qubits = len(dm) // bases.dim_hilbert
        bases = [bases] * n_qubits
    return cls(bases, dm_to_pv(dm, bases), force=force)
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
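As background for from_dm above: a Pauli vector is the expansion of a density matrix in a Hermitian operator basis, with coefficients pv_i = Tr(P_i rho) under the common orthonormal convention. A single-qubit numpy sketch follows; this is a generic illustration, not quantumsim's dm_to_pv, whose basis objects and normalization may differ:

import numpy as np

# single-qubit Pauli basis, normalized so that Tr(P_i P_j) = delta_ij
paulis = [np.eye(2),
          np.array([[0, 1], [1, 0]]),
          np.array([[0, -1j], [1j, 0]]),
          np.array([[1, 0], [0, -1]])]
paulis = [p / np.sqrt(2) for p in paulis]

rho = np.array([[0.75, 0.25], [0.25, 0.25]])  # an example density matrix

pv = np.array([np.trace(p @ rho).real for p in paulis])
print(pv)  # approximately [0.707, 0.354, 0., 0.354]

# the expansion is exact: rho == sum_i pv_i * P_i
rho_back = sum(c * p for c, p in zip(pv, paulis))
print(np.allclose(rho, rho_back))  # True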
def n_qubits(self):
    return len(self.bases)
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def dim_hilbert(self):
    return tuple((b.dim_hilbert for b in self.bases))
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def size(self):
    return pytools.product(self.dim_hilbert) ** 2
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def dim_pauli(self):
    return tuple([pb.dim_pauli for pb in self.bases])
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def apply_ptm(self, operation, *qubits):
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def diagonal(self, *, get_data=True):
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def trace(self):
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def partial_trace(self, *qubits):
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def meas_prob(self, qubit):
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def renormalize(self):
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def copy(self):
    pass
brianzi/quantumsim
[ 28, 16, 28, 13, 1465822644 ]
def __init__(self, store, aID, name, currency=0, balance=0.0, mintId=None,
             currNick=False):
    ORMObject.__init__(self)
    self.IsFrozen = True
    self.Store = store
    self.ID = aID
    self._Name = name
    self._Transactions = None
    self._RecurringTransactions = []
    self._preTransactions = []
    # Make sure that Currency and Balance are not None (bug #653716)
    self.Currency = currency or 0
    self.Balance = balance or 0.0
    self.MintId = mintId or None
    self.ShowCurrencyNick = currNick or False
    self.IsFrozen = False
    Publisher.subscribe(self.onTransactionAmountChanged,
                        "ormobject.updated.Transaction.Amount")
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def GetSiblings(self):
    return [account for account in self.Parent if account is not self]
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def GetCurrency(self):
    return self._Currency
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def GetBalance(self, currency=None):
    return self.balanceAtCurrency(self.Balance, currency)
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def GetRecurringTransactions(self):
    return self._RecurringTransactions
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def SetRecurringTransactions(self, recurrings):
    self._RecurringTransactions = recurrings
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def GetName(self):
    return self._Name
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def Remove(self):
    self.Parent.Remove(self.Name)
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def AddRecurringTransaction(self, amount, description, date, repeatType,
                            repeatEvery=1, repeatOn=None, endDate=None,
                            source=None):
    # Create the recurring transaction object.
    recurring = RecurringTransaction(None, self, amount, description, date,
                                     repeatType, repeatEvery, repeatOn,
                                     endDate, source)
    # Store it.
    self.Store.MakeRecurringTransaction(recurring)
    # Add it to our internal list.
    self.RecurringTransactions.append(recurring)
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def RemoveRecurringTransaction(self, recurring):
    # Orphan any recurring children so that they are normal transactions.
    for child in recurring.GetChildren():
        child.RecurringParent = None
        if child.LinkedTransaction:
            child.LinkedTransaction.RecurringParent = None
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]
def AddTransaction(self, amount=None, description="", date=None, source=None,
                   transaction=None):
    """
    Enter a transaction in this account, optionally making the opposite
    transaction in the source account first.
    """
    Publisher.sendMessage("batch.start")
mrooney/wxbanker
[ 17, 9, 17, 7, 1337971002 ]