Dataset schema: function (string, 11 to 56k characters), repo_name (string, 5 to 60 characters), features (sequence).
def simple_trace_to_wkt_linestring(trace):
    '''
    Converts a simple fault trace to well-known text format

    :param trace:
        Fault trace as instance of :class: openquake.hazardlib.geo.line.Line
    :returns:
        Well-known text (WKT) LINESTRING representation of the trace
    '''
    trace_str = ""
    for point in trace:
        trace_str += ' %s %s,' % (point.longitude, point.latitude)
    trace_str = trace_str.lstrip(' ')
    return 'LINESTRING (' + trace_str.rstrip(',') + ')'
gem/oq-hazardlib
[ 23, 49, 23, 9, 1323944086 ]
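A minimal usage sketch for simple_trace_to_wkt_linestring above. The openquake Point type is stood in for by a hypothetical namedtuple, since only the .longitude and .latitude attributes are used:

# Hypothetical stand-in for openquake Point objects (only .longitude/.latitude are read).
from collections import namedtuple

FakePoint = namedtuple('FakePoint', ['longitude', 'latitude'])

trace = [FakePoint(10.0, 45.0), FakePoint(10.5, 45.2)]
print(simple_trace_to_wkt_linestring(trace))
# -> LINESTRING (10.0 45.0, 10.5 45.2)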
def init(name):
    settings = get_xblock_settings()
    sleep_seconds = settings.get("sleep_timeout", 10)
    providers = settings.get("providers")
    config = providers.get(name)
    if config and isinstance(config, dict):
        provider_type = config.get("type")
        if provider_type == "openstack" or not provider_type:
            return OpenstackProvider(name, config, sleep_seconds)
        elif provider_type == "gcloud":
            return GcloudProvider(name, config, sleep_seconds)
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def set_logger(self, logger):
    """Set a logger other than the standard one.

    This is meant to be used from Celery tasks, which usually would want
    to use their task logger for logging.
    """
    self.logger = logger
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def set_capacity(self, capacity):
    if capacity in (None, "None"):
        capacity = -1
    else:
        try:
            capacity = int(capacity)
        except (TypeError, ValueError):
            # Invalid capacity: disable the provider
            capacity = 0

    self.capacity = capacity
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
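A minimal standalone sketch of the same normalization rules as set_capacity above (the function name normalize_capacity is hypothetical, introduced only to show the three outcomes):

def normalize_capacity(capacity):
    # Mirrors set_capacity: None/"None" means unlimited (-1),
    # anything non-numeric disables the provider (0).
    if capacity in (None, "None"):
        return -1
    try:
        return int(capacity)
    except (TypeError, ValueError):
        return 0

assert normalize_capacity(None) == -1
assert normalize_capacity("8") == 8
assert normalize_capacity("bogus") == 0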
def set_environment(self, environment):
    if not environment:
        error_msg = ("No environment provided for provider %s" %
                     self.name)
        raise ProviderException(error_msg)

    self.environment = environment
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def generate_key_pair(self, encodeb64=False):
    keypair = {}
    pkey = paramiko.RSAKey.generate(1024)
    keypair["public_key"] = pkey.get_base64()
    s = StringIO()
    pkey.write_private_key(s)
    k = s.getvalue()
    s.close()
    if encodeb64:
        k = base64.b64encode(b(k))
    keypair["private_key"] = k
    return keypair
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
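A standalone sketch of the same key-pair recipe as generate_key_pair above, assuming only that paramiko is installed (2048 bits is used here just to show the knob; the method above uses 1024):

import base64
from io import StringIO

import paramiko

pkey = paramiko.RSAKey.generate(2048)
public_key = pkey.get_base64()      # base64 body of the OpenSSH public key
buf = StringIO()
pkey.write_private_key(buf)         # PEM-encoded private key
private_key = base64.b64encode(buf.getvalue().encode()).decode()
print(public_key[:24], private_key[:24])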
def get_stacks(self):
    raise NotImplementedError()
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def create_stack(self):
    raise NotImplementedError()
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def suspend_stack(self):
    raise NotImplementedError()
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def __init__(self, provider, config, sleep):
    super(OpenstackProvider, self).__init__(provider, config, sleep)

    self.heat_c = self._get_heat_client()
    self.nova_c = self._get_nova_client()
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def _get_nova_client(self):
    return NovaWrapper(**self.credentials).get_client()
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def get_stacks(self):
    stacks = []
    try:
        heat_stacks = self.heat_c.stacks.list()
    except HTTPNotFound:
        return stacks
    except (HTTPException, HttpError) as e:
        raise ProviderException(e)

    if heat_stacks:
        for heat_stack in heat_stacks:
            stack = {
                "name": heat_stack.stack_name,
                "status": heat_stack.stack_status
            }
            stacks.append(stack)

    return stacks
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def create_stack(self, name, run):
    if not self.template:
        raise ProviderException("Template not set for provider %s." %
                                self.name)
    try:
        self.logger.info('Creating OpenStack Heat stack [%s]' % name)
        res = self.heat_c.stacks.create(
            stack_name=name,
            template=self.template,
            environment=self.environment,
            parameters={'run': run}
        )
    except (HTTPException, HttpError) as e:
        raise ProviderException(e)

    stack_id = res['stack']['id']

    # Sleep to avoid throttling.
    self.sleep()

    try:
        heat_stack = self.heat_c.stacks.get(stack_id=stack_id)
    except (HTTPException, HttpError) as e:
        raise ProviderException(e)

    status = heat_stack.stack_status

    # Wait for stack creation
    while IN_PROGRESS in status:
        self.sleep()

        try:
            heat_stack = self.heat_c.stacks.get(stack_id=heat_stack.id)
        except HTTPNotFound:
            raise ProviderException("OpenStack Heat stack "
                                    "disappeared during creation.")
        except (HTTPException, HttpError) as e:
            raise ProviderException(e)

        status = heat_stack.stack_status

    if FAILED in status:
        raise ProviderException("Failure creating OpenStack Heat stack.")

    return {"status": status,
            "outputs": self._get_stack_outputs(heat_stack)}
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def suspend_stack(self, name, wait=True):
    try:
        self.logger.info("Suspending OpenStack Heat stack [%s]" % name)
        self.heat_c.actions.suspend(stack_id=name)
    except (HTTPException, HttpError) as e:
        raise ProviderException(e)

    status = SUSPEND_IN_PROGRESS

    # Wait until suspend finishes.
    if wait:
        while (FAILED not in status and
               status != DELETE_COMPLETE and
               status != SUSPEND_COMPLETE):
            self.sleep()

            try:
                heat_stack = self.heat_c.stacks.get(stack_id=name)
            except HTTPNotFound:
                status = DELETE_COMPLETE
            except (HTTPException, HttpError) as e:
                raise ProviderException(e)
            else:
                status = heat_stack.stack_status

            if FAILED in status:
                raise ProviderException("Failure suspending "
                                        "OpenStack Heat stack.")

    return {"status": status}
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def __init__(self, provider, config, sleep):
    super(GcloudProvider, self).__init__(provider, config, sleep)

    self.ds = self._get_deployment_service()
    self.cs = self._get_compute_service()
    self.project = config.get("gc_project_id")
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def _get_compute_service(self):
    return GcloudComputeEngine(**self.credentials).get_service()
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def _get_deployment_servers(self, deployment_name):
    try:
        response = self.ds.resources().list(
            project=self.project,
            deployment=deployment_name,
            filter='type = "compute.v1.instance"'
        ).execute()
    except GcloudApiError as e:
        raise ProviderException(e)

    servers = []
    if "resources" in response:
        for s in response["resources"]:
            try:
                server_name = s["name"]
                p = yaml.safe_load(s["finalProperties"])
                server_zone = p["zone"]
                server = self.cs.instances().get(
                    project=self.project,
                    zone=server_zone,
                    instance=server_name
                ).execute()
            except (KeyError, yaml.error.YAMLError, GcloudApiError) as e:
                raise ProviderException(e)

            servers.append(server)

    return servers
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def _encode_name(self, name):
    """
    GCP enforces strict resource naming policies (regex
    '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?'), so we work around it by naming
    the stack with a hash.
    """
    digest = hashlib.sha1(b(name)).hexdigest()
    return '%s%s' % (self.deployment_name_prefix, digest)
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
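A standalone sketch of the naming scheme in _encode_name above. The 'stack-' prefix is an illustrative assumption; the real one comes from self.deployment_name_prefix:

import hashlib

def encode_name(name, prefix='stack-'):
    # A SHA-1 hex digest is 40 lowercase hex chars, so prefix + digest
    # satisfies GCP's [a-z](?:[-a-z0-9]{0,61}[a-z0-9])? policy as long
    # as the prefix starts with a letter and stays short.
    return prefix + hashlib.sha1(name.encode('utf-8')).hexdigest()

print(encode_name('My Stack (user_42)'))
# deterministic, collision-resistant, and policy-safe regardless of the input name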
def get_stack(self, name):
    deployment_name = self._encode_name(name)

    try:
        self.logger.debug('Fetching information on '
                          'Google Cloud deployment [%s]' % deployment_name)
        response = self.ds.deployments().get(
            project=self.project,
            deployment=deployment_name
        ).execute()
    except GcloudApiHttpError as e:
        if e.resp.status == 404:
            status = DELETE_COMPLETE
            outputs = {}
        else:
            raise ProviderException(e)
    except GcloudApiError as e:
        raise ProviderException(e)
    else:
        status = self._get_deployment_status(response)
        outputs = self._get_deployment_outputs(response)

    return {"status": status, "outputs": outputs}
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def delete_stack(self, name, wait=True):
    deployment_name = self._encode_name(name)

    try:
        self.logger.info('Deleting Google Cloud deployment '
                         '[%s]' % deployment_name)
        operation = self.ds.deployments().delete(
            project=self.project,
            deployment=deployment_name
        ).execute()
    except GcloudApiError as e:
        raise ProviderException(e)

    status = DELETE_IN_PROGRESS

    # Wait until delete finishes.
    if wait:
        while True:
            try:
                response = self.ds.operations().get(
                    project=self.project,
                    operation=operation["name"]
                ).execute()

                if response["status"] == "DONE":
                    if "error" in response:
                        errors = response["error"].get("errors")
                        if errors:
                            message = errors[0]["message"]
                        else:
                            message = "Error in operation."
                        raise ProviderException(message)
                    status = DELETE_COMPLETE
                    break
            except GcloudApiHttpError as e:
                if e.resp.status == 404:
                    status = DELETE_COMPLETE
                    break
                else:
                    raise ProviderException(e)
            except GcloudApiError as e:
                raise ProviderException(e)

            self.sleep()

    return {"status": status}
hastexo/hastexo-xblock
[ 16, 9, 16, 4, 1444516845 ]
def create_url(endpoint):
    url = '{}{}'.format(BASE_URL, endpoint)
    return url
openmotics/gateway
[ 30, 12, 30, 27, 1481877206 ]
def pretty_print_output(output):
    try:
        json_dict = json.loads(output)
        json_str = json.dumps(json_dict, indent=4)
        return json_str
    except Exception:
        # Not valid JSON: return the raw output unchanged.
        return output
openmotics/gateway
[ 30, 12, 30, 27, 1481877206 ]
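A quick illustration of the fallback behavior of pretty_print_output above, using only the standard library:

import json

print(pretty_print_output('{"ok": true, "n": 1}'))
# {
#     "ok": true,
#     "n": 1
# }
print(pretty_print_output('plain text'))  # returned as-is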
def login(verbose=None):
    global TOKEN
    if verbose is None:
        verbose = VERBOSE
    params = {'username': USERNAME, 'password': PASSWD}
    resp = api_call('login', params=params, authenticated=False,
                    verbose=verbose)
    resp_json = resp.json()
    if 'token' in resp_json:
        token = resp_json['token']
        TOKEN = token
        if verbose > 0:
            print(' => logged in and received token: {}'.format(token))
            print('--------------------------------------------')
    else:
        raise RuntimeError('Could not log in to the gateway')
    return token
openmotics/gateway
[ 30, 12, 30, 27, 1481877206 ]
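A hedged sketch of the module-level configuration that login above assumes. The names come from the function body; the values are placeholders, and api_call and BASE_URL are expected to be defined in the same script:

# Placeholder configuration for the gateway test script (illustrative only).
BASE_URL = 'https://192.168.0.10/'  # hypothetical gateway address
USERNAME = 'admin'
PASSWD = 'admin'                    # placeholder only
VERBOSE = 1
TOKEN = None

token = login()  # on success, stores the token in TOKEN and returns it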
def get_news_text_from_html(data):
    """
    Given a string of data, locate the div with the id
    "article_text_content" and the page's meta description.

    Arguments:
      data: A string with the entire html page.

    Returns:
      A string with just the content text.
    """
    # From the data, get just the content. I don't quite understand why this
    # didn't work with a regular expression.
    data = replace_circ_diacritics(data)
    try:
        soup = BeautifulSoup(data, "lxml")
    except HTMLParseError:
        return 'error'

    tag = soup.find('div', id="article_text_content")
    desc = soup.find('meta', {'name': 'description'})
    if desc is None:
        return None

    content = desc['content']
    return content.encode('UTF-8') + ' ' + str(tag)
pistruiatul/hartapoliticii
[ 58, 23, 58, 36, 1326573089 ]
def __init__(self, error_code, error_msg):
    self.error_code = error_code
    self.error_msg = error_msg
    super(XQueueAddToQueueError, self).__init__(unicode(self))
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def __init__(self, request=None):
    # Get basic auth (username/password) for
    # xqueue connection if it's in the settings
    if settings.XQUEUE_INTERFACE.get('basic_auth') is not None:
        requests_auth = HTTPBasicAuth(
            *settings.XQUEUE_INTERFACE['basic_auth'])
    else:
        requests_auth = None

    if request is None:
        factory = RequestFactory()
        self.request = factory.get('/')
    else:
        self.request = request

    self.xqueue_interface = XQueueInterface(
        settings.XQUEUE_INTERFACE['url'],
        settings.XQUEUE_INTERFACE['django_auth'],
        requests_auth,
    )
    self.whitelist = CertificateWhitelist.objects.all()
    self.restricted = UserProfile.objects.filter(allow_certificate=False)
    self.use_https = True
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def del_cert(self, student, course_id):
    """
    Arguments:
      student - User.object
      course_id - courseenrollment.course_id (string)

    Removes the certificate for a student and changes the certificate
    status to 'deleting'. The certificate must be in the 'error' or
    'downloadable' state; otherwise the current state is returned.
    """
    raise NotImplementedError
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def add_cert(self, student, course_id, course=None, forced_grade=None,
             template_file=None, designation=None, generate_pdf=True):
    """
    Request a new certificate for a student.

    Arguments:
      student - User.object
      course_id - courseenrollment.course_id (CourseKey)
      forced_grade - a string indicating a grade parameter to pass with
          the certificate request. If this is given, grading will be
          skipped.
      generate_pdf - Boolean: should a message be sent to the queue to
          generate the certificate PDF?

    Will change the certificate status to 'generating', or 'downloadable'
    in the case of web view certificates.

    The course must not be a CCX.

    Certificate must be in the 'unavailable', 'error', 'deleted' or
    'generating' state.

    If a student has a passing grade or is in the whitelist table for the
    course, a request will be made for a new cert.

    If a student has allow_certificate set to False in the userprofile
    table, the status will change to 'restricted'.

    If a student does not have a passing grade, the status will change to
    status.notpassing.

    Returns the newly created certificate instance.
    """
    if hasattr(course_id, 'ccx'):
        LOGGER.warning(
            (
                u"Cannot create certificate generation task for user %s "
                u"in the course '%s'; "
                u"certificates are not allowed for CCX courses."
            ),
            student.id,
            unicode(course_id)
        )
        return None

    valid_statuses = [
        status.generating,
        status.unavailable,
        status.deleted,
        status.error,
        status.notpassing,
        status.downloadable,
        status.auditing,
        status.audit_passing,
        status.audit_notpassing,
        status.unverified,
    ]

    cert_status = certificate_status_for_student(student, course_id)['status']
    cert = None

    if cert_status not in valid_statuses:
        LOGGER.warning(
            (
                u"Cannot create certificate generation task for user %s "
                u"in the course '%s'; "
                u"the certificate status '%s' is not one of %s."
            ),
            student.id,
            unicode(course_id),
            cert_status,
            unicode(valid_statuses)
        )
        return None

    # The caller can optionally pass a course in to avoid
    # re-fetching it from Mongo. If they have not provided one,
    # get it from the modulestore.
    if course is None:
        course = modulestore().get_course(course_id, depth=0)

    profile = UserProfile.objects.get(user=student)
    profile_name = profile.name

    # Needed for access control in grading.
    self.request.user = student
    self.request.session = {}

    is_whitelisted = self.whitelist.filter(
        user=student, course_id=course_id, whitelist=True).exists()
    course_grade = CourseGradeFactory().read(student, course)
    enrollment_mode, __ = CourseEnrollment.enrollment_mode_for_user(student, course_id)
    mode_is_verified = enrollment_mode in GeneratedCertificate.VERIFIED_CERTS_MODES
    user_is_verified = IDVerificationService.user_is_verified(student)
    cert_mode = enrollment_mode

    is_eligible_for_certificate = is_whitelisted or CourseMode.is_eligible_for_certificate(enrollment_mode)
    unverified = False

    # For credit mode generate verified certificate
    if cert_mode == CourseMode.CREDIT_MODE:
        cert_mode = CourseMode.VERIFIED

    if template_file is not None:
        template_pdf = template_file
    elif mode_is_verified and user_is_verified:
        template_pdf = "certificate-template-{id.org}-{id.course}-verified.pdf".format(id=course_id)
    elif mode_is_verified and not user_is_verified:
        template_pdf = "certificate-template-{id.org}-{id.course}.pdf".format(id=course_id)
        if CourseMode.mode_for_course(course_id, CourseMode.HONOR):
            cert_mode = GeneratedCertificate.MODES.honor
        else:
            unverified = True
    else:
        # honor code and audit students
        template_pdf = "certificate-template-{id.org}-{id.course}.pdf".format(id=course_id)

    LOGGER.info(
        (
            u"Certificate generated for student %s in the course: %s with template: %s. "
            u"given template: %s, "
            u"user is verified: %s, "
            u"mode is verified: %s, "
            u"generate_pdf is: %s"
        ),
        student.username,
        unicode(course_id),
        template_pdf,
        template_file,
        user_is_verified,
        mode_is_verified,
        generate_pdf
    )
    cert, created = GeneratedCertificate.objects.get_or_create(user=student, course_id=course_id)

    cert.mode = cert_mode
    cert.user = student
    cert.grade = course_grade.percent
    cert.course_id = course_id
    cert.name = profile_name
    cert.download_url = ''

    # Strip HTML from grade range label
    grade_contents = forced_grade or course_grade.letter_grade
    try:
        grade_contents = lxml.html.fromstring(grade_contents).text_content()
        passing = True
    except (TypeError, XMLSyntaxError, ParserError) as exc:
        LOGGER.info(
            (
                u"Could not retrieve grade for student %s "
                u"in the course '%s' "
                u"because an exception occurred while parsing the "
                u"grade contents '%s' as HTML. "
                u"The exception was: '%s'"
            ),
            student.id,
            unicode(course_id),
            grade_contents,
            unicode(exc)
        )

        # Log if the student is whitelisted
        if is_whitelisted:
            LOGGER.info(
                u"Student %s is whitelisted in '%s'",
                student.id,
                unicode(course_id)
            )
            passing = True
        else:
            passing = False

    # If this user's enrollment is not eligible to receive a
    # certificate, mark it as such for reporting and
    # analytics. Only do this if the certificate is new, or
    # already marked as ineligible -- we don't want to mark
    # existing audit certs as ineligible.
    cutoff = settings.AUDIT_CERT_CUTOFF_DATE
    if (cutoff and cert.created_date >= cutoff) and not is_eligible_for_certificate:
        cert.status = status.audit_passing if passing else status.audit_notpassing
        cert.save()
        LOGGER.info(
            u"Student %s with enrollment mode %s is not eligible for a certificate.",
            student.id,
            enrollment_mode
        )
        return cert

    # If they are not passing, short-circuit and don't generate cert
    elif not passing:
        cert.status = status.notpassing
        cert.save()

        LOGGER.info(
            (
                u"Student %s does not have a grade for '%s', "
                u"so their certificate status has been set to '%s'. "
                u"No certificate generation task was sent to the XQueue."
            ),
            student.id,
            unicode(course_id),
            cert.status
        )
        return cert

    # Check to see whether the student is on the embargoed
    # country restricted list. If so, they should not receive a
    # certificate -- set their status to restricted and log it.
    if self.restricted.filter(user=student).exists():
        cert.status = status.restricted
        cert.save()

        LOGGER.info(
            (
                u"Student %s is in the embargoed country restricted "
                u"list, so their certificate status has been set to '%s' "
                u"for the course '%s'. "
                u"No certificate generation task was sent to the XQueue."
            ),
            student.id,
            cert.status,
            unicode(course_id)
        )
        return cert

    if unverified:
        cert.status = status.unverified
        cert.save()
        LOGGER.info(
            (
                u"User %s has a verified enrollment in course %s "
                u"but is missing ID verification. "
                u"Certificate status has been set to unverified."
            ),
            student.id,
            unicode(course_id),
        )
        return cert

    # Finally, generate the certificate and send it off.
    return self._generate_cert(cert, course, student, grade_contents,
                               template_pdf, generate_pdf, designation)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def add_example_cert(self, example_cert):
    """Add a task to create an example certificate.

    Unlike other certificates, an example certificate is not associated
    with any particular user and is never shown to students.

    If an error occurs when adding the example certificate to the queue,
    the example certificate status will be set to "error".

    Arguments:
        example_cert (ExampleCertificate)
    """
    contents = {
        'action': 'create',
        'course_id': unicode(example_cert.course_key),
        'name': example_cert.full_name,
        'template_pdf': example_cert.template,

        # Example certificates are not associated with a particular user.
        # However, we still need to find the example certificate when
        # we receive a response from the queue. For this reason,
        # we use the example certificate's unique identifier as a username.
        # Note that the username is *not* displayed on the certificate;
        # it is used only to identify the certificate task in the queue.
        'username': example_cert.uuid,

        # We send this extra parameter to differentiate
        # example certificates from other certificates.
        # This is not used by the certificates workers or XQueue.
        'example_certificate': True,
    }

    # The callback for example certificates is different than the callback
    # for other certificates. Although both tasks use the same queue,
    # we can distinguish whether the certificate was an example cert based
    # on which end-point XQueue uses once the task completes.
    callback_url_path = reverse('update_example_certificate')

    try:
        self._send_to_xqueue(
            contents,
            example_cert.access_key,
            task_identifier=example_cert.uuid,
            callback_url_path=callback_url_path
        )
        LOGGER.info(u"Started generating example certificates for course '%s'.", example_cert.course_key)
    except XQueueAddToQueueError as exc:
        example_cert.update_status(
            ExampleCertificate.STATUS_ERROR,
            error_reason=unicode(exc)
        )
        LOGGER.critical(
            (
                u"Could not add example certificate with uuid '%s' to XQueue. "
                u"The exception was %s. "
                u"The example certificate has been marked with status 'error'."
            ),
            example_cert.uuid,
            unicode(exc)
        )
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def find_worksheets_feed(self):
    return self.find_url(WORKSHEETS_REL)
pistruiatul/hartapoliticii
[ 58, 23, 58, 36, 1326573089 ]
def get_table_id(self):
    if self.id.text:
        return self.id.text.split('/')[-1]
    return None
pistruiatul/hartapoliticii
[ 58, 23, 58, 36, 1326573089 ]
def value_for_index(self, column_index):
    for field in self.field:
        if field.index == column_index:
            return field.text
    raise FieldMissing('There is no field for %s' % column_index)
pistruiatul/hartapoliticii
[ 58, 23, 58, 36, 1326573089 ]
def value_for_name(self, name):
    for field in self.field:
        if field.name == name:
            return field.text
    raise FieldMissing('There is no field for %s' % name)
pistruiatul/hartapoliticii
[ 58, 23, 58, 36, 1326573089 ]
def get_value(self, column_name):
    """Returns the displayed text for the desired column in this row.

    The formula or input which generated the displayed value is not
    accessible through the list feed; to see the user's input, use the
    cells feed.

    If a column is not present in this spreadsheet, or there is no value
    for a column in this row, this method will return None.
    """
    values = self.get_elements(column_name, GSX_NAMESPACE)
    if len(values) == 0:
        return None
    return values[0].text
pistruiatul/hartapoliticii
[ 58, 23, 58, 36, 1326573089 ]
def batch_set_cell(row, col, input):
    pass
pistruiatul/hartapoliticii
[ 58, 23, 58, 36, 1326573089 ]
def sqlite_column_reflect_listener(inspector, table, column_info):
    """Adds parenthesis around SQLite datetime defaults for utcnow."""
    if column_info['default'] == "datetime('now', 'utc')":
        column_info['default'] = utcnow_server_default
magfest/ubersystem
[ 44, 49, 44, 436, 1391223385 ]
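A hedged sketch of how a listener like the one above is typically wired up with SQLAlchemy's event system. The engine URL and the value of utcnow_server_default are placeholders assumed for the example:

from sqlalchemy import Table, create_engine, event

utcnow_server_default = "(datetime('now', 'utc'))"  # assumed value, per the docstring

engine = create_engine('sqlite:///example.db')  # placeholder URL

# Fire the listener for every column reflected from any Table.
event.listen(Table, 'column_reflect', sqlite_column_reflect_listener)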
def upgrade():
    op.create_table('guest_autograph',
        sa.Column('id', residue.UUID(), nullable=False),
        sa.Column('guest_id', residue.UUID(), nullable=False),
        sa.Column('num', sa.Integer(), server_default='0', nullable=False),
        sa.Column('length', sa.Integer(), server_default='60', nullable=False),
        sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'],
                                name=op.f('fk_guest_autograph_guest_id_guest_group')),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_autograph')),
        sa.UniqueConstraint('guest_id', name=op.f('uq_guest_autograph_guest_id'))
    )
    op.create_table('guest_interview',
        sa.Column('id', residue.UUID(), nullable=False),
        sa.Column('guest_id', residue.UUID(), nullable=False),
        sa.Column('will_interview', sa.Boolean(), server_default='False', nullable=False),
        sa.Column('email', sa.Unicode(), server_default='', nullable=False),
        sa.Column('direct_contact', sa.Boolean(), server_default='False', nullable=False),
        sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'],
                                name=op.f('fk_guest_interview_guest_id_guest_group')),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_interview')),
        sa.UniqueConstraint('guest_id', name=op.f('uq_guest_interview_guest_id'))
    )
    op.create_table('guest_travel_plans',
        sa.Column('id', residue.UUID(), nullable=False),
        sa.Column('guest_id', residue.UUID(), nullable=False),
        sa.Column('modes', sa.Unicode(), server_default='', nullable=False),
        sa.Column('modes_text', sa.Unicode(), server_default='', nullable=False),
        sa.Column('details', sa.Unicode(), server_default='', nullable=False),
        sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'],
                                name=op.f('fk_guest_travel_plans_guest_id_guest_group')),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_travel_plans')),
        sa.UniqueConstraint('guest_id', name=op.f('uq_guest_travel_plans_guest_id'))
    )
magfest/ubersystem
[ 44, 49, 44, 436, 1391223385 ]
def __levenshtein__(str1, str2):
    str1 = str1.encode('utf-8')
    str2 = str2.encode('utf-8')
    return Levenshtein.distance(str1.lower(), str2.lower())
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
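A quick check of the wrapped distance, assuming the python-Levenshtein package is installed (on Python 3 the .encode('utf-8') calls above would need to be dropped, since Levenshtein.distance compares strings, not bytes):

import Levenshtein

# Case-insensitive edit distance, as in the wrapper above.
print(Levenshtein.distance('kitten'.lower(), 'SITTING'.lower()))  # -> 3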
def __dist_registrar__(registrar_a, registrar_b):
    registrar_a = registrar_a if registrar_a is not None else ''
    registrar_b = registrar_b if registrar_b is not None else ''
    registrar_a = registrar_a.encode('utf-8') if not isinstance(registrar_a, list) else registrar_a[0].encode('utf-8')
    registrar_b = registrar_b.encode('utf-8') if not isinstance(registrar_b, list) else registrar_b[0].encode('utf-8')
    return __levenshtein__(str(registrar_a).lower(), str(registrar_b).lower())
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
def __dist_org_by_min_dist__(orgs_a=[], orgs_b=[]):
    orgs_seed = orgs_a.split(',') if not isinstance(orgs_a, list) else orgs_a
    orgs_file = orgs_b.split(',') if not isinstance(orgs_b, list) else orgs_b
    if not orgs_seed and not orgs_file:
        return float(0)
    elif not orgs_seed:
        orgs_seed = ['']
    elif not orgs_file:
        orgs_file = ['']
    dist_org = __levenshtein__(str(orgs_seed[0]), str(orgs_file[0]))
    for org_s in orgs_seed:
        org_s = org_s.encode('utf-8')
        for org_f in orgs_file:
            org_f = org_f.encode('utf-8')
            # Keep the numeric minimum over all pairwise edit distances
            # (wrapping both sides in str() would make min() compare
            # lexicographically instead).
            dist_org = min(dist_org, __levenshtein__(str(org_s), str(org_f)))
    return float(dist_org)
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
def get_date_aux(date):
    try:
        return datetime.datetime.strptime(date, '%d-%m-%Y') \
            if not isinstance(date, datetime.datetime) else date
    except Exception:
        # Fall back to a permissive parser for other date formats.
        return dateutil.parser.parse(date)
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
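A quick illustration of the three paths through get_date_aux above (dateutil is the python-dateutil package):

import datetime
import dateutil.parser

print(get_date_aux('21-03-2017'))                     # parsed with %d-%m-%Y
print(get_date_aux('2017-03-21T10:00:00Z'))           # falls through to dateutil
print(get_date_aux(datetime.datetime(2017, 3, 21)))   # passed through unchanged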
def get_diff_ttl(creation_date_a, creation_date_b, expiration_date_a, expiration_date_b):
    if not creation_date_a and not creation_date_b and not expiration_date_a and not expiration_date_b:
        return float(0)
    elif not creation_date_a and not creation_date_b and expiration_date_a and expiration_date_b:
        if expiration_date_a == expiration_date_b:
            return float(0)
        else:
            return float(1)
    elif creation_date_a and creation_date_b and not expiration_date_a and not expiration_date_b:
        if creation_date_a == creation_date_b:
            return float(0)
        else:
            return float(1)
    elif not creation_date_a or not creation_date_b or not expiration_date_a or not expiration_date_b:
        return float(1)
    else:
        cd_a = get_date_aux(creation_date_a)
        ed_a = get_date_aux(expiration_date_a)
        cd_b = get_date_aux(creation_date_b)
        ed_b = get_date_aux(expiration_date_b)
        ttl_days_b = float(abs(cd_b - ed_b).days)  # time to live
        ttl_days_a = float(abs(cd_a - ed_a).days)
        if ttl_days_b == ttl_days_a:
            return float(0)
        else:
            return float(1) - ((ttl_days_b / ttl_days_a) if ttl_days_b <= ttl_days_a
                               else (ttl_days_a / ttl_days_b))
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
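A worked example of the TTL ratio in the final branch of get_diff_ttl above: equal registration lifetimes score 0 and very different lifetimes approach 1. Dates use the %d-%m-%Y form that get_date_aux accepts:

print(get_diff_ttl('01-01-2016', '01-01-2016', '01-01-2017', '01-01-2018'))
# TTLs are 366 and 731 days -> 1 - 366/731, approximately 0.499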
def get_diff_emails_by_min_dist(emails_a=[], emails_b=[]):
    emails_seed = emails_a.split(',') if not isinstance(emails_a, list) else emails_a
    emails_file = emails_b.split(',') if not isinstance(emails_b, list) else emails_b
    if not emails_seed and not emails_file:
        return float(0)
    elif not emails_seed:
        emails_seed = ['']
    elif not emails_file:
        emails_file = ['']
    dist_email = __levenshtein__(str(emails_seed[0]), str(emails_file[0]))
    for email_s in emails_seed:
        for email_f in emails_file:
            # Keep the numeric minimum over all pairwise edit distances.
            dist_email = min(dist_email, __levenshtein__(str(email_s), str(email_f)))
    return float(dist_email)
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
def get_diff_name_servers_by_min_dist(name_servers_a=[], name_servers_b=[]):
    if name_servers_a is None:
        name_servers_a = []
    if name_servers_b is None:
        name_servers_b = []
    name_servers_seed = name_servers_a.split(',') if not isinstance(name_servers_a, list) else name_servers_a
    name_servers_file = name_servers_b.split(',') if not isinstance(name_servers_b, list) else name_servers_b
    if not name_servers_seed and not name_servers_file:
        return float(0)
    elif not name_servers_seed:
        name_servers_seed = ['']
    elif not name_servers_file:
        name_servers_file = ['']
    dist_name_server = __levenshtein__(str(name_servers_seed[0]), str(name_servers_file[0]))
    for name_server_s in name_servers_seed:
        for name_server_f in name_servers_file:
            # Keep the numeric minimum over all pairwise edit distances.
            dist_name_server = min(dist_name_server,
                                   __levenshtein__(str(name_server_s), str(name_server_f)))
    return float(dist_name_server)
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
def features_domains(whois_info_a={}, whois_info_b={}):
    domain_name_a = whois_info_a.get(KEY_DOMAIN_NAME, '')
    registrar_a = whois_info_a.get(KEY_REGISTRAR, '')
    name_a = whois_info_a.get(KEY_NAME, '')
    orgs_a = whois_info_a.get(KEY_ORG, [])
    zipcode_a = whois_info_a.get(KEY_ZIPCODE, [])
    creation_date_a = whois_info_a.get(KEY_CREATION_DATE, None)
    expiration_date_a = whois_info_a.get(KEY_EXPIRATION_DATE, None)
    emails_a = whois_info_a.get(KEY_EMAILS, [])
    name_servers_a = whois_info_a.get(KEY_NAME_SERVERS, [])

    domain_name_b = whois_info_b.get(KEY_DOMAIN_NAME, '')
    registrar_b = whois_info_b.get(KEY_REGISTRAR, '')
    name_b = whois_info_b.get(KEY_NAME, '')
    orgs_b = whois_info_b.get(KEY_ORG, [])
    zipcode_b = whois_info_b.get(KEY_ZIPCODE, [])
    creation_date_b = whois_info_b.get(KEY_CREATION_DATE, '')
    expiration_date_b = whois_info_b.get(KEY_EXPIRATION_DATE, '')
    emails_b = whois_info_b.get(KEY_EMAILS, [])
    name_servers_b = whois_info_b.get(KEY_NAME_SERVERS, [])

    return features_domains_attr(domain_name_a, registrar_a, name_a, orgs_a, zipcode_a,
                                 creation_date_a, expiration_date_a, emails_a, name_servers_a,
                                 domain_name_b, registrar_b, name_b, orgs_b, zipcode_b,
                                 creation_date_b, expiration_date_b, emails_b, name_servers_b)
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
def get_input_and_target_from(dmfs):
    inputs = []
    target = []
    for dmf in dmfs:
        inputs.append([1] + dmf.get_features().values())
        target.append(dmf.related)
    return inputs, target
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
def distance_related_by_whois_obj(external_module, domain_a, domain_b):
    global weights
    result = WhoisConsult.get_features_info_by_set_url(external_module, [domain_a, domain_b])
    domains = result.keys()
    try:
        whois_info_a = result[domains[0]]
        whois_info_b = result[domains[1]]
    except Exception as e:
        whois_info_a = result[domains[0]]
        whois_info_b = result[domains[0]]

    distance, feature_distance = get_whois_distance(whois_info_a, whois_info_b)
    return distance <= RELATION_THRESHOLD, distance, feature_distance
stratosphereips/Manati
[ 109, 28, 109, 11, 1505115163 ]
def testZeroMag(self):
    mags = [0, 0, 0, 0, 0]
    freqs = [23, 500, 3200, 9000, 10000]
    self.assertEqualVector(Tristimulus()(freqs, mags), [0, 0, 0])
MTG/essentia
[ 2331, 490, 2331, 378, 1370271227 ]
def test4Freqs(self):
    mags = [1, 2, 3, 4]
    freqs = [100, 435, 6547, 24324]
    self.assertAlmostEqualVector(Tristimulus()(freqs, mags), [.1, .9, 0])
MTG/essentia
[ 2331, 490, 2331, 378, 1370271227 ]
def testFrequencyOrder(self):
    freqs = [1, 2, 1.1]
    mags = [0, 0, 0]
    self.assertComputeFails(Tristimulus(), freqs, mags)
MTG/essentia
[ 2331, 490, 2331, 378, 1370271227 ]
def testEmpty(self):
    freqs = []
    mags = []
    self.assertEqualVector(Tristimulus()(freqs, mags), [0, 0, 0])
MTG/essentia
[ 2331, 490, 2331, 378, 1370271227 ]
def upgrade():
    try:
        op.create_table('smtpserver',
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('identifier', sa.Unicode(length=255), nullable=False),
            sa.Column('server', sa.Unicode(length=255), nullable=False),
            sa.Column('port', sa.Integer(), nullable=True),
            sa.Column('username', sa.Unicode(length=255), nullable=True),
            sa.Column('password', sa.Unicode(length=255), nullable=True),
            sa.Column('sender', sa.Unicode(length=255), nullable=True),
            sa.Column('tls', sa.Boolean(), nullable=True),
            sa.Column('description', sa.Unicode(length=2000), nullable=True),
            sa.PrimaryKeyConstraint('id')
        )
    except (OperationalError, ProgrammingError, InternalError) as exx:
        if "duplicate column name" in str(exx.orig).lower():
            print("Good. Column smtpserver already exists.")
        else:
            print(exx)
    except Exception as exx:
        print("Could not add table 'smtpserver'")
        print(exx)
privacyidea/privacyidea
[ 1321, 287, 1321, 217, 1401806822 ]
def __init__(self):
    super(Simrad, self).__init__()
    self.desc = "Simrad"
    self._ext.add('ssp')
    self._ext.add('s??')
hydroffice/hyo_soundspeed
[ 15, 18, 15, 3, 1459123919 ]
def _parse_header(self):
    meta = {}
    m = re.search(r'''\$[A-Z][A-Z](?P<fmt>S\d\d),   # fmt is between 00 and 53
                      (?P<id>\d+),
                      (?P<nmeasure>\d+),
                      (?P<hour>\d\d)(?P<minute>\d\d)(?P<second>\d\d),
                      (?P<day>\d\d),
                      (?P<mon>\d\d),
                      (?P<yr>\d+),
                  ''', self.lines[0], re.VERBOSE)  # ignoring the optional fields of first line
    if m:
        meta.update(getMetaFromTimeRE(m))
        meta['DataSetID'] = m.group('id')
        meta['Format'] = "SSP " + m.group('fmt')
        meta['fmt'] = m.group('fmt')

    m = re.search(r'''(?P<lat>[\d.]+,[NS]),
                      (?P<lon>[\d.]+,[EW]),
                  ''', self.lines[1], re.VERBOSE)  # try the optional second line
    if not m:
        m = re.search(r'''(?P<lat>[\d.]+,[NS]),
                          (?P<lon>[\d.]+,[EW]),
                      ''', self.lines[-1], re.VERBOSE)  # try at the end of file
    if m:
        location = coordinates.Coordinate(m.group('lat'), m.group('lon'))
        meta.update(Profile.getMetaFromCoord(location))

    meta['filename'] = self.fid._path
    self.rawmeta = meta
hydroffice/hyo_soundspeed
[ 15, 18, 15, 3, 1459123919 ]
def _rule(word, count, min_count):
    if word == "human":
        return utils.RULE_DISCARD  # throw out
    else:
        return utils.RULE_DEFAULT  # apply default rule, i.e. min_count
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
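A minimal sketch of plugging a rule like _rule above into gensim (API as in gensim 4.x; the toy corpus is illustrative):

from gensim.models import word2vec

toy_sentences = [['human', 'interface', 'computer'],
                 ['human', 'machine', 'interface']]

model = word2vec.Word2Vec(toy_sentences, vector_size=10, min_count=1, trim_rule=_rule)
print('human' in model.wv)      # False -- discarded by the rule
print('interface' in model.wv)  # True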
def testBuildVocabFromFreq(self):
    """Test that the algorithm is able to build vocabulary from a given frequency table."""
    freq_dict = {
        'minors': 2, 'graph': 3, 'system': 4,
        'trees': 3, 'eps': 2, 'computer': 2,
        'survey': 2, 'user': 3, 'human': 2,
        'time': 2, 'interface': 2, 'response': 2
    }
    freq_dict_orig = freq_dict.copy()
    model_hs = word2vec.Word2Vec(vector_size=10, min_count=0, seed=42, hs=1, negative=0)
    model_neg = word2vec.Word2Vec(vector_size=10, min_count=0, seed=42, hs=0, negative=5)
    model_hs.build_vocab_from_freq(freq_dict)
    model_neg.build_vocab_from_freq(freq_dict)
    self.assertEqual(len(model_hs.wv), 12)
    self.assertEqual(len(model_neg.wv), 12)

    for k in freq_dict_orig.keys():
        self.assertEqual(model_hs.wv.get_vecattr(k, 'count'), freq_dict_orig[k])
        self.assertEqual(model_neg.wv.get_vecattr(k, 'count'), freq_dict_orig[k])

    new_freq_dict = {
        'computer': 1, 'artificial': 4, 'human': 1, 'graph': 1,
        'intelligence': 4, 'system': 1, 'trees': 1
    }
    model_hs.build_vocab_from_freq(new_freq_dict, update=True)
    model_neg.build_vocab_from_freq(new_freq_dict, update=True)
    self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 4)
    self.assertEqual(model_hs.wv.get_vecattr('artificial', 'count'), 4)
    self.assertEqual(len(model_hs.wv), 14)
    self.assertEqual(len(model_neg.wv), 14)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testTotalWordCount(self):
    model = word2vec.Word2Vec(vector_size=10, min_count=0, seed=42)
    total_words = model.scan_vocab(sentences)[0]
    self.assertEqual(total_words, 29)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testOnlineLearning(self):
    """Test that the algorithm is able to add new words to the
    vocabulary and to a trained model when using a sorted vocabulary."""
    model_hs = word2vec.Word2Vec(sentences, vector_size=10, min_count=0, seed=42, hs=1, negative=0)
    model_neg = word2vec.Word2Vec(sentences, vector_size=10, min_count=0, seed=42, hs=0, negative=5)
    self.assertEqual(len(model_hs.wv), 12)
    self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 3)
    model_hs.build_vocab(new_sentences, update=True)
    model_neg.build_vocab(new_sentences, update=True)
    self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 4)
    self.assertEqual(model_hs.wv.get_vecattr('artificial', 'count'), 4)
    self.assertEqual(len(model_hs.wv), 14)
    self.assertEqual(len(model_neg.wv), 14)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testOnlineLearningFromFile(self):
    """Test that the algorithm is able to add new words to the
    vocabulary and to a trained model when using a sorted vocabulary."""
    with temporary_file(get_tmpfile('gensim_word2vec1.tst')) as corpus_file, \
            temporary_file(get_tmpfile('gensim_word2vec2.tst')) as new_corpus_file:
        utils.save_as_line_sentence(sentences, corpus_file)
        utils.save_as_line_sentence(new_sentences, new_corpus_file)

        model_hs = word2vec.Word2Vec(corpus_file=corpus_file, vector_size=10, min_count=0, seed=42, hs=1, negative=0)
        model_neg = word2vec.Word2Vec(corpus_file=corpus_file, vector_size=10, min_count=0, seed=42, hs=0, negative=5)
        self.assertEqual(len(model_hs.wv), 12)
        self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 3)

        model_hs.build_vocab(corpus_file=new_corpus_file, update=True)
        model_hs.train(corpus_file=new_corpus_file, total_words=model_hs.corpus_total_words, epochs=model_hs.epochs)
        model_neg.build_vocab(corpus_file=new_corpus_file, update=True)
        model_neg.train(
            corpus_file=new_corpus_file, total_words=model_hs.corpus_total_words, epochs=model_hs.epochs)

        self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 4)
        self.assertEqual(model_hs.wv.get_vecattr('artificial', 'count'), 4)
        self.assertEqual(len(model_hs.wv), 14)
        self.assertEqual(len(model_neg.wv), 14)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testOnlineLearningAfterSaveFromFile(self):
    """Test that the algorithm is able to add new words to the
    vocabulary and to a trained model when using a sorted vocabulary."""
    with temporary_file(get_tmpfile('gensim_word2vec1.tst')) as corpus_file, \
            temporary_file(get_tmpfile('gensim_word2vec2.tst')) as new_corpus_file:
        utils.save_as_line_sentence(sentences, corpus_file)
        utils.save_as_line_sentence(new_sentences, new_corpus_file)

        tmpf = get_tmpfile('gensim_word2vec.tst')
        model_neg = word2vec.Word2Vec(corpus_file=corpus_file, vector_size=10, min_count=0, seed=42, hs=0, negative=5)
        model_neg.save(tmpf)
        model_neg = word2vec.Word2Vec.load(tmpf)
        self.assertEqual(len(model_neg.wv), 12)

        # Check that training works on the same data after load without calling build_vocab
        model_neg.train(corpus_file=corpus_file, total_words=model_neg.corpus_total_words, epochs=model_neg.epochs)

        # Train on new corpus file
        model_neg.build_vocab(corpus_file=new_corpus_file, update=True)
        model_neg.train(corpus_file=new_corpus_file, total_words=model_neg.corpus_total_words, epochs=model_neg.epochs)

        self.assertEqual(len(model_neg.wv), 14)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_sg_hs_online(self):
    """Test skipgram w/ hierarchical softmax"""
    model = word2vec.Word2Vec(sg=1, window=5, hs=1, negative=0, min_count=3, epochs=10, seed=42, workers=2)
    self.onlineSanity(model)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_cbow_hs_online(self):
    """Test CBOW w/ hierarchical softmax"""
    model = word2vec.Word2Vec(
        sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0, min_count=3, epochs=20, seed=42, workers=2
    )
    self.onlineSanity(model)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testPersistence(self):
    """Test storing/loading the entire model."""
    tmpf = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.save(tmpf)
    self.models_equal(model, word2vec.Word2Vec.load(tmpf))

    # test persistence of the KeyedVectors of a model
    wv = model.wv
    wv.save(tmpf)
    loaded_wv = keyedvectors.KeyedVectors.load(tmpf)
    self.assertTrue(np.allclose(wv.vectors, loaded_wv.vectors))
    self.assertEqual(len(wv), len(loaded_wv))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testPersistenceFromFile(self):
    """Test storing/loading the entire model trained with corpus_file argument."""
    with temporary_file(get_tmpfile('gensim_word2vec.tst')) as corpus_file:
        utils.save_as_line_sentence(sentences, corpus_file)

        tmpf = get_tmpfile('gensim_word2vec.tst')
        model = word2vec.Word2Vec(corpus_file=corpus_file, min_count=1)
        model.save(tmpf)
        self.models_equal(model, word2vec.Word2Vec.load(tmpf))

        # test persistence of the KeyedVectors of a model
        wv = model.wv
        wv.save(tmpf)
        loaded_wv = keyedvectors.KeyedVectors.load(tmpf)
        self.assertTrue(np.allclose(wv.vectors, loaded_wv.vectors))
        self.assertEqual(len(wv), len(loaded_wv))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testRuleWithMinCount(self):
    """Test that returning RULE_DEFAULT from trim_rule triggers min_count."""
    model = word2vec.Word2Vec(sentences + [["occurs_only_once"]], min_count=2, trim_rule=_rule)
    self.assertTrue("human" not in model.wv)
    self.assertTrue("occurs_only_once" not in model.wv)
    self.assertTrue("interface" in model.wv)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testLambdaRule(self):
    """Test that lambda trim_rule works."""
    def rule(word, count, min_count):
        return utils.RULE_DISCARD if word == "human" else utils.RULE_DEFAULT

    model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=rule)
    self.assertTrue("human" not in model.wv)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testLoadPreKeyedVectorModelCFormat(self):
    """Test loading pre-KeyedVectors word2vec model saved in word2vec format"""
    model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
    self.assertTrue(model.vectors.shape[0] == len(model))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testNoTrainingCFormat(self):
    tmpf = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.wv.save_word2vec_format(tmpf, binary=True)
    kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True)
    binary_model = word2vec.Word2Vec()
    binary_model.wv = kv
    self.assertRaises(ValueError, binary_model.train, sentences)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testTooShortTextWord2VecFormat(self):
    tfile = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.wv.save_word2vec_format(tfile, binary=False)
    f = open(tfile, 'r+b')
    f.write(b'13')  # write wrong (too-long) vector count
    f.close()
    self.assertRaises(EOFError, keyedvectors.KeyedVectors.load_word2vec_format, tfile, binary=False)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testPersistenceWord2VecFormatWithVocab(self):
    """Test storing/loading the entire model and vocabulary in word2vec format."""
    tmpf = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    testvocab = get_tmpfile('gensim_word2vec.vocab')
    model.wv.save_word2vec_format(tmpf, testvocab, binary=True)
    binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True)
    self.assertEqual(
        model.wv.get_vecattr('human', 'count'),
        binary_model_with_vocab_kv.get_vecattr('human', 'count'))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testPersistenceWord2VecFormatCombinationWithStandardPersistence(self):
    """Test storing/loading the entire model and vocabulary in word2vec format,
    chained with saving and loading via the `save` and `load` methods.
    This was possible prior to the 1.0.0 release; it now raises an Exception.
    """
    tmpf = get_tmpfile('gensim_word2vec.tst')
    model = word2vec.Word2Vec(sentences, min_count=1)
    testvocab = get_tmpfile('gensim_word2vec.vocab')
    model.wv.save_word2vec_format(tmpf, testvocab, binary=True)
    binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, testvocab, binary=True)
    binary_model_with_vocab_kv.save(tmpf)
    self.assertRaises(AttributeError, word2vec.Word2Vec.load, tmpf)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testVocab(self):
    """Test word2vec vocabulary building."""
    corpus = LeeCorpus()
    total_words = sum(len(sentence) for sentence in corpus)

    # try vocab building explicitly, using all words
    model = word2vec.Word2Vec(min_count=1, hs=1, negative=0)
    model.build_vocab(corpus)
    self.assertTrue(len(model.wv) == 6981)
    # with min_count=1, we're not throwing away anything,
    # so make sure the word counts add up to be the entire corpus
    self.assertEqual(sum(model.wv.get_vecattr(k, 'count') for k in model.wv.key_to_index), total_words)
    # make sure the binary codes are correct
    np.allclose(model.wv.get_vecattr('the', 'code'), [1, 1, 0, 0])

    # test building vocab with default params
    model = word2vec.Word2Vec(hs=1, negative=0)
    model.build_vocab(corpus)
    self.assertTrue(len(model.wv) == 1750)
    np.allclose(model.wv.get_vecattr('the', 'code'), [1, 1, 1, 0])

    # no input => "RuntimeError: you must first build vocabulary before training the model"
    self.assertRaises(RuntimeError, word2vec.Word2Vec, [])

    # input not empty, but rather completely filtered out
    self.assertRaises(RuntimeError, word2vec.Word2Vec, corpus, min_count=total_words + 1)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testTrainingFromFile(self):
    """Test word2vec training with corpus_file argument."""
    # build vocabulary, don't train yet
    with temporary_file(get_tmpfile('gensim_word2vec.tst')) as tf:
        utils.save_as_line_sentence(sentences, tf)

        model = word2vec.Word2Vec(vector_size=2, min_count=1, hs=1, negative=0)
        model.build_vocab(corpus_file=tf)
        self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2))
        self.assertTrue(model.syn1.shape == (len(model.wv), 2))

        model.train(corpus_file=tf, total_words=model.corpus_total_words, epochs=model.epochs)
        sims = model.wv.most_similar('graph', topn=10)
        # self.assertTrue(sims[0][0] == 'trees', sims)  # most similar

        # test querying for "most similar" by vector
        graph_vector = model.wv.get_vector('graph', norm=True)
        sims2 = model.wv.most_similar(positive=[graph_vector], topn=11)
        sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']  # ignore 'graph' itself
        self.assertEqual(sims, sims2)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testLocking(self):
    """Test word2vec training doesn't change locked vectors."""
    corpus = LeeCorpus()
    # build vocabulary, don't train yet
    for sg in range(2):  # test both cbow and sg
        model = word2vec.Word2Vec(vector_size=4, hs=1, negative=5, min_count=1, sg=sg, window=5)
        model.build_vocab(corpus)

        # remember two vectors
        locked0 = np.copy(model.wv.vectors[0])
        unlocked1 = np.copy(model.wv.vectors[1])
        # allocate a full lockf array (not just default single val for all)
        model.wv.vectors_lockf = np.ones(len(model.wv), dtype=np.float32)
        # lock the vector in slot 0 against change
        model.wv.vectors_lockf[0] = 0.0

        model.train(corpus, total_examples=model.corpus_count, epochs=model.epochs)
        self.assertFalse((unlocked1 == model.wv.vectors[1]).all())  # unlocked vector should vary
        self.assertTrue((locked0 == model.wv.vectors[0]).all())  # locked vector should not vary
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testEvaluateWordPairs(self):
    """Test Spearman and Pearson correlation coefficients give sane results on similarity datasets"""
    corpus = word2vec.LineSentence(datapath('head500.noblanks.cor.bz2'))
    model = word2vec.Word2Vec(corpus, min_count=3, epochs=20)
    correlation = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))
    pearson = correlation[0][0]
    spearman = correlation[1][0]
    oov = correlation[2]
    self.assertTrue(0.1 < pearson < 1.0, f"pearson {pearson} not between 0.1 & 1.0")
    self.assertTrue(0.1 < spearman < 1.0, f"spearman {spearman} not between 0.1 and 1.0")
    self.assertTrue(0.0 <= oov < 90.0, f"OOV {oov} not between 0.0 and 90.0")
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testEvaluateWordPairsFromFile(self):
    """Test Spearman and Pearson correlation coefficients give sane results on similarity datasets"""
    with temporary_file(get_tmpfile('gensim_word2vec.tst')) as tf:
        utils.save_as_line_sentence(word2vec.LineSentence(datapath('head500.noblanks.cor.bz2')), tf)

        model = word2vec.Word2Vec(corpus_file=tf, min_count=3, epochs=20)
        correlation = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))
        pearson = correlation[0][0]
        spearman = correlation[1][0]
        oov = correlation[2]
        self.assertTrue(0.1 < pearson < 1.0, f"pearson {pearson} not between 0.1 & 1.0")
        self.assertTrue(0.1 < spearman < 1.0, f"spearman {spearman} not between 0.1 and 1.0")
        self.assertTrue(0.0 <= oov < 90.0, f"OOV {oov} not between 0.0 and 90.0")
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_sg_hs(self):
    """Test skipgram w/ hierarchical softmax"""
    model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, epochs=10, workers=2)
    self.model_sanity(model)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_sg_hs_fromfile(self):
    model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, epochs=10, workers=2)
    self.model_sanity(model, with_corpus_file=True)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_sg_neg_fromfile(self):
    model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=5, epochs=10, workers=2)
    self.model_sanity(model, with_corpus_file=True)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_method_in_bulk(self):
    """Not run by default testing, but can be run locally to help tune
    stochastic aspects of tests so that they very rarely fail. EG:

        % BULK_TEST_REPS=200 METHOD_NAME=test_cbow_hs pytest test_word2vec.py -k "test_method_in_bulk"

    The method must accept a `ranks` keyword argument: an empty list into
    which a salient internal result can be reported.
    """
    failures = 0
    ranks = []
    reps = int(os.environ['BULK_TEST_REPS'])
    method_name = os.environ.get('METHOD_NAME', 'test_cbow_hs')  # by default test that specially-troublesome one
    method_fn = getattr(self, method_name)
    for i in range(reps):
        try:
            method_fn(ranks=ranks)
        except Exception as ex:
            print('%s failed: %s' % (method_name, ex))
            failures += 1
    print(ranks)
    print(np.mean(ranks))
    self.assertEqual(failures, 0, "too many failures")
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_cbow_hs_fromfile(self):
    model = word2vec.Word2Vec(
        sg=0, cbow_mean=1, alpha=0.1, window=2, hs=1, negative=0,
        min_count=5, epochs=60, workers=2, batch_words=1000
    )
    self.model_sanity(model, with_corpus_file=True)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_cbow_neg_fromfile(self):
    model = word2vec.Word2Vec(
        sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
        min_count=5, epochs=10, workers=2, sample=0
    )
    self.model_sanity(model, with_corpus_file=True)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testTrainingCbow(self):
    """Test CBOW word2vec training."""
    # to test training, make the corpus larger by repeating its sentences over and over
    # build vocabulary, don't train yet
    model = word2vec.Word2Vec(vector_size=2, min_count=1, sg=0, hs=1, negative=0)
    model.build_vocab(sentences)
    self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2))
    self.assertTrue(model.syn1.shape == (len(model.wv), 2))

    model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)
    sims = model.wv.most_similar('graph', topn=10)
    # self.assertTrue(sims[0][0] == 'trees', sims)  # most similar

    # test querying for "most similar" by vector
    graph_vector = model.wv.get_vector('graph', norm=True)
    sims2 = model.wv.most_similar(positive=[graph_vector], topn=11)
    sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']  # ignore 'graph' itself
    self.assertEqual(sims, sims2)

    # build vocab and train in one step; must be the same as above
    model2 = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, sg=0, hs=1, negative=0)
    self.models_equal(model, model2)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testTrainingCbowNegative(self):
    """Test CBOW (negative sampling) word2vec training."""
    # to test training, make the corpus larger by repeating its sentences over and over
    # build vocabulary, don't train yet
    model = word2vec.Word2Vec(vector_size=2, min_count=1, sg=0, hs=0, negative=2)
    model.build_vocab(sentences)
    self.assertTrue(model.wv.vectors.shape == (len(model.wv), 2))
    self.assertTrue(model.syn1neg.shape == (len(model.wv), 2))

    model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)
    sims = model.wv.most_similar('graph', topn=10)
    # self.assertTrue(sims[0][0] == 'trees', sims)  # most similar

    # test querying for "most similar" by vector
    graph_vector = model.wv.get_vector('graph', norm=True)
    sims2 = model.wv.most_similar(positive=[graph_vector], topn=11)
    sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']  # ignore 'graph' itself
    self.assertEqual(sims, sims2)

    # build vocab and train in one step; must be the same as above
    model2 = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, sg=0, hs=0, negative=2)
    self.models_equal(model, model2)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testSimilarBy(self):
    """Test word2vec similar_by_word and similar_by_vector."""
    model = word2vec.Word2Vec(sentences, vector_size=2, min_count=1, hs=1, negative=0)
    wordsims = model.wv.similar_by_word('graph', topn=10)
    wordsims2 = model.wv.most_similar(positive='graph', topn=10)
    vectorsims = model.wv.similar_by_vector(model.wv['graph'], topn=10)
    vectorsims2 = model.wv.most_similar([model.wv['graph']], topn=10)
    self.assertEqual(wordsims, wordsims2)
    self.assertEqual(vectorsims, vectorsims2)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testRNG(self):
    """Test word2vec results identical with identical RNG seed."""
    model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    model2 = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    self.models_equal(model, model2)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testPredictOutputWord(self):
    """Test word2vec predict_output_word method handling for negative sampling scheme."""
    # under normal circumstances
    model_with_neg = word2vec.Word2Vec(sentences, min_count=1)
    predictions_with_neg = model_with_neg.predict_output_word(['system', 'human'], topn=5)
    self.assertTrue(len(predictions_with_neg) == 5)

    # out-of-vocabulary scenario
    predictions_out_of_vocab = model_with_neg.predict_output_word(['some', 'random', 'words'], topn=5)
    self.assertEqual(predictions_out_of_vocab, None)

    # when required model parameters have been deleted
    tmpf = get_tmpfile('gensim_word2vec.tst')
    model_with_neg.wv.save_word2vec_format(tmpf, binary=True)
    kv_model_with_neg = keyedvectors.KeyedVectors.load_word2vec_format(tmpf, binary=True)
    binary_model_with_neg = word2vec.Word2Vec()
    binary_model_with_neg.wv = kv_model_with_neg
    self.assertRaises(RuntimeError, binary_model_with_neg.predict_output_word, ['system', 'human'])

    # negative sampling scheme not used
    model_without_neg = word2vec.Word2Vec(sentences, min_count=1, negative=0)
    self.assertRaises(RuntimeError, model_without_neg.predict_output_word, ['system', 'human'])
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testLoadOldModelSeparates(self):
    """Test loading an old word2vec model of indeterminate version"""
    # Model stored in multiple files
    model_file = 'word2vec_old_sep'
    model = word2vec.Word2Vec.load(datapath(model_file))
    self.assertTrue(model.wv.vectors.shape == (12, 100))
    self.assertTrue(len(model.wv) == 12)
    self.assertTrue(len(model.wv.index_to_key) == 12)
    self.assertTrue(model.syn1neg.shape == (len(model.wv), model.wv.vector_size))
    self.assertTrue(len(model.wv.vectors_lockf.shape) > 0)
    self.assertTrue(model.cum_table.shape == (12,))
    self.onlineSanity(model, trained_model=True)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_load_old_models_1_x(self):
    """Test loading 1.x models"""
    old_versions = [
        '1.0.0', '1.0.1',
    ]
    for old_version in old_versions:
        self._check_old_version(old_version)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_load_old_models_3_x(self):
    """Test loading 3.x models"""
    # test for max_final_vocab for model saved in 3.3
    model_file = 'word2vec_3.3'
    model = word2vec.Word2Vec.load(datapath(model_file))
    self.assertEqual(model.max_final_vocab, None)

    old_versions = [
        '3.0.0', '3.1.0', '3.2.0', '3.3.0', '3.4.0'
    ]
    for old_version in old_versions:
        self._check_old_version(old_version)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testBuildVocabWarning(self, loglines):
    """Test if warning is raised on non-ideal input to a word2vec model"""
    sentences = ['human', 'machine']
    model = word2vec.Word2Vec()
    model.build_vocab(sentences)
    warning = "Each 'sentences' item should be a list of words (usually unicode strings)."
    self.assertTrue(warning in str(loglines))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testTrainWarning(self, loglines):
    """Test if warning is raised if alpha rises during subsequent calls to train()"""
    sentences = [
        ['human'],
        ['graph', 'trees']
    ]
    model = word2vec.Word2Vec(min_count=1)
    model.build_vocab(sentences)
    for epoch in range(10):
        model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)
        model.alpha -= 0.002
        model.min_alpha = model.alpha
        if epoch == 5:
            model.alpha += 0.05
    warning = "Effective 'alpha' higher than previous training cycles"
    self.assertTrue(warning in str(loglines))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_sentences_should_not_be_a_generator(self):
    """Is sentences a generator object?"""
    gen = (s for s in sentences)
    self.assertRaises(TypeError, word2vec.Word2Vec, (gen,))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def test_reset_from(self):
    """Test if reset_from() uses pre-built structures from other model"""
    model = word2vec.Word2Vec(sentences, min_count=1)
    other_model = word2vec.Word2Vec(new_sentences, min_count=1)
    model.reset_from(other_model)
    self.assertEqual(model.wv.key_to_index, other_model.wv.key_to_index)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testNonzero(self):
    """Test basic functionality with a test sentence."""
    model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    sentence1 = ['human', 'interface', 'computer']
    sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
    distance = model.wv.wmdistance(sentence1, sentence2)
    # Check that distance is non-zero.
    self.assertFalse(distance == 0.0)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testSymmetry(self):
    """Check that distance is symmetric."""
    model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    sentence1 = ['human', 'interface', 'computer']
    sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
    distance1 = model.wv.wmdistance(sentence1, sentence2)
    distance2 = model.wv.wmdistance(sentence2, sentence1)
    self.assertTrue(np.allclose(distance1, distance2))
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testIdenticalSentences(self):
    """Check that the distance from a sentence to itself is zero."""
    model = word2vec.Word2Vec(sentences, min_count=1)
    sentence = ['survey', 'user', 'computer', 'system', 'response', 'time']
    distance = model.wv.wmdistance(sentence, sentence)
    self.assertEqual(0.0, distance)
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testLineSentenceWorksWithFilename(self):
    """Does LineSentence work with a filename argument?"""
    with utils.open(datapath('lee_background.cor'), 'rb') as orig:
        sentences = word2vec.LineSentence(datapath('lee_background.cor'))
        for words in sentences:
            self.assertEqual(words, utils.to_unicode(orig.readline()).split())
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testCythonLineSentenceWorksWithFilename(self):
    """Does CythonLineSentence work with a filename argument?"""
    from gensim.models import word2vec_corpusfile
    with utils.open(datapath('lee_background.cor'), 'rb') as orig:
        sentences = word2vec_corpusfile.CythonLineSentence(datapath('lee_background.cor'))
        for words in sentences:
            self.assertEqual(words, orig.readline().split())
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]
def testLineSentenceWorksWithNormalFile(self):
    """Does LineSentence work with a file object argument, rather than filename?"""
    with utils.open(datapath('head500.noblanks.cor'), 'rb') as orig:
        with utils.open(datapath('head500.noblanks.cor'), 'rb') as fin:
            sentences = word2vec.LineSentence(fin)
            for words in sentences:
                self.assertEqual(words, utils.to_unicode(orig.readline()).split())
gojomo/gensim
[ 14, 13, 14, 1, 1401498617 ]