Dataset schema: function (string, length 11 to 56k), repo_name (string, length 5 to 60), features (sequence).
def get_all_deployed_l7_policys(self):
    """Retrieve a dict of all l7policies deployed.

    The dict returned will have the following format:

    {policy_bigip_id_0: {'id': policy_id_0,
                         'tenant_id': tenant_id,
                         'hostnames': [hostnames_0]}
     ...
    }

    Where hostnames is the list of BIG-IP hostnames impacted, and the
    policy_id is the policy_bigip_id without 'wrapper_policy_'.
    """
    LOG.debug('getting all deployed l7_policys on BIG-IPs')
    deployed_l7_policys_dict = {}
    for bigip in self.get_all_bigips():
        folders = self.system_helper.get_folders(bigip)
        for folder in folders:
            tenant_id = folder[len(self.service_adapter.prefix):]
            if str(folder).startswith(self.service_adapter.prefix):
                resource = resource_helper.BigIPResourceHelper(
                    resource_helper.ResourceType.l7policy)
                deployed_l7_policys = resource.get_resources(
                    bigip, folder)
                if deployed_l7_policys:
                    for l7_policy in deployed_l7_policys:
                        l7_policy_id = l7_policy.name
                        if l7_policy_id in deployed_l7_policys_dict:
                            my_dict = \
                                deployed_l7_policys_dict[l7_policy_id]
                            my_dict['hostnames'].append(bigip.hostname)
                        else:
                            po_id = l7_policy_id.replace(
                                'wrapper_policy_', '')
                            deployed_l7_policys_dict[l7_policy_id] = {
                                'id': po_id,
                                'tenant_id': tenant_id,
                                'hostnames': [bigip.hostname]
                            }
    return deployed_l7_policys_dict
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
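For reference, a hedged sketch of the shape get_all_deployed_l7_policys() returns; the IDs, tenant, and hostnames below are hypothetical:

    # {
    #     'wrapper_policy_123abc': {
    #         'id': '123abc',
    #         'tenant_id': 'tenant-a',
    #         'hostnames': ['bigip-1.example.org', 'bigip-2.example.org'],
    #     },
    # }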
def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None,
                             hostnames=list(), listener_id=None):
    """Purge all l7_policys that exist on the BIG-IP but not in Neutron."""
    for bigip in self.get_all_bigips():
        if bigip.hostname in hostnames:
            error = None
            try:
                l7_policy_name = l7_policy_id
                partition = self.service_adapter.prefix + tenant_id
                if listener_id and partition:
                    if self.service_adapter.prefix not in listener_id:
                        listener_id = \
                            self.service_adapter.prefix + listener_id
                    li_resource = resource_helper.BigIPResourceHelper(
                        resource_helper.ResourceType.virtual).load(
                            bigip, listener_id, partition)
                    li_resource.update(policies=[])
                l7_policy = resource_helper.BigIPResourceHelper(
                    resource_helper.ResourceType.l7policy).load(
                        bigip, l7_policy_name, partition)
                l7_policy.delete()
            except HTTPError as err:
                if err.response.status_code == 404:
                    LOG.debug('l7_policy %s not on BIG-IP %s.' %
                              (l7_policy_id, bigip.hostname))
                else:
                    error = err
            except Exception as exc:
                # record the caught exception so it is logged below
                error = exc
            if error:
                kwargs = dict(
                    tenant_id=tenant_id,
                    l7_policy_id=l7_policy_id,
                    hostname=bigip.hostname,
                    listener_id=listener_id)
                LOG.exception('Exception: purge_orphaned_l7_policy({}) '
                              '"{}"'.format(kwargs, error))
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def purge_orphaned_loadbalancer(self, tenant_id=None,
                                loadbalancer_id=None, hostnames=list()):
    for bigip in self.get_all_bigips():
        if bigip.hostname in hostnames:
            try:
                va_name = self.service_adapter.prefix + loadbalancer_id
                partition = self.service_adapter.prefix + tenant_id
                va = resource_helper.BigIPResourceHelper(
                    resource_helper.ResourceType.virtual_address).load(
                        bigip, va_name, partition)
                # get virtual services (listeners)
                # referencing this virtual address
                vses = resource_helper.BigIPResourceHelper(
                    resource_helper.ResourceType.virtual).get_resources(
                        bigip, partition)
                vs_dest_compare = '/' + partition + '/' + va.name
                for vs in vses:
                    if str(vs.destination).startswith(vs_dest_compare):
                        if hasattr(vs, 'pool'):
                            pool = resource_helper.BigIPResourceHelper(
                                resource_helper.ResourceType.pool).load(
                                    bigip, os.path.basename(vs.pool),
                                    partition)
                            vs.delete()
                            pool.delete()
                        else:
                            vs.delete()
                resource_helper.BigIPResourceHelper(
                    resource_helper.ResourceType.virtual_address).delete(
                        bigip, va_name, partition)
            except HTTPError as err:
                if err.response.status_code == 404:
                    LOG.debug('loadbalancer %s not on BIG-IP %s.' %
                              (loadbalancer_id, bigip.hostname))
            except Exception as exc:
                LOG.exception('Exception purging loadbalancer %s' %
                              str(exc))
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def purge_orphaned_listener(
        self, tenant_id=None, listener_id=None, hostnames=[]):
    for bigip in self.get_all_bigips():
        if bigip.hostname in hostnames:
            try:
                listener_name = self.service_adapter.prefix + listener_id
                partition = self.service_adapter.prefix + tenant_id
                listener = resource_helper.BigIPResourceHelper(
                    resource_helper.ResourceType.virtual).load(
                        bigip, listener_name, partition)
                listener.delete()
            except HTTPError as err:
                if err.response.status_code == 404:
                    LOG.debug('listener %s not on BIG-IP %s.' %
                              (listener_id, bigip.hostname))
            except Exception as exc:
                LOG.exception('Exception purging listener %s' % str(exc))
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def create_loadbalancer(self, loadbalancer, service):
    """Create virtual server."""
    self._common_service_handler(service)
    return self._update_target(service)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_loadbalancer(self, old_loadbalancer, loadbalancer, service):
    """Update virtual server."""
    # anti-pattern: three args, unused.
    self._common_service_handler(service)
    return self._update_target(service)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def delete_loadbalancer(self, loadbalancer, service):
    """Delete loadbalancer."""
    LOG.debug("Deleting loadbalancer")
    self._common_service_handler(
        service,
        delete_partition=True,
        delete_event=True)
    return self._update_target(service)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def create_listener(self, listener, service):
    """Create virtual server."""
    LOG.debug("Creating listener")
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_listener_status,
                               service)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_listener(self, old_listener, listener, service):
    """Update virtual server."""
    LOG.debug("Updating listener")
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_listener_status,
                               service)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def delete_listener(self, listener, service):
    """Delete virtual server."""
    LOG.debug("Deleting listener")
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_listener_status,
                               service)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def create_pool(self, pool, service):
    """Create lb pool."""
    LOG.debug("Creating pool")
    # pzhang(NOTE): the pool may not be bound to a listener
    if service.get("listeners"):
        target_listener = service["listeners"][0]
        target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_pool_status,
                               service["pools"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_pool(self, old_pool, pool, service):
    """Update lb pool."""
    LOG.debug("Updating pool")
    if service.get("listeners"):
        target_listener = service["listeners"][0]
        target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_pool_status,
                               service["pools"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def delete_pool(self, pool, service):
    """Delete lb pool."""
    LOG.debug("Deleting pool")
    if service.get("listeners"):
        target_listener = service["listeners"][0]
        target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_pool_status,
                               service["pools"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def create_l7policy(self, l7policy, service):
    """Create lb l7policy."""
    LOG.debug("Creating l7policy")
    target_listener = service["listeners"][0]
    target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_l7policy_status,
                               service["l7policies"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_l7policy(self, old_l7policy, l7policy, service):
    """Update lb l7policy."""
    LOG.debug("Updating l7policy")
    target_listener = service["listeners"][0]
    target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_l7policy_status,
                               service["l7policies"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def delete_l7policy(self, l7policy, service):
    """Delete lb l7policy."""
    LOG.debug("Deleting l7policy")
    target_listener = service["listeners"][0]
    target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_l7policy_status,
                               service["l7policies"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def create_l7rule(self, l7rule, service):
    """Create lb l7rule."""
    LOG.debug("Creating l7rule")
    target_listener = service["listeners"][0]
    target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_l7rule_status,
                               service['l7policy_rules'])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_l7rule(self, old_l7rule, l7rule, service):
    """Update lb l7rule."""
    LOG.debug("Updating l7rule")
    target_listener = service["listeners"][0]
    target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_l7rule_status,
                               service['l7policy_rules'])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def delete_l7rule(self, l7rule, service):
    """Delete lb l7rule."""
    LOG.debug("Deleting l7rule")
    target_listener = service["listeners"][0]
    target_listener["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_l7rule_status,
                               service['l7policy_rules'])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def create_member(self, member, service):
    """Create pool member."""
    LOG.debug("Creating member")
    target_pool = service["pools"][0]
    target_pool["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_member_status,
                               service["members"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_member(self, old_member, member, service):
    """Update pool member."""
    LOG.debug("Updating member")
    target_pool = service["pools"][0]
    target_pool["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_member_status,
                               service["members"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def delete_member(self, member, service):
    """Delete pool member."""
    LOG.debug("Deleting member")
    target_pool = service["pools"][0]
    target_pool["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_member_status,
                               service["members"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def create_health_monitor(self, health_monitor, service):
    """Create pool health monitor."""
    LOG.debug("Creating health monitor")
    target_pool = service["pools"][0]
    target_pool["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_health_monitor_status,
                               service["healthmonitors"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_health_monitor(self, old_health_monitor, health_monitor,
                          service):
    """Update pool health monitor."""
    LOG.debug("Updating health monitor")
    target_pool = service["pools"][0]
    target_pool["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_health_monitor_status,
                               service["healthmonitors"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def delete_health_monitor(self, health_monitor, service):
    """Delete pool health monitor."""
    LOG.debug("Deleting health monitor")
    target_pool = service["pools"][0]
    target_pool["provisioning_status"] = "PENDING_UPDATE"
    self._common_service_handler(service)
    return self._update_target(service,
                               self._update_health_monitor_status,
                               service["healthmonitors"])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_stats(self, service):
    lb_stats = {}
    stats = ['clientside.bitsIn',
             'clientside.bitsOut',
             'clientside.curConns',
             'clientside.totConns']
    loadbalancer = service['loadbalancer']

    try:
        # sum virtual server stats for all BIG-IPs
        vs_stats = self.lbaas_builder.get_listener_stats(service, stats)

        # convert to bytes
        lb_stats[f5const.F5_STATS_IN_BYTES] = \
            vs_stats['clientside.bitsIn'] / 8
        lb_stats[f5const.F5_STATS_OUT_BYTES] = \
            vs_stats['clientside.bitsOut'] / 8
        lb_stats[f5const.F5_STATS_ACTIVE_CONNECTIONS] = \
            vs_stats['clientside.curConns']
        lb_stats[f5const.F5_STATS_TOTAL_CONNECTIONS] = \
            vs_stats['clientside.totConns']

        # update Neutron
        self.plugin_rpc.update_loadbalancer_stats(
            loadbalancer['id'], lb_stats)
    except Exception as e:
        LOG.error("Error getting loadbalancer stats: %s", e.message)
    finally:
        return lb_stats
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def fdb_remove(self, fdb):
    # Remove (L2toL3) forwarding database entries
    for bigip in self.get_all_bigips():
        self.network_builder.remove_bigip_fdb(bigip, fdb)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def tunnel_update(self, **kwargs):
    # Tunnel Update from Neutron Core RPC
    pass
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def sync(self, service):
    """Sync service definition to device."""
    # loadbalancer and plugin_rpc may not be set
    lb_id = service.get('loadbalancer', dict()).get('id', '')
    if hasattr(self, 'plugin_rpc') and self.plugin_rpc and lb_id:
        # Get the latest service. It may have changed.
        service = self.plugin_rpc.get_service_by_loadbalancer_id(lb_id)
    if service.get('loadbalancer', None):
        self.lbaas_builder.to_sync = True
        self._common_service_handler(service)
        self.lbaas_builder.to_sync = False

        # pzhang(NOTE): move the Neutron db update out here for the lb tree
        if self.do_service_update:
            self.update_service_status(service)

        loadbalancer = service.get('loadbalancer', {})
        lb_provisioning_status = loadbalancer.get("provisioning_status",
                                                  f5const.F5_ERROR)
        lb_pending = \
            (lb_provisioning_status == f5const.F5_PENDING_CREATE or
             lb_provisioning_status == f5const.F5_PENDING_UPDATE)
        return lb_pending
    else:
        LOG.debug("Attempted sync of deleted pool")
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def backup_configuration(self):
    # Save Configuration on Devices
    for bigip in self.get_all_bigips():
        LOG.debug('_backup_configuration: saving device %s.'
                  % bigip.hostname)
        self.cluster_manager.save_config(bigip)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def service_rename_required(self, service):
    rename_required = False

    # Returns whether the bigip has a pool for the service
    if not service['loadbalancer']:
        return False

    bigips = self.get_config_bigips()
    loadbalancer = service['loadbalancer']

    # Does the correctly named virtual address exist?
    for bigip in bigips:
        virtual_address = VirtualAddress(self.service_adapter,
                                         loadbalancer)
        if not virtual_address.exists(bigip):
            rename_required = True
            break

    return rename_required
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _service_exists(self, service):
    # Returns whether the bigip has the service defined
    if not service['loadbalancer']:
        return False
    loadbalancer = service['loadbalancer']

    folder_name = self.service_adapter.get_folder_name(
        loadbalancer['tenant_id']
    )

    if self.network_builder:
        # append route domain to member address
        self.network_builder._annotate_service_route_domains(service)

    # Foreach bigip in the cluster:
    for bigip in self.get_config_bigips():
        # Does the tenant folder exist?
        if not self.system_helper.folder_exists(bigip, folder_name):
            LOG.error("Folder %s does not exist on bigip: %s" %
                      (folder_name, bigip.hostname))
            return False

        # Get the virtual address
        virtual_address = VirtualAddress(self.service_adapter,
                                         loadbalancer)
        if not virtual_address.exists(bigip):
            LOG.error("Virtual address %s(%s) does not "
                      "exist on bigip: %s" %
                      (virtual_address.name,
                       virtual_address.address,
                       bigip.hostname))
            return False

        # Ensure that each virtual service exists.
        for listener in service['listeners']:
            svc = {"loadbalancer": loadbalancer,
                   "listener": listener}
            virtual_server = self.service_adapter.get_virtual_name(svc)
            if not self.vs_manager.exists(bigip,
                                          name=virtual_server['name'],
                                          partition=folder_name):
                LOG.error("Virtual /%s/%s not found on bigip: %s" %
                          (virtual_server['name'], folder_name,
                           bigip.hostname))
                return False

        # Ensure that each pool exists.
        for pool in service['pools']:
            svc = {"loadbalancer": loadbalancer,
                   "pool": pool}
            bigip_pool = self.service_adapter.get_pool(svc)
            if not self.pool_manager.exists(
                    bigip,
                    name=bigip_pool['name'],
                    partition=folder_name):
                LOG.error("Pool /%s/%s not found on bigip: %s" %
                          (folder_name, bigip_pool['name'],
                           bigip.hostname))
                return False
            else:
                deployed_pool = self.pool_manager.load(
                    bigip,
                    name=bigip_pool['name'],
                    partition=folder_name)
                deployed_members = \
                    deployed_pool.members_s.get_collection()

                # First check that the number of members deployed
                # is equal to the number in the service.
                if len(deployed_members) != len(pool['members']):
                    LOG.error("Pool %s member count mismatch: "
                              "deployed %d != service %d" %
                              (bigip_pool['name'],
                               len(deployed_members),
                               len(pool['members'])))
                    return False

                # Ensure each pool member exists
                for member in service['members']:
                    if member['pool_id'] == pool['id']:
                        lb = self.lbaas_builder
                        pool = lb.get_pool_by_id(
                            service, member["pool_id"])
                        svc = {"loadbalancer": loadbalancer,
                               "member": member,
                               "pool": pool}
                        if not lb.pool_builder.member_exists(svc, bigip):
                            LOG.error("Pool member not found: %s" %
                                      svc['member'])
                            return False

        # Ensure that each health monitor exists.
        for healthmonitor in service['healthmonitors']:
            svc = {"loadbalancer": loadbalancer,
                   "healthmonitor": healthmonitor}
            monitor = self.service_adapter.get_healthmonitor(svc)
            monitor_ep = self._get_monitor_endpoint(bigip, svc)
            if not monitor_ep.exists(name=monitor['name'],
                                     partition=folder_name):
                LOG.error("Monitor /%s/%s not found on bigip: %s" %
                          (monitor['name'], folder_name, bigip.hostname))
                return False

    return True
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _common_service_handler(self, service,
                            delete_partition=False,
                            delete_event=False):
    # Assure that the service is configured on bigip(s)
    start_time = time()
    lb_pending = True
    self.do_service_update = True

    if self.conf.trace_service_requests:
        self.trace_service_requests(service)

    loadbalancer = service.get("loadbalancer", None)
    if not loadbalancer:
        LOG.error("_common_service_handler: Service loadbalancer is None")
        return lb_pending

    lb_provisioning_status = loadbalancer.get("provisioning_status",
                                              f5const.F5_ERROR)
    try:
        try:
            self.tenant_manager.assure_tenant_created(service)
        except Exception as e:
            LOG.error("Tenant folder creation exception: %s", e.message)
            if lb_provisioning_status != f5const.F5_PENDING_DELETE:
                loadbalancer['provisioning_status'] = \
                    f5const.F5_ERROR
            raise e
        LOG.debug(" _assure_tenant_created took %.5f secs" %
                  (time() - start_time))

        traffic_group = self.service_to_traffic_group(service)
        loadbalancer['traffic_group'] = traffic_group

        if self.network_builder:
            start_time = time()
            try:
                self.network_builder.prep_service_networking(
                    service, traffic_group)
            except f5ex.NetworkNotReady as error:
                LOG.debug("Network creation deferred until network "
                          "definition is completed: %s",
                          error.message)
                if not delete_event:
                    self.do_service_update = False
                    raise error
            except Exception as error:
                LOG.error("Prep-network exception: icontrol_driver: %s",
                          error.message)
                if lb_provisioning_status != f5const.F5_PENDING_DELETE:
                    loadbalancer['provisioning_status'] = \
                        f5const.F5_ERROR
                if not delete_event:
                    raise error
            finally:
                if time() - start_time > .001:
                    LOG.debug(" _prep_service_networking "
                              "took %.5f secs" % (time() - start_time))

        all_subnet_hints = {}
        for bigip in self.get_config_bigips():
            # check_for_delete_subnets:
            #     keep track of which subnets we should check to delete
            #     for a deleted vip or member
            # do_not_delete_subnets:
            #     If we add an IP to a subnet we must not delete the subnet
            all_subnet_hints[bigip.device_name] = \
                {'check_for_delete_subnets': {},
                 'do_not_delete_subnets': []}

        LOG.debug("XXXXXXXXX: Pre assure service")
        self.lbaas_builder.assure_service(service,
                                          traffic_group,
                                          all_subnet_hints)
        LOG.debug("XXXXXXXXX: Post assure service")

        if self.network_builder:
            start_time = time()
            try:
                self.network_builder.post_service_networking(
                    service, all_subnet_hints)
            except Exception as error:
                LOG.error("Post-network exception: icontrol_driver: %s",
                          error.message)
                if lb_provisioning_status != f5const.F5_PENDING_DELETE:
                    loadbalancer['provisioning_status'] = \
                        f5const.F5_ERROR
                    raise error
            if time() - start_time > .001:
                LOG.debug(" _post_service_networking "
                          "took %.5f secs" % (time() - start_time))

    except f5ex.NetworkNotReady as error:
        pass
    except Exception as err:
        LOG.exception(err)
    finally:
        # only delete partition if loadbalancer is being deleted
        if lb_provisioning_status == f5const.F5_PENDING_DELETE:
            self.tenant_manager.assure_tenant_cleanup(service,
                                                      all_subnet_hints)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _update_member_status(self, members, timed_out=False):
    """Update member status in OpenStack."""
    for member in members:
        if 'provisioning_status' in member:
            provisioning_status = member['provisioning_status']

            if provisioning_status in self.positive_plugin_const_state:
                if timed_out and \
                        provisioning_status != f5const.F5_ACTIVE:
                    member['provisioning_status'] = f5const.F5_ERROR
                    operating_status = f5const.F5_OFFLINE
                else:
                    member['provisioning_status'] = f5const.F5_ACTIVE
                    operating_status = f5const.F5_ONLINE

                self.plugin_rpc.update_member_status(
                    member['id'],
                    member['provisioning_status'],
                    operating_status
                )
            elif provisioning_status == f5const.F5_PENDING_DELETE:
                if not member.get('parent_pool_deleted', False):
                    self.plugin_rpc.member_destroyed(
                        member['id'])
            elif provisioning_status == f5const.F5_ERROR:
                self.plugin_rpc.update_member_status(
                    member['id'],
                    f5const.F5_ERROR,
                    f5const.F5_OFFLINE)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _update_pool_status(self, pools):
    """Update pool status in OpenStack."""
    for pool in pools:
        if 'provisioning_status' in pool:
            provisioning_status = pool['provisioning_status']
            if provisioning_status in self.positive_plugin_const_state:
                self.plugin_rpc.update_pool_status(
                    pool['id'],
                    f5const.F5_ACTIVE,
                    f5const.F5_ONLINE
                )
                pool['provisioning_status'] = f5const.F5_ACTIVE
            elif provisioning_status == f5const.F5_PENDING_DELETE:
                self.plugin_rpc.pool_destroyed(
                    pool['id'])
            elif provisioning_status == f5const.F5_ERROR:
                self.plugin_rpc.update_pool_status(pool['id'])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _update_listener_status(self, service):
    """Update listener status in OpenStack."""
    listeners = service['listeners']
    for listener in listeners:
        if 'provisioning_status' in listener:
            provisioning_status = listener['provisioning_status']
            if provisioning_status in self.positive_plugin_const_state:
                self.plugin_rpc.update_listener_status(
                    listener['id'],
                    f5const.F5_ACTIVE,
                    listener['operating_status']
                )
                listener['provisioning_status'] = \
                    f5const.F5_ACTIVE
            elif provisioning_status == f5const.F5_PENDING_DELETE:
                self.plugin_rpc.listener_destroyed(
                    listener['id'])
            elif provisioning_status == f5const.F5_ERROR:
                self.plugin_rpc.update_listener_status(
                    listener['id'],
                    provisioning_status,
                    f5const.F5_OFFLINE)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _update_l7rule_status(self, l7rules):
    """Update l7rule status in OpenStack."""
    for l7rule in l7rules:
        if 'provisioning_status' in l7rule:
            provisioning_status = l7rule['provisioning_status']
            if provisioning_status in self.positive_plugin_const_state:
                self.plugin_rpc.update_l7rule_status(
                    l7rule['id'],
                    l7rule['policy_id'],
                    f5const.F5_ACTIVE,
                    f5const.F5_ONLINE
                )
            elif provisioning_status == f5const.F5_PENDING_DELETE:
                self.plugin_rpc.l7rule_destroyed(
                    l7rule['id'])
            elif provisioning_status == f5const.F5_ERROR:
                self.plugin_rpc.update_l7rule_status(
                    l7rule['id'], l7rule['policy_id'])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _update_l7policy_status(self, l7policies):
    """Update l7policy status in OpenStack."""
    LOG.debug("_update_l7policy_status")
    for l7policy in l7policies:
        if 'provisioning_status' in l7policy:
            provisioning_status = l7policy['provisioning_status']
            if provisioning_status in self.positive_plugin_const_state:
                self.plugin_rpc.update_l7policy_status(
                    l7policy['id'],
                    f5const.F5_ACTIVE,
                    f5const.F5_ONLINE
                )
            elif provisioning_status == f5const.F5_PENDING_DELETE:
                LOG.debug("calling l7policy_destroyed")
                self.plugin_rpc.l7policy_destroyed(
                    l7policy['id'])
            elif provisioning_status == f5const.F5_ERROR:
                self.plugin_rpc.update_l7policy_status(l7policy['id'])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _update_loadbalancer_status(self, service, timed_out=False):
    """Update loadbalancer status in OpenStack."""
    loadbalancer = service.get('loadbalancer', {})
    provisioning_status = loadbalancer.get('provisioning_status',
                                           f5const.F5_ERROR)

    if provisioning_status in self.positive_plugin_const_state:
        if timed_out:
            operating_status = f5const.F5_OFFLINE
            if provisioning_status == f5const.F5_PENDING_CREATE:
                loadbalancer['provisioning_status'] = \
                    f5const.F5_ERROR
            else:
                loadbalancer['provisioning_status'] = \
                    f5const.F5_ACTIVE
        else:
            operating_status = f5const.F5_ONLINE
            loadbalancer['provisioning_status'] = \
                f5const.F5_ACTIVE

        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'],
            loadbalancer['provisioning_status'],
            operating_status)
    elif provisioning_status == f5const.F5_PENDING_DELETE:
        self.plugin_rpc.loadbalancer_destroyed(
            loadbalancer['id'])
    elif provisioning_status == f5const.F5_ERROR:
        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'],
            provisioning_status,
            f5const.F5_OFFLINE)
    elif provisioning_status == f5const.F5_ACTIVE:
        LOG.debug('Loadbalancer provisioning status is active')
    else:
        LOG.error('Loadbalancer provisioning status is invalid')
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def update_operating_status(self, service):
    if 'members' in service:
        if self.network_builder:
            # append route domain to member address
            try:
                self.network_builder._annotate_service_route_domains(
                    service)
            except f5ex.InvalidNetworkType as exc:
                LOG.warning(exc.msg)
                return

        # get current member status
        self.lbaas_builder.update_operating_status(service)

        # update Neutron
        for member in service['members']:
            if member['provisioning_status'] == f5const.F5_ACTIVE:
                operating_status = member.get('operating_status', None)
                self.plugin_rpc.update_member_status(
                    member['id'],
                    provisioning_status=None,
                    operating_status=operating_status)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def service_to_traffic_group(self, service):
    # Hash the service tenant id to an index of a traffic group,
    # i.e., which iControlDriver.__traffic_group the tenant is "in".
    return self.tenant_to_traffic_group(
        service['loadbalancer']['tenant_id'])
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_bigip(self):
    hostnames = sorted(list(self.__bigips))
    for host in hostnames:
        if hasattr(self.__bigips[host], 'status') and \
                self.__bigips[host].status == 'active':
            return self.__bigips[host]
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_all_bigips(self):
    return_bigips = []
    for host in list(self.__bigips):
        if hasattr(self.__bigips[host], 'status') and \
                self.__bigips[host].status == 'active':
            return_bigips.append(self.__bigips[host])
    return return_bigips
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_active_bigips(self):
    return self.get_all_bigips()
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_inbound_throughput(self, bigip, global_statistics=None):
    return self.stat_helper.get_inbound_throughput(
        bigip, global_stats=global_statistics)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_throughput(self, bigip=None, global_statistics=None):
    return self.stat_helper.get_throughput(
        bigip, global_stats=global_statistics)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_ssltps(self, bigip=None, global_statistics=None):
    return self.stat_helper.get_active_SSL_TPS(
        bigip, global_stats=global_statistics)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_clientssl_profile_count(self, bigip=None, global_statistics=None):
    return ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_tunnel_count(self, bigip=None, global_statistics=None):
    return self.network_helper.get_tunnel_count(bigip)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_route_domain_count(self, bigip=None, global_statistics=None):
    return self.network_helper.get_route_domain_count(bigip)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _validate_bigip_version(self, bigip, hostname):
    # Ensure the BIG-IP has a sufficient version
    major_version = self.system_helper.get_major_version(bigip)
    if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
        raise f5ex.MajorVersionValidateFailed(
            'Device %s must be at least TMOS %s.%s' %
            (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
             f5const.MIN_TMOS_MINOR_VERSION))
    minor_version = self.system_helper.get_minor_version(bigip)
    # Only enforce the minor floor at the minimum major version;
    # otherwise a newer major release with a lower minor number
    # would be rejected incorrectly.
    if major_version == f5const.MIN_TMOS_MAJOR_VERSION and \
            minor_version < f5const.MIN_TMOS_MINOR_VERSION:
        raise f5ex.MinorVersionValidateFailed(
            'Device %s must be at least TMOS %s.%s' %
            (hostname, f5const.MIN_TMOS_MAJOR_VERSION,
             f5const.MIN_TMOS_MINOR_VERSION))
    return major_version, minor_version
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def dense_prediction_cell_hparams():
    """DensePredictionCell HParams.

    Returns:
      A dictionary of hyper-parameters used for dense prediction cell with
      keys:
        - reduction_size: Integer, the number of output filters for each
            operation inside the cell.
        - dropout_on_concat_features: Boolean, apply dropout on the
            concatenated features or not.
        - dropout_on_projection_features: Boolean, apply dropout on the
            projection features or not.
        - dropout_keep_prob: Float, when `dropout_on_concat_features` or
            `dropout_on_projection_features` is True, the `keep_prob` value
            used in the dropout operation.
        - concat_channels: Integer, the concatenated features will be
            channel-reduced to `concat_channels` channels.
        - conv_rate_multiplier: Integer, used to multiply the convolution
            rates. This is useful in the case when the output_stride is
            changed from 16 to 8, and we need to double the convolution
            rates correspondingly.
    """
    return {
        'reduction_size': 256,
        'dropout_on_concat_features': True,
        'dropout_on_projection_features': False,
        'dropout_keep_prob': 0.9,
        'concat_channels': 256,
        'conv_rate_multiplier': 1,
    }
googleinterns/wss
[ 142, 21, 142, 9, 1597440534 ]
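A minimal usage sketch of these defaults, assuming the DensePredictionCell class shown in the next row; the override value is illustrative:

    # Double the convolution rates when output_stride changes from 16 to 8,
    # keeping every other hyper-parameter at its default.
    hparams = dense_prediction_cell_hparams()
    hparams.update({'conv_rate_multiplier': 2})
    assert hparams['reduction_size'] == 256      # defaults are preserved
    assert hparams['conv_rate_multiplier'] == 2  # only the override changes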
def __init__(self, config, hparams=None):
    """Initializes the dense prediction cell.

    Args:
      config: A dictionary storing the architecture of a dense prediction
        cell.
      hparams: A dictionary of hyper-parameters, provided by users. This
        dictionary will be used to update the default dictionary returned
        by dense_prediction_cell_hparams().

    Raises:
      ValueError: If `conv_rate_multiplier` has value < 1.
    """
    self.hparams = dense_prediction_cell_hparams()
    if hparams is not None:
        self.hparams.update(hparams)
    self.config = config

    # Check values in hparams are valid or not.
    if self.hparams['conv_rate_multiplier'] < 1:
        raise ValueError('conv_rate_multiplier cannot have value < 1.')
googleinterns/wss
[ 142, 21, 142, 9, 1597440534 ]
def _parse_operation(self, config, crop_size, output_stride,
                     image_pooling_crop_size=None):
    """Parses one operation.

    When 'operation' is 'pyramid_pooling', we compute the required
    hyper-parameters and save them in config.

    Args:
      config: A dictionary storing required hyper-parameters for one
        operation.
      crop_size: A list of two integers, [crop_height, crop_width],
        specifying the whole patch crop size.
      output_stride: Integer, output stride value for extracted features.
      image_pooling_crop_size: A list of two integers, [crop_height,
        crop_width], specifying the crop size for image pooling operations.
        Note that we decouple the whole patch crop_size and
        image_pooling_crop_size, as one could perform the image_pooling
        with different crop sizes.

    Returns:
      A dictionary storing the related information for the operation.
    """
    if config[_OP] == _PYRAMID_POOLING:
        (config[_TARGET_SIZE],
         config[_KERNEL]) = self._get_pyramid_pooling_arguments(
             crop_size=crop_size,
             output_stride=output_stride,
             image_grid=config[_GRID_SIZE],
             image_pooling_crop_size=image_pooling_crop_size)

    return config
googleinterns/wss
[ 142, 21, 142, 9, 1597440534 ]
def _tryGevent():
    global gevent, geventEvent
    if gevent and geventEvent:
        return False
    try:
        import gevent
        from gevent import event as geventEvent
        return True
    except ImportError:
        raise ValueError('gevent not found')
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
def _child(target, args, kwargs):
    """Wrapper function that runs in child process.

    Resets gevent/libev state and executes user-given function.
    """
    _tryGevent()
    _reset_signal_handlers()
    gevent.reinit()
    hub = gevent.get_hub()
    del hub.threadpool
    hub._threadpool = None
    hub.destroy(destroy_loop=True)
    h = gevent.get_hub(default=True)
    assert h.loop.default, 'Could not create libev default event loop.'
    target(*args, **kwargs)
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
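A hedged sketch of how _child might be driven; the target function and the direct multiprocessing wiring here are assumptions for illustration, not the library's actual entry point:

    import multiprocessing

    def _work():
        # Runs in the child on a freshly created libev default loop.
        print('child is running on its own event loop')

    # Hypothetical call site; the real library wraps this in its own API.
    p = multiprocessing.Process(target=_child, args=(_work, (), {}))
    p.start()
    p.join()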
def start(self):
    _tryGevent()
    # Start grabbing SIGCHLD within libev event loop.
    gevent.get_hub().loop.install_sigchld()
    # Run new process (based on `fork()` on POSIX-compliant systems).
    super(_GProcess, self).start()
    # The occurrence of SIGCHLD is recorded asynchronously in libev.
    # This guarantees proper behavior even if the child watcher is
    # started after the child exits. Start child watcher now.
    self._sigchld_watcher = gevent.get_hub().loop.child(self.pid)
    self._returnevent = gevent.event.Event()
    self._sigchld_watcher.start(self._on_sigchld, self._sigchld_watcher)
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
def is_alive(self):
    assert self._popen is not None, "Process not yet started."
    if self._popen.returncode is None:
        return True
    return False
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
def exitcode(self):
    if self._popen is None:
        return None
    return self._popen.returncode
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
def join(self, timeout=None):
    """Wait cooperatively until child process terminates or timeout occurs.

    :arg timeout: ``None`` (default) or a time in seconds. The method
        simply returns upon timeout expiration. The state of the process
        has to be identified via ``is_alive()``.
    """
    assert self._parent_pid == os.getpid(), "I'm not parent of this child."
    assert self._popen is not None, 'Can only join a started process.'
    # Resemble multiprocessing's join() method while replacing
    # `self._popen.wait(timeout)` with
    # `self._returnevent.wait(timeout)`.
    self._returnevent.wait(timeout)
    if self._popen.returncode is not None:
        if hasattr(multiprocessing.process, '_children'):
            # This is for Python 3.4.
            kids = multiprocessing.process._children
        else:
            # For Python 2.6, 2.7, 3.3.
            kids = multiprocessing.process._current_process._children
        kids.discard(self)
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
def _reset_signal_handlers():
    for s in _signals_to_reset:
        if s < signal.NSIG:
            signal.signal(s, signal.SIG_DFL)
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
def _reraise(tp, value, tb=None):
    if value is None:
        value = tp()
    if value.__traceback__ is not tb:
        raise value.with_traceback(tb)
    raise value
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
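A small usage sketch for _reraise above (Python 3 semantics; the triggering error is illustrative):

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        exc_info = sys.exc_info()

    try:
        _reraise(*exc_info)  # raises the original ZeroDivisionError again
    except ZeroDivisionError as err:
        assert err.__traceback__ is not None  # original traceback preserved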
def __exec(_code_, _globs_=None, _locs_=None):
    """Execute code in a namespace."""
    if _globs_ is None:
        frame = sys._getframe(1)
        _globs_ = frame.f_globals
        if _locs_ is None:
            _locs_ = frame.f_locals
        del frame
    elif _locs_ is None:
        _locs_ = _globs_
    exec("""exec _code_ in _globs_, _locs_""")
byaka/flaskJSONRPCServer
[ 2, 1, 2, 39, 1429860383 ]
def test_feedback_001(self):
    """Test the feedback algorithm."""
    mi = 1
    ma = 256
    base = 3

    obj = ExponentialTimeFeedback(min_time=mi,
                                  max_time=ma,
                                  base=base)
    # Initial state.
    self.assertEqual(False, obj.feedback())
    obj.wait()  # wait = 1

    # 3 ^ 0 == 1 (wait is 1)
    self.assertEqual(True, obj.feedback())

    # Test if it reset correctly.
    self.assertEqual(False, obj.feedback())

    # Increase the sensing time: 3^1 = 3.
    obj.increase_time()
    self.assertEqual(False, obj.feedback())
    obj.wait()  # wait = 1
    self.assertEqual(False, obj.feedback())
    obj.wait()  # wait = 2
    obj.wait()  # wait = 3
    self.assertEqual(True, obj.feedback())

    # wait gets back to 0.
    self.assertEqual(False, obj.feedback())

    obj.decrease_time()  # reset time 3^0 = 1
    obj.wait()  # wait = 1
    self.assertEqual(True, obj.feedback())  # wait gets back to 0
ComputerNetworks-UFRGS/OpERA
[ 3, 2, 3, 1, 1374231621 ]
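For readability, here is a minimal ExponentialTimeFeedback sketch that satisfies the test above; it is an assumption for illustration, not the project's actual implementation:

    class ExponentialTimeFeedback(object):
        """Fire feedback() once every base**exponent calls to wait()."""

        def __init__(self, min_time, max_time, base):
            self.min_time = min_time  # kept for signature compatibility
            self.max_time = max_time  # kept for signature compatibility
            self.base = base
            self.exponent = 0  # current sensing-time exponent
            self.waited = 0    # wait() calls since the last positive feedback

        def wait(self):
            self.waited += 1

        def feedback(self):
            # True only once enough wait() calls accumulated; then reset.
            if self.waited >= self.base ** self.exponent:
                self.waited = 0
                return True
            return False

        def increase_time(self):
            self.exponent += 1

        def decrease_time(self):
            self.exponent = max(0, self.exponent - 1)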
def __save_page(self, data, url, outputdir):
    """Save the page content with the specific url to the local path."""
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    filename = self.__validate_name(url)
    f = open(outputdir + os.sep + filename, 'w')
    f.writelines(data)
    f.close()
onehao/opensource
[ 1, 1, 1, 1, 1414656394 ]
def runoff_pitt(precip, evaptrans, soil_type, land_use):
    """The Pitt Small Storm Hydrology method.

    The output is a runoff value in inches. This uses numpy to make a
    linear interpolation between tabular values to calculate the exact
    runoff for a given value.

    `precip` is the amount of precipitation in inches.
    """
    runoff_ratios = lookup_pitt_runoff(soil_type, land_use)

    runoff_ratio = np.interp(precip,
                             runoff_ratios['precip'],
                             runoff_ratios['Rv'])
    runoff = precip * runoff_ratio

    return min(runoff, precip - evaptrans)
WikiWatershed/tr-55
[ 9, 8, 9, 9, 1427290590 ]
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
    """The runoff equation from the TR-55 document.

    The output is a runoff value in inches.

    `precip` is the amount of precipitation in inches.
    """
    curve_number = lookup_cn(soil_type, land_use)
    if nrcs_cutoff(precip, curve_number):
        return 0.0
    potential_retention = (1000.0 / curve_number) - 10
    initial_abs = 0.2 * potential_retention
    precip_minus_initial_abs = precip - initial_abs
    numerator = pow(precip_minus_initial_abs, 2)
    denominator = (precip_minus_initial_abs + potential_retention)
    runoff = numerator / denominator
    return min(runoff, precip - evaptrans)
WikiWatershed/tr-55
[ 9, 8, 9, 9, 1427290590 ]
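A worked instance of the curve-number arithmetic in runoff_nrcs above (the CN and precipitation values are illustrative):

    # CN = 80  ->  potential_retention S = 1000.0/80 - 10 = 2.5 in
    #              initial_abs Ia = 0.2 * 2.5 = 0.5 in
    # P = 3.0 in, so runoff Q = (3.0 - 0.5)**2 / ((3.0 - 0.5) + 2.5)
    #                         = 6.25 / 5.0 = 1.25 in (before the et clamp)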
def clamp(runoff, et, inf, precip):
    """Ensure that runoff + et + inf <= precip.

    NOTE: Infiltration is normally independent of the precipitation
    level, but this function introduces a slight dependency (that is, at
    very low levels of precipitation, this function can cause
    infiltration to be smaller than it ordinarily would be).
    """
    total = runoff + et + inf
    if total > precip:
        scale = precip / total
        runoff *= scale
        et *= scale
        inf *= scale
    return (runoff, et, inf)
WikiWatershed/tr-55
[ 9, 8, 9, 9, 1427290590 ]
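For example (illustrative numbers):

    # total = 1.0 + 0.5 + 0.5 = 2.0 exceeds precip = 1.5, so every term is
    # scaled by 1.5 / 2.0 = 0.75:
    assert clamp(1.0, 0.5, 0.5, 1.5) == (0.75, 0.375, 0.375)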
def create_unmodified_census(census):
    """Create a cell census, ignoring any modifications.

    The output is suitable for use as input to `simulate_water_quality`.
    """
    unmod = copy.deepcopy(census)
    unmod.pop('modifications', None)
    return unmod
WikiWatershed/tr-55
[ 9, 8, 9, 9, 1427290590 ]
def simulate_water_quality(tree, cell_res, fn,
                           pct=1.0, current_cell=None, precolumbian=False):
    """Perform a water quality simulation by doing simulations on each of
    the cell types (leaves), then adding them together by summing the
    values of a node's subtrees and storing them at that node.

    `tree` is the (sub)tree of cell distributions that is currently under
    consideration.

    `pct` is the percentage of calculated water volume to retain.

    `cell_res` is the size of each cell/pixel in meters squared (used for
    turning inches of water into volumes of water).

    `fn` is a function that takes a cell type and a number of cells and
    returns a dictionary containing runoff, et, and inf as volumes.

    `current_cell` is the cell type for the present node.
    """
    # Internal node.
    if 'cell_count' in tree and 'distribution' in tree:
        n = tree['cell_count']

        # simulate subtrees
        if n != 0:
            tally = {}
            for cell, subtree in tree['distribution'].items():
                simulate_water_quality(subtree, cell_res, fn,
                                       pct, cell, precolumbian)
                subtree_ex_dist = subtree.copy()
                subtree_ex_dist.pop('distribution', None)
                tally = dict_plus(tally, subtree_ex_dist)
            tree.update(tally)  # update this node

        # effectively a leaf
        elif n == 0:
            for pol in get_pollutants():
                tree[pol] = 0.0

    # Leaf node.
    elif 'cell_count' in tree and 'distribution' not in tree:
        # the number of cells covered by this leaf
        n = tree['cell_count']

        # canonicalize the current_cell string
        split = current_cell.split(':')
        if len(split) == 2:
            split.append('')
        if precolumbian:
            split[1] = make_precolumbian(split[1])
        current_cell = '%s:%s:%s' % tuple(split)

        # run the runoff model on this leaf
        result = fn(current_cell, n)  # runoff, et, inf
        runoff_adjustment = result['runoff-vol'] - \
            (result['runoff-vol'] * pct)
        result['runoff-vol'] -= runoff_adjustment
        result['inf-vol'] += runoff_adjustment
        tree.update(result)

        # perform water quality calculation
        if n != 0:
            soil_type, land_use, bmp = split
            runoff_per_cell = result['runoff-vol'] / n
            liters = get_volume_of_runoff(runoff_per_cell, n, cell_res)
            for pol in get_pollutants():
                tree[pol] = get_pollutant_load(land_use, pol, liters)
WikiWatershed/tr-55
[ 9, 8, 9, 9, 1427290590 ]
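An illustrative census (sub)tree in the shape this function walks; the soil:land_use:bmp cell-type keys below are hypothetical:

    tree = {
        'cell_count': 3,
        'distribution': {
            'a:developed_low:': {'cell_count': 2},    # leaf: 2 cells
            'b:deciduous_forest:': {'cell_count': 1}  # leaf: 1 cell
        },
    }
    # simulate_water_quality(tree, cell_res, fn) runs `fn` on each leaf,
    # then sums the resulting runoff/et/inf volumes into the root node.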
def compute_bmp_effect(census, m2_per_pixel, precip):
    """Compute the overall amount of water retained by
    infiltration/retention-type BMPs.

    The result is the percent of runoff remaining after water is trapped
    in infiltration/retention BMPs.
    """
    meters_per_inch = 0.0254
    cubic_meters = census['runoff-vol'] * meters_per_inch * m2_per_pixel
    # 'runoff-vol' in census is in inches * #cells
    bmp_dict = census.get('BMPs', {})
    bmp_keys = set(bmp_dict.keys())

    reduction = 0.0
    for bmp in set.intersection(set(get_bmps()), bmp_keys):
        bmp_area = bmp_dict[bmp]
        storage_space = lookup_bmp_storage(bmp) * bmp_area
        max_reduction = (lookup_bmp_drainage_ratio(bmp) * bmp_area *
                         precip * meters_per_inch)
        bmp_reduction = min(max_reduction, storage_space)
        reduction += bmp_reduction

    return 0 if not cubic_meters else \
        max(0.0, cubic_meters - reduction) / cubic_meters
WikiWatershed/tr-55
[ 9, 8, 9, 9, 1427290590 ]
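A numeric sketch of the retention arithmetic above; all figures (runoff volume, BMP area, storage) are hypothetical:

    # With census['runoff-vol'] = 2000 inch-cells and m2_per_pixel = 10:
    #     cubic_meters = 2000 * 0.0254 * 10 = 508 m^3 of runoff
    # A BMP covering 100 m^2 with 0.3 m^3/m^2 of storage caps its
    # reduction at 30 m^3, so if its drainage delivers more than that,
    #     reduction = 30 and the result is (508 - 30) / 508 ~= 0.94,
    # i.e. roughly 94% of the runoff remains after the BMP fills up.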
def simulate_day(census, precip, cell_res=10, precolumbian=False):
    """Simulate a day, including water quality effects of modifications.

    `census` contains a distribution of cell-types in the area of
    interest.

    `cell_res` is as described in `simulate_water_quality`.

    `precolumbian` indicates that artificial types should be turned into
    forest.
    """
    et_max = 0.207
    # From the EPA WaterSense data finder for the Philadelphia airport
    # (19153), converted to a daily number in inches per day.
    # http://www3.epa.gov/watersense/new_homes/wb_data_finder.html
    # TODO: include Potential Max ET as a data layer from CGIAR
    # http://csi.cgiar.org/aridity/Global_Aridity_PET_Methodolgy.asp

    if 'modifications' in census:
        verify_census(census)

    def fn(cell, cell_count):
        # Compute et for cell type
        split = cell.split(':')
        if len(split) == 2:
            (land_use, bmp) = split
        else:
            (_, land_use, bmp) = split
        et = et_max * lookup_ki(bmp or land_use)

        # Simulate the cell for one day
        return simulate_cell_day(precip, et, cell, cell_count)

    return simulate_modifications(census, fn, cell_res, precip,
                                  precolumbian)
WikiWatershed/tr-55
[ 9, 8, 9, 9, 1427290590 ]
def _verify_buckets_status(self, revision_id, comparison_revision_id,
                           expected):
    # Verify that actual and expected results match, despite the order of
    # the `comparison_revision_id` and `revision_id` args.
    revision_ids = [revision_id, comparison_revision_id]
    for rev_ids in (revision_ids, reversed(revision_ids)):
        actual = db_api.revision_diff(*rev_ids)
        self.assertEqual(expected, actual)
att-comdev/deckhand
[ 9, 5, 9, 5, 1497626943 ]
def test_revision_diff_created(self):
    payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
    bucket_name = test_utils.rand_name('bucket')
    documents = self.create_documents(bucket_name, payload)
    revision_id = documents[0]['revision_id']

    self._verify_buckets_status(
        0, revision_id, {bucket_name: 'created'})
att-comdev/deckhand
[ 9, 5, 9, 5, 1497626943 ]
def test_revision_diff_self(self):
    payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
    bucket_name = test_utils.rand_name('bucket')
    documents = self.create_documents(bucket_name, payload)
    revision_id = documents[0]['revision_id']

    self._verify_buckets_status(
        revision_id, revision_id, {bucket_name: 'unmodified'})
att-comdev/deckhand
[ 9, 5, 9, 5, 1497626943 ]
def test_revision_diff_modified(self):
    payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
    bucket_name = test_utils.rand_name('bucket')
    documents = self.create_documents(bucket_name, payload)
    revision_id = documents[0]['revision_id']

    payload[0]['data'] = {'modified': 'modified'}
    comparison_documents = self.create_documents(bucket_name, payload)
    comparison_revision_id = comparison_documents[0]['revision_id']

    self._verify_buckets_status(
        revision_id, comparison_revision_id, {bucket_name: 'modified'})
att-comdev/deckhand
[ 9, 5, 9, 5, 1497626943 ]
def test_revision_diff_multi_revision_multi_bucket_modified(self):
    revision_ids = []

    bucket_name = test_utils.rand_name('bucket')
    alt_bucket_name = test_utils.rand_name('bucket')
    bucket_names = [bucket_name, alt_bucket_name] * 2

    # Create revisions by modifying documents in `bucket_name` and
    # `alt_bucket_name`.
    for bucket_idx in range(4):
        payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
        documents = self.create_documents(
            bucket_names[bucket_idx], payload)
        revision_id = documents[0]['revision_id']
        revision_ids.append(revision_id)

    # Between revision_ids[0] and [1], bucket_name is unmodified and
    # alt_bucket_name is created.
    self._verify_buckets_status(
        revision_ids[0], revision_ids[1],
        {bucket_name: 'unmodified', alt_bucket_name: 'created'})

    # Between revision_ids[0] and [2], bucket_name is modified (by [2])
    # and alt_bucket_name is created (by [1]).
    self._verify_buckets_status(
        revision_ids[0], revision_ids[2],
        {bucket_name: 'modified', alt_bucket_name: 'created'})

    # Between revision_ids[0] and [3], bucket_name is modified (by [2])
    # and alt_bucket_name is created (by [1]) (as well as modified by
    # [3]).
    self._verify_buckets_status(
        revision_ids[0], revision_ids[3],
        {bucket_name: 'modified', alt_bucket_name: 'created'})

    # Between revision_ids[1] and [2], bucket_name is modified but
    # alt_bucket_name remains unmodified.
    self._verify_buckets_status(
        revision_ids[1], revision_ids[2],
        {bucket_name: 'modified', alt_bucket_name: 'unmodified'})

    # Between revision_ids[1] and [3], bucket_name is modified (by [2])
    # and alt_bucket_name is modified (by [3]).
    self._verify_buckets_status(
        revision_ids[1], revision_ids[3],
        {bucket_name: 'modified', alt_bucket_name: 'modified'})

    # Between revision_ids[2] and [3], alt_bucket_name is modified but
    # bucket_name remains unmodified.
    self._verify_buckets_status(
        revision_ids[2], revision_ids[3],
        {bucket_name: 'unmodified', alt_bucket_name: 'modified'})
att-comdev/deckhand
[ 9, 5, 9, 5, 1497626943 ]
def test_revision_diff_ignore_bucket_with_all_unrelated_documents(self):
    payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
    alt_payload = copy.deepcopy(payload)
    bucket_name = test_utils.rand_name('bucket')
    alt_bucket_name = test_utils.rand_name('bucket')

    # Create a bucket with 3 documents.
    documents = self.create_documents(bucket_name, payload)
    revision_id = documents[0]['revision_id']

    # Create 3 unrelated documents in a second bucket.
    for idx in range(3):
        alt_payload[idx]['name'] = test_utils.rand_name('name')
        alt_payload[idx]['schema'] = test_utils.rand_name('schema')
    self.create_documents(
        alt_bucket_name, alt_payload)

    # Modify the document from the 1st bucket.
    payload[0]['data'] = {'modified': 'modified'}
    documents = self.create_documents(bucket_name, payload)
    comparison_revision_id = documents[0]['revision_id']

    # The alt_bucket_name should be created.
    self._verify_buckets_status(
        revision_id, comparison_revision_id,
        {bucket_name: 'modified', alt_bucket_name: 'created'})
att-comdev/deckhand
[ 9, 5, 9, 5, 1497626943 ]
def test_revision_diff_delete_then_recreate(self):
    payload = base.DocumentFixture.get_minimal_fixture()
    bucket_name = test_utils.rand_name('bucket')
    created_documents = self.create_documents(bucket_name, payload)
    revision_id_1 = created_documents[0]['revision_id']

    # Delete the previously created document.
    deleted_documents = self.create_documents(bucket_name, [])
    revision_id_2 = deleted_documents[0]['revision_id']

    # Recreate the previously deleted document.
    recreated_documents = self.create_documents(bucket_name, payload)
    revision_id_3 = recreated_documents[0]['revision_id']

    # Verify that the revision for the recreated document compared to the
    # revision for the deleted document is 'created', ignoring order.
    self._verify_buckets_status(
        revision_id_2, revision_id_3, {bucket_name: 'created'})

    # Verify that the revision for the recreated document compared to the
    # revision for the created document is 'unmodified', ignoring order.
    self._verify_buckets_status(
        revision_id_1, revision_id_3, {bucket_name: 'unmodified'})
att-comdev/deckhand
[ 9, 5, 9, 5, 1497626943 ]
def forwards(self, orm):
    # Adding field 'BadgeByCourse.title_en'
    db.add_column('badges_badgebycourse', 'title_en',
                  self.gf('django.db.models.fields.CharField')(
                      max_length=255, null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.title_es'
    db.add_column('badges_badgebycourse', 'title_es',
                  self.gf('django.db.models.fields.CharField')(
                      max_length=255, null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.title_it'
    db.add_column('badges_badgebycourse', 'title_it',
                  self.gf('django.db.models.fields.CharField')(
                      max_length=255, null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.title_pt'
    db.add_column('badges_badgebycourse', 'title_pt',
                  self.gf('django.db.models.fields.CharField')(
                      max_length=255, null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.title_fr'
    db.add_column('badges_badgebycourse', 'title_fr',
                  self.gf('django.db.models.fields.CharField')(
                      max_length=255, null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.title_de'
    db.add_column('badges_badgebycourse', 'title_de',
                  self.gf('django.db.models.fields.CharField')(
                      max_length=255, null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.description_en'
    db.add_column('badges_badgebycourse', 'description_en',
                  self.gf('django.db.models.fields.TextField')(
                      null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.description_es'
    db.add_column('badges_badgebycourse', 'description_es',
                  self.gf('django.db.models.fields.TextField')(
                      null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.description_it'
    db.add_column('badges_badgebycourse', 'description_it',
                  self.gf('django.db.models.fields.TextField')(
                      null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.description_pt'
    db.add_column('badges_badgebycourse', 'description_pt',
                  self.gf('django.db.models.fields.TextField')(
                      null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.description_fr'
    db.add_column('badges_badgebycourse', 'description_fr',
                  self.gf('django.db.models.fields.TextField')(
                      null=True, blank=True),
                  keep_default=False)

    # Adding field 'BadgeByCourse.description_de'
    db.add_column('badges_badgebycourse', 'description_de',
                  self.gf('django.db.models.fields.TextField')(
                      null=True, blank=True),
                  keep_default=False)
GeographicaGS/moocng
[ 2, 1, 2, 12, 1409568699 ]
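This South migration row only shows forwards; a matching backwards method (assumed here, not part of this excerpt) would drop the same columns:

    def backwards(self, orm):
        # Hypothetical inverse: remove the translated title/description
        # columns added by forwards above.
        for lang in ('en', 'es', 'it', 'pt', 'fr', 'de'):
            db.delete_column('badges_badgebycourse', 'title_%s' % lang)
            db.delete_column('badges_badgebycourse',
                             'description_%s' % lang)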
def main():
    return 'main method'
GoogleCloudPlatform/repo-automation-playground
[ 5, 15, 5, 28, 1562866376 ]
def not_main():
    return 'not main'
GoogleCloudPlatform/repo-automation-playground
[ 5, 15, 5, 28, 1562866376 ]
def also_not_main():
    return 'also_not main'
GoogleCloudPlatform/repo-automation-playground
[ 5, 15, 5, 28, 1562866376 ]
def untested_method():
    return 'untested!'
GoogleCloudPlatform/repo-automation-playground
[ 5, 15, 5, 28, 1562866376 ]
def __init__(self, representation_names, sample_freq, update_freq):
    """Constructs a SelectionStrategy object.

    Args:
      representation_names: A list of representation names for tf.summary.
      sample_freq: Frequency to draw a new selection (in steps).
      update_freq: Frequency to update the selector's state (in epochs).
    """
    self.num_representations = len(representation_names)
    self.representation_names = representation_names
    self.sample_freq = sample_freq
    self.update_freq = update_freq
    # index of the selected representation
    self.current_selection = tf.Variable(0, trainable=False)
    self.last_selection_step = tf.Variable(-1, trainable=False)
    self.last_update_epoch = tf.Variable(0, trainable=False)
    self.selection_counter = tf.Variable([0] * self.num_representations)
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
def should_update(self, epoch):
    """Returns whether the strategy should update its state at this epoch."""
    return epoch - self.last_update_epoch >= self.update_freq
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
def _select(self):
    raise NotImplementedError
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
def _select(self):
    # No need to reselect since this strategy is deterministic.
    return self.current_selection.numpy()
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
def _select(self):
    return (self.current_selection + 1) % self.num_representations
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
def eta_scheduler(epoch, values=(0.1,), breakpoints=()):
    """Piecewise-constant schedule for eta (selector weight learning rate)."""
    idx = sum(1 if epoch > b else 0 for b in breakpoints)
    return values[idx]
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
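For instance, with a single breakpoint the schedule steps down once (the numbers are illustrative):

    assert eta_scheduler(5, values=(0.1, 0.01), breakpoints=(10,)) == 0.1
    assert eta_scheduler(11, values=(0.1, 0.01), breakpoints=(10,)) == 0.01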
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Weights of each representation. Each selection is a sample drawn
    # proportionally to the weights.
    # TODO(csferng): Store the weights in logit space.
    self.weights = tf.Variable(tf.ones(self.num_representations))
    self.current_selection.assign(self._select())
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
def _update(self, epoch, validation_losses):
    validation_losses = tf.convert_to_tensor(validation_losses)
    eta = eta_scheduler(epoch)
    self.weights.assign(self.weights * tf.math.exp(eta * validation_losses))
    for i in range(self.num_representations):
        tf.summary.scalar(
            f"representations/weight/{self.representation_names[i]}",
            self.weights[i],
            step=epoch)
tensorflow/neural-structured-learning
[ 963, 192, 963, 1, 1566942496 ]
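This is a multiplicative-weights style update. A hedged sketch of drawing a selection proportionally to these weights; the actual sampling code is assumed to live elsewhere in the class:

    import tensorflow as tf

    def sample_index(weights):
        # Sample an index with probability proportional to each weight.
        logits = tf.math.log(weights / tf.reduce_sum(weights))
        return int(tf.random.categorical(logits[tf.newaxis, :], 1)[0, 0])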
def create_or_update_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
    chain_model = _get_chain_by_pid(pid)
    if chain_model:
        _set_chain_sid(chain_model, sid)
    else:
        _add_sciobj(pid, sid, obsoletes_pid, obsoleted_by_pid)
    _update_sid_to_last_existing_pid_map(pid)
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]
def cut_from_chain(sciobj_model):
    """Remove an object from a revision chain.

    The object can be at any location in the chain, including the head or
    tail.

    Preconditions:

    - The object with the pid is verified to exist and to be a member of a
      revision chain. E.g., with:

      d1_gmn.app.views.asserts.is_existing_object(pid)
      d1_gmn.app.views.asserts.is_in_revision_chain(pid)

    Postconditions:

    - The given object is a standalone object with empty obsoletes,
      obsoletedBy and seriesId fields.

    - The previously adjacent objects in the chain are adjusted to close
      any gap that was created or remove any dangling reference at the
      head or tail.

    - If the object was the last object in the chain and the chain has a
      SID, the SID reference is shifted over to the new last object in
      the chain.
    """
    if _is_head(sciobj_model):
        old_pid = sciobj_model.obsoletes.did
        _cut_head_from_chain(sciobj_model)
    elif _is_tail(sciobj_model):
        old_pid = sciobj_model.obsoleted_by.did
        _cut_tail_from_chain(sciobj_model)
    else:
        old_pid = sciobj_model.obsoleted_by.did
        _cut_embedded_from_chain(sciobj_model)
    _update_sid_to_last_existing_pid_map(old_pid)
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]
def resolve_sid(sid):
    """Get the PID to which the ``sid`` currently maps.

    Preconditions:

    - ``sid`` is verified to exist. E.g., with
      d1_gmn.app.views.asserts.is_sid().
    """
    return d1_gmn.app.models.Chain.objects.get(sid__did=sid).head_pid.did
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]
def set_revision_links(sciobj_model, obsoletes_pid=None,
                       obsoleted_by_pid=None):
    if obsoletes_pid:
        sciobj_model.obsoletes = d1_gmn.app.did.get_or_create_did(
            obsoletes_pid)
        _set_revision_reverse(sciobj_model.pid.did, obsoletes_pid,
                              is_obsoletes=False)
    if obsoleted_by_pid:
        sciobj_model.obsoleted_by = d1_gmn.app.did.get_or_create_did(
            obsoleted_by_pid)
        _set_revision_reverse(sciobj_model.pid.did, obsoleted_by_pid,
                              is_obsoletes=True)
    sciobj_model.save()
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]
def is_obsoleted_by_pid(pid):
    """Return True if ``pid`` is referenced in the obsoletedBy field of any
    object.

    This will return True even if the PID is in the obsoletes field of an
    object that does not exist on the local MN, such as a replica that is
    in an incomplete chain.
    """
    return d1_gmn.app.models.ScienceObject.objects.filter(
        obsoleted_by__did=pid
    ).exists()
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]
def _add_sciobj(pid, sid, obsoletes_pid, obsoleted_by_pid):
    is_added = _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid)
    if not is_added:
        # if not obsoletes_pid and not obsoleted_by_pid:
        _add_standalone(pid, sid)
        # else:
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]
def _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
    _assert_sid_is_in_chain(sid, obsoletes_pid)
    _assert_sid_is_in_chain(sid, obsoleted_by_pid)
    obsoletes_chain_model = _get_chain_by_pid(obsoletes_pid)
    obsoleted_by_chain_model = _get_chain_by_pid(obsoleted_by_pid)
    sid_chain_model = _get_chain_by_sid(sid) if sid else None
    chain_model = (obsoletes_chain_model or obsoleted_by_chain_model or
                   sid_chain_model)
    if not chain_model:
        return False
    if obsoletes_chain_model and obsoletes_chain_model != chain_model:
        _merge_chains(chain_model, obsoletes_chain_model)
    if obsoleted_by_chain_model and obsoleted_by_chain_model != chain_model:
        _merge_chains(chain_model, obsoleted_by_chain_model)
    _add_pid_to_chain(chain_model, pid)
    _set_chain_sid(chain_model, sid)
    return True
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]
def _add_pid_to_chain(chain_model, pid):
    chain_member_model = d1_gmn.app.models.ChainMember(
        chain=chain_model, pid=d1_gmn.app.did.get_or_create_did(pid)
    )
    chain_member_model.save()
DataONEorg/d1_python
[ 13, 6, 13, 17, 1464710460 ]