function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
sequence
def _get_subnet(self, context, subnet_id, fields=None):
    """Fetch a single subnet resource and render it as a dict."""
    return self._make_subnet_dict(
        self._get_resource('subnet', context, subnet_id, fields))
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def update_subnet(self, context, subnet_id, subnet):
    """Updates the attributes of a particular subnet."""
    updated = self._update_resource('subnet', context, subnet_id, subnet)
    return self._make_subnet_dict(updated)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def get_subnets(self, context, filters=None, fields=None):
    """Get the list of subnets matching the optional filters."""
    subnets = self._list_resource('subnet', context, filters, fields)
    return [self._make_subnet_dict(subnet) for subnet in subnets]
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def _extend_port_dict_security_group(self, port_res, port_db):
    """Copy security-group bindings from the DB model onto the port dict."""
    # Bindings come from the sqlalchemy model; they are eagerly
    # (join-)loaded together with the port, so reading them here does
    # not trigger an extra query.
    bindings = port_db.get('security_groups', [])
    port_res[securitygroup.SECURITYGROUPS] = bindings or []
    return port_res
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def _get_port(self, context, id, fields=None):
    """Fetch a single port resource and render it as a dict."""
    port_obj = self._get_resource('port', context, id, fields)
    return self._make_port_dict(port_obj)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def create_port(self, context, port):
    """Creates a port on the specified Virtual Network."""
    created = self._create_resource('port', context, port)
    return self._make_port_dict(created)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def update_port(self, context, port_id, port):
    """Updates a port.

    Updates the attributes of a port on the specified Virtual Network.
    """
    # When fixed_ips is being changed, reconcile the requested list with
    # the port's current IPs first; surviving previously-allocated IPs
    # are kept ahead of any newly added ones.
    if 'fixed_ips' in port['port']:
        original = self._get_port(context, port_id)
        added_ips, prev_ips = self._update_ips_for_port(
            context, original['network_id'], port_id,
            original['fixed_ips'], port['port']['fixed_ips'])
        port['port']['fixed_ips'] = prev_ips + added_ips
    port = self._update_resource('port', context, port_id, port)
    return self._make_port_dict(port)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def get_ports(self, context, filters=None, fields=None):
    """Get all ports.

    Retrieves all port identifiers belonging to the specified
    Virtual Network with the specified filter.
    """
    ports = self._list_resource('port', context, filters, fields)
    return [self._make_port_dict(port) for port in ports]
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def create_router(self, context, router):
    """Creates a router.

    Creates a new Logical Router, and assigns it a symbolic name.
    """
    return self._create_resource('router', context, router)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def update_router(self, context, router_id, router):
    """Updates the attributes of a router."""
    return self._update_resource('router', context, router_id, router)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def get_routers(self, context, filters=None, fields=None):
    """Retrieves all router identifiers matching the optional filters."""
    return self._list_resource('router', context, filters, fields)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def add_router_interface(self, context, router_id, interface_info):
    """Add interface to a router.

    interface_info must contain 'subnet_id' or 'port_id' but not both;
    the request is forwarded to the contrail backend as an ADDINTERFACE
    operation.

    Raises BadRequest on invalid input, or a contrail error when the
    backend returns a non-OK status.
    """
    if not interface_info:
        msg = _("Either subnet_id or port_id must be specified")
        raise exc.BadRequest(resource='router', msg=msg)

    if 'port_id' in interface_info:
        # Supplying both keys is ambiguous -> reject.
        if 'subnet_id' in interface_info:
            msg = _("Cannot specify both subnet-id and port-id")
            raise exc.BadRequest(resource='router', msg=msg)

    res_dict = self._encode_resource(resource_id=router_id,
                                     resource=interface_info)
    status_code, res_info = self._request_backend(context, res_dict,
                                                  'router', 'ADDINTERFACE')
    if status_code != requests.codes.ok:
        self._raise_contrail_error(status_code, info=res_info,
                                   obj_name='add_router_interface')
    return res_info
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def create_floatingip(self, context, floatingip):
    """Creates a floating IP."""
    return self._create_resource('floatingip', context, floatingip)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def get_floatingip(self, context, fip_id, fields=None):
    """Get the attributes of a floating ip."""
    return self._get_resource('floatingip', context, fip_id, fields)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def get_floatingips(self, context, filters=None, fields=None):
    """Retrieves all floating ips identifiers."""
    return self._list_resource('floatingip', context, filters, fields)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def create_security_group(self, context, security_group):
    """Creates a Security Group."""
    return self._create_resource('security_group', context,
                                 security_group)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def update_security_group(self, context, sg_id, security_group):
    """Updates the attributes of a security group."""
    return self._update_resource('security_group', context, sg_id,
                                 security_group)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def get_security_groups(self, context, filters=None, fields=None,
                        sorts=None, limit=None, marker=None,
                        page_reverse=False):
    """Retrieves all security group identifiers.

    NOTE(review): sorts/limit/marker/page_reverse are accepted for API
    compatibility but are not forwarded to the backend — confirm whether
    pagination is handled elsewhere.
    """
    return self._list_resource('security_group', context,
                               filters, fields)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def get_security_group_rules_count(self, context, filters=None):
    """Stub: rule counting is not supported; always reports zero."""
    return 0
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def delete_security_group_rule(self, context, sg_rule_id):
    """Deletes a security group rule."""
    self._delete_resource('security_group_rule', context, sg_rule_id)
cloudwatt/contrail-neutron-plugin
[ 1, 2, 1, 1, 1401891835 ]
def __init__(self):
    """Default initialization; no instance state is set up here."""
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def __new__(cls, *args, **kwargs):
    """Override __new__ to make the class a singleton.

    The first instantiation stores the instance on cls.__instance
    (a class attribute defined elsewhere); every later call returns
    that same object.
    """
    if not cls.__instance:
        # NOTE(review): forwarding *args/**kwargs to object.__new__ is a
        # Python 2 idiom; it raises TypeError on Python 3 — confirm before
        # porting.
        cls.__instance = super(ATLASExperiment, cls).__new__(cls, *args, **kwargs)
    return cls.__instance
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def setParameters(self, *args, **kwargs):
    """Set any internally needed variables from keyword arguments."""
    # Pull the job object (if any) and remember it.
    job = kwargs.get('job', None)
    self.__job = job
    if job:
        self.__analysisJob = isAnalysisJob(self.__job.trf)
    else:
        self.__warning = "setParameters found no job object"
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def willDoFileLookups(self):
    """Should (LFC) file lookups be done by the pilot or not?

    Always disabled for this experiment.
    """
    return False
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def setupNordugridTrf(self, job, analysisJob, wgetCommand, pilot_initdir):
    """Perform the Nordugrid trf setup.

    Returns an (exit_code, pilotErrorDiag, cmd) tuple: exit_code is 0 on
    success (cmd is then the command to execute), otherwise
    ERR_SETUPFAILURE with a diagnostic message.
    """
    error = PilotErrors()
    pilotErrorDiag = ""
    cmd = ""

    # Assume that the runtime script has already been created.
    # Fix: use 'in' instead of dict.has_key(), which was removed in Python 3.
    if 'RUNTIME_CONFIG_DIR' not in os.environ:
        pilotErrorDiag = "Environment variable not set: RUNTIME_CONFIG_DIR"
        tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
        return error.ERR_SETUPFAILURE, pilotErrorDiag, ""

    runtime_script = "%s/APPS/HEP/ATLAS-%s" % (os.environ['RUNTIME_CONFIG_DIR'], job.release)
    if os.path.exists(runtime_script):
        cmd = ". %s 1" % (runtime_script)
        if analysisJob:
            # try to download the analysis trf
            status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
            if status != 0:
                return status, pilotErrorDiag, ""
            trfName = "./" + trfName
        else:
            trfName = job.trf
        # Hoisted: the release tag was previously computed three times inline.
        release_tag = job.homePackage.split('/')[-1]
        cmd += '; export ATLAS_RELEASE=%s;export AtlasVersion=%s;export AtlasPatchVersion=%s' % (release_tag, release_tag, release_tag)
        cmd += "; %s %s" % (trfName, job.jobPars)
    elif verifyReleaseString(job.release) == "NULL":
        if analysisJob:
            # try to download the analysis trf
            status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
            if status != 0:
                return status, pilotErrorDiag, ""
            trfName = "./" + trfName
        else:
            trfName = job.trf
        cmd = "%s %s" % (trfName, job.jobPars)
    else:
        pilotErrorDiag = "Could not locate runtime script: %s" % (runtime_script)
        tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
        return error.ERR_SETUPFAILURE, pilotErrorDiag, ""

    # correct for multi-core if necessary (especially important in case
    # coreCount=1 to limit parallel make)
    cmd = self.addMAKEFLAGS(job.coreCount, "") + cmd

    return 0, pilotErrorDiag, cmd
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def getReleaseObsolete(self, release):
    """Return a list of the software release id's.

    Assuming 'release' is a string that separates release id's with '\\n'.
    Used in the case of payload using multiple steps with different
    release versions, e.g. release = "19.0.0\\n19.1.0" ->
    ['19.0.0', '19.1.0'].  On Nordugrid, ATLAS_RELEASE (comma-separated)
    takes precedence.
    """
    # Fix: use 'in' instead of dict.has_key(), which was removed in Python 3.
    if 'Nordugrid_pilot' in os.environ and 'ATLAS_RELEASE' in os.environ:
        return os.environ['ATLAS_RELEASE'].split(",")
    else:
        return release.split("\n")
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def shouldExecuteUtility(self):
    """Determine whether a memory utility monitor should be executed.

    RunJob can launch a memory monitor that tracks the payload's memory
    usage; it runs when this method returns True.  The monitor produces a
    JSON summary (see getMemoryMonitorJSONFilename()) whose contents are
    added to the jobMetrics at the end of the job (PandaServerClient).
    """
    return True
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def getUtilityJSONFilename(self):
    """Return the filename of the memory monitor JSON summary.

    For background, see shouldExecuteUtility().
    """
    return "memory_monitor_summary.json"
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def trimTrfName(self, trfName):
    """Strip any path component from the trf name, keeping the basename."""
    return os.path.basename(trfName) if "/" in trfName else trfName
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def getUtilityCommand(self, **argdict): """ Prepare a utility command string """ # This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor, # that will be executed by the pilot in parallel with the payload. # The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields # from it and report them with the job updates. Currently the pilot expects to find fields related # to memory information. pid = argdict.get('pid', 0) summary = self.getUtilityJSONFilename() workdir = argdict.get('workdir', '.') interval = 60 default_release = "21.0.22" #"21.0.18" #"21.0.17" #"20.7.5" #"20.1.5" # default_patch_release = "20.7.5.8" #"20.1.5.2" #"20.1.4.1" # default_cmtconfig = "x86_64-slc6-gcc49-opt" default_cmtconfig = "x86_64-slc6-gcc62-opt" # default_swbase = "%s/atlas.cern.ch/repo/sw/software" % (self.getCVMFSPath()) default_swbase = "%s/atlas.cern.ch/repo" % (self.getCVMFSPath()) default_setup = self.getModernASetup() + " Athena," + default_release + " --platform " + default_cmtconfig tolog("Will use default (fallback) setup for MemoryMonitor") cmd = default_setup # Now add the MemoryMonitor command cmd += "; MemoryMonitor --pid %d --filename %s --json-summary %s --interval %d" % (pid, self.getUtilityOutputFilename(), summary, interval) cmd = "cd " + workdir + ";" + cmd return cmd
PanDAWMS/pilot
[ 12, 19, 12, 1, 1398709467 ]
def __init__(self, dbuser, dbpass):
    """Store MySQL credentials for later dbConnect() calls."""
    super(DataCollector, self).__init__()
    # MySQL user name and password
    self.dbuser = dbuser
    self.dbpass = dbpass
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def dbConnect(self):
    """Open the twitter_bookmarks database and return a cursor on it."""
    connection = MySQLdb.connect(user=self.dbuser, passwd=self.dbpass,
                                 db="twitter_bookmarks",
                                 use_unicode=True, charset="utf8")
    return connection.cursor()
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def __init__(self, dbuser, dbpass):
    """Store MySQL credentials for later dbConnect() calls."""
    super(RawDataCollector, self).__init__()
    # MySQL user name and password
    self.dbuser = dbuser
    self.dbpass = dbpass
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def dbConnect(self):
    """Open the twitter_bookmarks database and return a cursor on it."""
    connection = MySQLdb.connect(user=self.dbuser, passwd=self.dbpass,
                                 db="twitter_bookmarks",
                                 use_unicode=True, charset="utf8")
    return connection.cursor()
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def NeedsInit():
    """Return True while the module-level binary manager is unset."""
    return not _binary_manager
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def TemporarilyReplaceBinaryManager(manager):
    """Swap in *manager* for the duration of the with-block, then restore.

    NOTE(review): this is a generator; it presumably carries a
    @contextlib.contextmanager decorator at its definition site — confirm.
    """
    old_manager = GetBinaryManager()
    try:
        SetBinaryManager(manager)
        yield
    finally:
        # Always restore the previous manager, even if the body raised.
        SetBinaryManager(old_manager)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def SetBinaryManager(manager): global _binary_manager # pylint: disable=global-statement _binary_manager = manager
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def FetchPath(binary_name, os_name, arch, os_version=None):
    """ Return a path to the appropriate executable for <binary_name>,
    downloading from cloud storage if needed, or None if it cannot be found.

    Raises InitializationError if SetBinaryManager() has not been called.
    """
    if GetBinaryManager() is None:
        raise exceptions.InitializationError(
            'Called FetchPath with uninitialized binary manager.')
    # Local Chrome OS runs use the linux binaries, which are compatible.
    return GetBinaryManager().FetchPath(
        binary_name,
        'linux' if _IsChromeOSLocalMode(os_name) else os_name,
        arch, os_version)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def FetchBinaryDependencies(
    platform, client_configs, fetch_reference_chrome_binary):
  """ Fetch all binary dependenencies for the given |platform|.

  Note: we don't fetch browser binaries by default because the size of the
  binary is about 2Gb, and it requires cloud storage permission to
  chrome-telemetry bucket.

  Args:
    platform: an instance of telemetry.core.platform
    client_configs: A list of paths (string) to dependencies json files.
    fetch_reference_chrome_binary: whether to fetch reference chrome binary for
      the given platform.
  """
  configs = [
      dependency_manager.BaseConfig(TELEMETRY_PROJECT_CONFIG),
  ]
  dep_manager = dependency_manager.DependencyManager(configs)
  os_name = platform.GetOSName()
  # If we're running directly on a Chrome OS device, fetch the binaries for
  # linux instead, which should be compatible with CrOS. Otherwise, if we're
  # running remotely on CrOS, fetch the binaries for the host platform like
  # we do with android below.
  if _IsChromeOSLocalMode(os_name):
    os_name = 'linux'
  target_platform = '%s_%s' % (os_name, platform.GetArchName())
  dep_manager.PrefetchPaths(target_platform)

  host_platform = None
  fetch_devil_deps = False
  if os_name in ('android', 'chromeos'):
    # Remote targets also need the host-side binaries prefetched.
    host_platform = '%s_%s' % (
        py_utils.GetHostOsName(), py_utils.GetHostArchName())
    dep_manager.PrefetchPaths(host_platform)
    if os_name == 'android':
      if host_platform == 'linux_x86_64':
        fetch_devil_deps = True
      else:
        logging.error('Devil only supports 64 bit linux as a host platform. '
                      'Android tests may fail.')

  if fetch_reference_chrome_binary:
    _FetchReferenceBrowserBinary(platform)

  # For now, handle client config separately because the BUILD.gn & .isolate of
  # telemetry tests in chromium src failed to include the files specified in
  # its client config.
  # (https://github.com/catapult-project/catapult/issues/2192)
  # For now this is ok because the client configs usually don't include cloud
  # storage infos.
  # TODO(crbug.com/1111556): remove the logic of swallowing exception once the
  # issue is fixed on Chromium side.
  if client_configs:
    manager = dependency_manager.DependencyManager(
        list(dependency_manager.BaseConfig(c) for c in client_configs))
    try:
      manager.PrefetchPaths(target_platform)
      if host_platform is not None:
        manager.PrefetchPaths(host_platform)
    # Deliberately best-effort: log and continue (see TODO above).
    except dependency_manager.NoPathFoundError as e:
      logging.error('Error when trying to prefetch paths for %s: %s',
                    target_platform, e)

  if fetch_devil_deps:
    devil_env.config.Initialize()
    devil_env.config.PrefetchPaths(arch=platform.GetArchName())
    devil_env.config.PrefetchPaths()
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def _FetchReferenceBrowserBinary(platform):
  """Fetch the reference 'chrome_stable' binary for |platform|."""
  os_name = platform.GetOSName()
  # Local Chrome OS runs use the linux binaries, which are compatible.
  if _IsChromeOSLocalMode(os_name):
    os_name = 'linux'
  arch_name = platform.GetArchName()
  manager = binary_manager.BinaryManager(
             [CHROME_BINARY_CONFIG])
  if os_name == 'android':
    # Android needs the OS version to pick the right APK.
    os_version = dependency_util.GetChromeApkOsVersion(
        platform.GetOSVersionName())
    manager.FetchPath(
        'chrome_stable', os_name, arch_name, os_version)
  else:
    manager.FetchPath(
        'chrome_stable', os_name, arch_name)
catapult-project/catapult
[ 1835, 570, 1835, 1039, 1429033745 ]
def __init__(self, pattern, response_dict):
    """
    Configure XQueue stub to POST `response_dict` (a dictionary) back to the
    LMS when it receives a submission that contains the string `pattern`.

    Remember that there is one XQueue stub shared by all the tests;
    if possible, you should have tests use unique queue names
    to avoid conflict between tests running in parallel.
    """
    # Substring to look for in incoming submissions
    self._pattern = pattern
    # Dict POSTed back to the LMS on a match
    self._response_dict = response_dict
edx-solutions/edx-platform
[ 12, 19, 12, 9, 1391522577 ]
def config(self):
    """Build the OpenFOAM build-configuration dict for this spec."""
    # SYSTEMMPI is required since openfoam-org doesn't have USERMPI;
    # 'link' controls symlinks into bin/ and lib/ for other applications.
    settings = {
        'mplib': 'SYSTEMMPI',
        'link': False,
    }
    # OpenFOAM v2.4 and earlier lacks WM_LABEL_OPTION
    if self.spec.satisfies('@:2.4'):
        settings['label-size'] = False
    return settings
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def setup_dependent_build_environment(self, env, dependent_spec):
    """Export the OpenFOAM project directory to dependents.

    This is identical to the WM_PROJECT_DIR value, but we avoid that
    variable since it would mask the normal OpenFOAM cleanup of
    previous versions.
    """
    env.set('FOAM_PROJECT_DIR', self.projectdir)
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def projectdir(self):
    """Absolute location of the project directory (WM_PROJECT_DIR).

    The project is installed directly under the package prefix.
    """
    return self.prefix
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def foam_arch(self):
    """Lazily construct and cache the OpenfoamOrgArch for this spec."""
    if not self._foam_arch:
        self._foam_arch = OpenfoamOrgArch(self.spec, **self.config)
    return self._foam_arch
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def archbin(self):
    """Relative location of architecture-specific executables"""
    return join_path('platforms', self.foam_arch, 'bin')
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def archlib(self):
    """Relative location of architecture-specific libraries"""
    return join_path('platforms', self.foam_arch, 'lib')
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def patch(self):
    """Adjust OpenFOAM build for spack.
       Where needed, apply filter as an alternative to normal patching."""
    self.rename_source()
    add_extra_files(self, self.common, self.assets)

    # Avoid WM_PROJECT_INST_DIR for ThirdParty, site or jobControl.
    # Use openfoam-site.patch to handle jobControl, site.
    #
    # Filtering: bashrc,cshrc (using a patch is less flexible)
    edits = {
        'WM_THIRD_PARTY_DIR':
        r'$WM_PROJECT_DIR/ThirdParty #SPACK: No separate third-party',
        'WM_VERSION': str(self.version),  # consistency
        'FOAMY_HEX_MESH': '',  # This is horrible (unset variable?)
    }
    rewrite_environ_files(  # Adjust etc/bashrc and etc/cshrc
        edits,
        posix=join_path('etc', 'bashrc'),
        cshell=join_path('etc', 'cshrc'))
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def build(self, spec, prefix):
    """Build using the OpenFOAM Allwmake script, with a wrapper to source
    its environment first.
    Only build if the compiler is known to be supported.
    """
    self.foam_arch.has_rule(self.stage.source_path)
    self.foam_arch.create_rules(self.stage.source_path, self)

    args = []
    if self.parallel:  # Build in parallel? - pass via the environment
        os.environ['WM_NCOMPPROCS'] = str(make_jobs)
    builder = Executable(self.build_script)
    builder(*args)
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def install_links(self): """Add symlinks into bin/, lib/ (eg, for other applications)""" # Make build log visible - it contains OpenFOAM-specific information with working_dir(self.projectdir): os.symlink( join_path(os.path.relpath(self.install_log_path)), join_path('log.' + str(self.foam_arch))) if not self.config['link']: return # ln -s platforms/linux64GccXXX/lib lib with working_dir(self.projectdir): if os.path.isdir(self.archlib): os.symlink(self.archlib, 'lib') # (cd bin && ln -s ../platforms/linux64GccXXX/bin/* .) with working_dir(join_path(self.projectdir, 'bin')): for f in [ f for f in glob.glob(join_path('..', self.archbin, "*")) if os.path.isfile(f) ]: os.symlink(f, os.path.basename(f))
LLNL/spack
[ 3244, 1839, 3244, 2847, 1389172932 ]
def is_on(self) -> bool:
    """Return True when the wrapped API reports the switch as on."""
    return self._api.is_on
tchellomello/home-assistant
[ 7, 1, 7, 6, 1467778429 ]
def init(self, switch_configuration, terminal_controller, logger,
         piping_processor, *args):
    """Initialize the processor; the first extra arg is the VLAN being edited."""
    super(ConfigVlanCommandProcessor, self).init(
        switch_configuration, terminal_controller, logger, piping_processor)
    # VLAN object that subsequent config commands mutate
    self.vlan = args[0]
internap/fake-switches
[ 53, 37, 53, 11, 1440447597 ]
def do_name(self, *args):
    """Set the VLAN name from the first argument, truncated to 32 chars."""
    new_name = args[0]
    self.vlan.name = new_name[:32]
internap/fake-switches
[ 53, 37, 53, 11, 1440447597 ]
def add_instructor(course_key, requesting_user, new_instructor): """ Adds given user as instructor and staff to the given course, after verifying that the requesting_user has permission to do so. """ # can't use auth.add_users here b/c it requires user to already have Instructor perms in this course CourseInstructorRole(course_key).add_users(new_instructor) auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def remove_all_instructors(course_key):
    """
    Removes all instructor and staff users from the given course.
    """
    staff_role = CourseStaffRole(course_key)
    staff_role.remove_users(*staff_role.users_with_role())
    instructor_role = CourseInstructorRole(course_key)
    instructor_role.remove_users(*instructor_role.users_with_role())
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def _delete_course_from_modulestore(course_key, user_id):
    """
    Delete course from MongoDB. Deleting course will fire a signal
    which will result into deletion of the courseware associated
    with a course_key.
    """
    module_store = modulestore()

    # Bulk operation batches the underlying store writes for the deletion.
    with module_store.bulk_operations(course_key):
        module_store.delete_course(course_key, user_id)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def get_lms_link_for_item(location, preview=False):
    """
    Returns an LMS link to the course with a jump_to to the provided
    location, or None when no LMS base is configured.

    :param location: the location to jump to
    :param preview: True if the preview version of LMS should be returned.
        Default value is false.
    """
    assert isinstance(location, UsageKey)

    # checks LMS_BASE value in site configuration for the given
    # course_org_filter(org); if not found returns settings.LMS_BASE
    lms_base = SiteConfiguration.get_value_for_org(
        location.org,
        "LMS_BASE",
        settings.LMS_BASE
    )

    if lms_base is None:
        return None

    if preview:
        # checks PREVIEW_LMS_BASE value in site configuration for the given
        # course_org_filter(org); if not found returns
        # settings.FEATURES.get('PREVIEW_LMS_BASE')
        lms_base = SiteConfiguration.get_value_for_org(
            location.org,
            "PREVIEW_LMS_BASE",
            settings.FEATURES.get('PREVIEW_LMS_BASE')
        )

    # Protocol-relative URL so it works for both http and https.
    return u"//{lms_base}/courses/{course_key}/jump_to/{location}".format(
        lms_base=lms_base,
        course_key=text_type(location.course_key),
        location=text_type(location),
    )
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def get_lms_link_for_certificate_web_view(user_id, course_key, mode):
    """
    Returns the url to the certificate web view, or None when no LMS base
    is configured.
    """
    assert isinstance(course_key, CourseKey)

    # checks LMS_BASE value in SiteConfiguration against course_org_filter;
    # if not found returns settings.LMS_BASE
    lms_base = SiteConfiguration.get_value_for_org(course_key.org, "LMS_BASE", settings.LMS_BASE)

    if lms_base is None:
        return None

    # Protocol-relative URL so it works for both http and https.
    return u"//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}".format(
        certificate_web_base=lms_base,
        user_id=user_id,
        course_id=unicode(course_key),
        mode=mode
    )
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def is_currently_visible_to_students(xblock):
    """
    Returns true if there is a published version of the xblock that is
    currently visible to students.  This means that it has a release date
    in the past, and the xblock has not been set to staff only.
    """
    try:
        published = modulestore().get_item(xblock.location, revision=ModuleStoreEnum.RevisionOption.published_only)
    # If there's no published version then the xblock is clearly not visible
    except ItemNotFoundError:
        return False

    # If visible_to_staff_only is True, this xblock is not visible to
    # students regardless of start date.
    if published.visible_to_staff_only:
        return False

    # Check start date
    if 'detached' not in published._class_tags and published.start is not None:
        return datetime.now(UTC) > published.start

    # No start date, so it's always visible
    return True
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def is_visible_to_specific_partition_groups(xblock):
    """
    Return True when this xblock's visibility is restricted to specific
    user partition groups.
    """
    if not xblock.group_access:
        return False
    return any(
        any(group["selected"] for group in partition["groups"])
        for partition in get_user_partition_info(xblock)
    )
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def find_staff_lock_source(xblock): """ Returns the xblock responsible for setting this xblock's staff lock, or None if the xblock is not staff locked. If this xblock is explicitly locked, return it, otherwise find the ancestor which sets this xblock's staff lock. """ # Stop searching if this xblock has explicitly set its own staff lock if xblock.fields['visible_to_staff_only'].is_set_on(xblock): return xblock # Stop searching at the section level if xblock.category == 'chapter': return None parent_location = modulestore().get_parent_location(xblock.location, revision=ModuleStoreEnum.RevisionOption.draft_preferred) # Orphaned xblocks set their own staff lock if not parent_location: return None parent = modulestore().get_item(parent_location) return find_staff_lock_source(parent)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def reverse_url(handler_name, key_name=None, key_value=None, kwargs=None):
    """
    Creates the URL for the given handler.
    The optional key_name and key_value are passed in as kwargs to the handler.

    Bug fix: previously, passing `kwargs` without a `key_name` crashed with
    AttributeError because the kwargs dict was initialized to None.
    """
    kwargs_for_reverse = {key_name: unicode(key_value)} if key_name else {}
    if kwargs:
        kwargs_for_reverse.update(kwargs)
    # Pass None (not an empty dict) when there are no kwargs, preserving the
    # original call shape for reverse().
    return reverse(handler_name, kwargs=kwargs_for_reverse or None)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def reverse_library_url(handler_name, library_key, kwargs=None):
    """
    Creates the URL for handlers that use library_keys as URL parameters.
    """
    return reverse_url(handler_name,
                       key_name='library_key_string',
                       key_value=library_key,
                       kwargs=kwargs)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def get_split_group_display_name(xblock, course):
    """
    Returns group name if an xblock is found in user partition groups that
    are suitable for the split_test module.

    Arguments:
        xblock (XBlock): The courseware component.
        course (XBlock): The course descriptor.

    Returns:
        group name (String): Group name of the matching group xblock,
        or None when no group matches.
    """
    for user_partition in get_user_partition_info(xblock, schemes=['random'], course=course):
        for group in user_partition['groups']:
            # The split_test module names child blocks "Group ID <id>".
            candidate = 'Group ID {group_id}'.format(group_id=group['id'])
            if candidate == xblock.display_name_with_default:
                return group['name']
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def get_visibility_partition_info(xblock, course=None):
    """
    Retrieve user partition information for the component visibility editor.

    This pre-processes partition information to simplify the template.

    Arguments:
        xblock (XBlock): The component being edited.

        course (XBlock): The course descriptor.  If provided, uses this to
            look up the user partitions instead of loading the course.  This
            is useful if we're calling this function multiple times for the
            same course want to minimize queries to the modulestore.

    Returns: dict
    """
    selectable_partitions = []
    # We wish to display enrollment partitions before cohort partitions.
    enrollment_user_partitions = get_user_partition_info(xblock, schemes=["enrollment_track"], course=course)

    # For enrollment partitions, we only show them if there is a selected group or
    # or if the number of groups > 1.
    for partition in enrollment_user_partitions:
        if len(partition["groups"]) > 1 or any(group["selected"] for group in partition["groups"]):
            selectable_partitions.append(partition)

    # Now add the cohort user partitions.
    selectable_partitions = selectable_partitions + get_user_partition_info(xblock, schemes=["cohort"], course=course)

    # Find the first partition with a selected group. That will be the one initially enabled in the dialog
    # (if the course has only been added in Studio, only one partition should have a selected group).
    selected_partition_index = -1

    # At the same time, build up all the selected groups as they are displayed in the dialog title.
    selected_groups_label = ''

    for index, partition in enumerate(selectable_partitions):
        for group in partition["groups"]:
            if group["selected"]:
                if len(selected_groups_label) == 0:
                    selected_groups_label = group['name']
                else:
                    # Translators: This is building up a list of groups. It is marked for translation because of the
                    # comma, which is used as a separator between each group.
                    selected_groups_label = _('{previous_groups}, {current_group}').format(
                        previous_groups=selected_groups_label,
                        current_group=group['name']
                    )
                if selected_partition_index == -1:
                    selected_partition_index = index

    return {
        "selectable_partitions": selectable_partitions,
        "selected_partition_index": selected_partition_index,
        "selected_groups_label": selected_groups_label,
    }
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def ReadFileAsLines(filename):
    """Reads a file, removing blank lines and lines that start with #"""
    # Fixes: use a context manager so the handle is closed even on error,
    # and avoid shadowing the (Python 2) builtin 'file'.
    with open(filename, "r") as input_file:
        raw_lines = input_file.readlines()
    lines = []
    for raw_line in raw_lines:
        line = raw_line.strip()
        if len(line) > 0 and not line.startswith("#"):
            lines.append(line)
    return lines
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def GetTestName(testName):
    """Build a clean test identifier from the last two path components."""
    parts = testName.split("/")
    clean_name = parts[-2] + "_" + parts[-1]
    # Order matters: strip the ".test" suffix before mapping '.' -> '_'.
    for old, new in {".test": "", ".": "_"}.items():
        clean_name = clean_name.replace(old, new)
    return clean_name
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def GenerateTestList(sourceFile, rootDir):
    """Recursively expand a .run manifest into a flat list of .test paths
    relative to rootDir (with forward slashes)."""
    tests = []
    # NOTE(review): fileName is unused; only the extension drives the logic.
    fileName, fileExtension = os.path.splitext(sourceFile)
    if fileExtension == ".run":
        # A .run file lists further .run/.test files, one per line,
        # relative to its own directory.
        lines = ReadFileAsLines(sourceFile)
        for line in lines:
            tests += GenerateTestList(os.path.join(os.path.dirname(sourceFile), line), rootDir)
    elif fileExtension == ".test":
        # Normalize to a rootDir-relative, forward-slash path.
        tests.append(os.path.relpath(os.path.realpath(sourceFile), rootDir).replace("\\", "/"))
    return tests
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __init__(self, mainMenu, params=[]): # metadata info about the module, not modified during runtime self.info = { # name for the module that will appear in module menus 'Name': 'Get FileServers', # list of one or more authors for the module 'Author': ['@424f424f'], # more verbose multi-line description of the module 'Description': 'This module will list file servers', # True if the module needs to run in the background 'Background' : False, # File extension to save the file as 'OutputExtension' : "", # if the module needs administrative privileges 'NeedsAdmin' : False, # True if the method doesn't touch disk/is reasonably opsec safe 'OpsecSafe' : True, # the module language 'Language' : 'python', # the minimum language version needed 'MinLanguageVersion' : '2.6', # list of any references/other comments 'Comments': [''] } # any options needed by the module, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Agent' : { # The 'Agent' option is the only one that MUST be in a module 'Description' : 'Agent to run on.', 'Required' : True, 'Value' : '' }, 'LDAPAddress' : { # The 'Agent' option is the only one that MUST be in a module 'Description' : 'LDAP IP/Hostname', 'Required' : True, 'Value' : '' }, 'BindDN' : { # The 'Agent' option is the only one that MUST be in a module 'Description' : '[email protected]', 'Required' : True, 'Value' : '' }, 'Password' : { # The 'Agent' option is the only one that MUST be in a module 'Description' : 'Password to connect to LDAP', 'Required' : False, 'Value' : '' } } # save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. self.mainMenu = mainMenu # During instantiation, any settable option parameters # are passed as an object set to the module and the # options dictionary is automatically set. 
This is mostly # in case options are passed on the command line if params: for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value
EmpireProject/Empire
[ 6874, 2673, 6874, 101, 1438799157 ]
def _getAccessibleAttribute(attributeName): 'Get the accessible attribute.' if attributeName in globalAccessibleAttributeDictionary: return globalAccessibleAttributeDictionary[attributeName] return None
dob71/x2swn
[ 13, 8, 13, 5, 1345256205 ]
def line(valueString): 'Print line.' print(valueString) return valueString
dob71/x2swn
[ 13, 8, 13, 5, 1345256205 ]
def set_test_params(self): self.num_nodes = 4
litecoin-project/litecoin
[ 4159, 3032, 4159, 82, 1339561106 ]
def __init__(self, condition, completion_queue): self._condition = condition self._completion_queue = completion_queue self._due = collections.defaultdict(int) self._events = collections.defaultdict(list)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def in_thread(): while True: event = self._completion_queue.poll() with self._condition: self._events[event.tag].append(event) self._due[event.tag] -= 1 self._condition.notify_all() if self._due[event.tag] <= 0: self._due.pop(event.tag) if not self._due: return
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def event_with_tag(self, tag): with self._condition: while True: if self._events[tag]: return self._events[tag].pop(0) else: self._condition.wait()
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def setUp(self): self.server_completion_queue = cygrpc.CompletionQueue() self.server = cygrpc.Server([(b'grpc.so_reuseport', 0)]) self.server.register_completion_queue(self.server_completion_queue) port = self.server.add_http2_port(b'[::]:0') self.server.start() self.channel = cygrpc.Channel('localhost:{}'.format(port).encode(), [], None) self._server_shutdown_tag = 'server_shutdown_tag' self.server_condition = threading.Condition() self.server_driver = QueueDriver(self.server_condition, self.server_completion_queue) with self.server_condition: self.server_driver.add_due({ self._server_shutdown_tag, }) self.client_condition = threading.Condition() self.client_completion_queue = cygrpc.CompletionQueue() self.client_driver = QueueDriver(self.client_condition, self.client_completion_queue)
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def no_courses(step): world.clear_courses() create_studio_user()
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_click_new_course(step): world.css_click('.new-course-button')
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_fill_in_a_new_course_information(step): fill_in_course_info()
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_create_course(step, name, org, number, run): fill_in_course_info(name=name, org=org, num=number, run=run)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_create_a_course(step): create_a_course()
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_click_the_course_link_in_studio_home(step): # pylint: disable=invalid-name course_css = 'a.course-link' world.css_click(course_css)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_see_error_about_length(step): assert world.css_has_text( '#course_creation_error', 'The combined length of the organization, course number, ' 'and course run fields cannot be more than 65 characters.' )
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def courseware_page_has_loaded_in_studio(step): course_title_css = 'span.course-title' assert world.is_css_present(course_title_css)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_see_the_course_in_studio_home(step): course_css = 'h3.class-title' assert world.css_has_text(course_css, world.scenario_dict['COURSE'].display_name)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def i_am_on_tab(step, tab_name): header_css = 'div.inner-wrapper h1' assert world.css_has_text(header_css, tab_name)
Stanford-Online/edx-platform
[ 41, 19, 41, 1, 1374606346 ]
def __init__(self): self.__elements = list()
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def __iter__(self): for element in self.__elements: yield element while self._couldGrow(): newElements = self._grow() for element in newElements: yield element
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def __fetchToIndex(self, index): while len(self.__elements) <= index and self._couldGrow(): self._grow()
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def __init__(self, theList, theSlice): self.__list = theList self.__start = theSlice.start or 0 self.__stop = theSlice.stop self.__step = theSlice.step or 1
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def __finished(self, index): return self.__stop is not None and index >= self.__stop
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None): PaginatedListBase.__init__(self) self.__requester = requester self.__contentClass = contentClass self.__firstUrl = firstUrl self.__firstParams = firstParams or () self.__nextUrl = firstUrl self.__nextParams = firstParams or {} self.__headers = headers if self.__requester.per_page != 30: self.__nextParams["per_page"] = self.__requester.per_page self._reversed = False self.__totalCount = None
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def totalCount(self): if not self.__totalCount: self._grow() return self.__totalCount
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def reversed(self): r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams) r.__reverse() return r
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def _couldGrow(self): return self.__nextUrl is not None
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def __parseLinkHeader(self, headers): links = {} if "link" in headers: linkHeaders = headers["link"].split(", ") for linkHeader in linkHeaders: (url, rel) = linkHeader.split("; ") url = url[1:-1] rel = rel[5:-1] links[rel] = url return links
cytec/SickRage
[ 17, 17, 17, 1, 1420778851 ]
def xhtml_escape(value): """Escapes a string so it is valid within HTML or XML. Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``. When used in attribute values the escaped strings must be enclosed in quotes. .. versionchanged:: 3.2 Added the single quote to the list of escaped characters. """ return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)], to_basestring(value))
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def json_encode(value): """JSON-encodes the given Python object.""" # JSON permits but does not require forward slashes to be escaped. # This is useful when json data is emitted in a <script> tag # in HTML, as it prevents </script> tags from prematurely terminating # the javascript. Some json libraries do this escaping by default, # although python's standard library does not, so we do it here. # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped return json.dumps(value).replace("</", "<\\/")
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def squeeze(value): """Replace all sequences of whitespace chars with a single space.""" return re.sub(r"[\x00-\x20]+", " ", value).strip()
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def url_unescape(value, encoding='utf-8', plus=True): """Decodes the given value from a URL. The argument may be either a byte or unicode string. If encoding is None, the result will be a byte string. Otherwise, the result is a unicode string in the specified encoding. If ``plus`` is true (the default), plus signs will be interpreted as spaces (literal plus signs must be represented as "%2B"). This is appropriate for query strings and form-encoded values but not for the path component of a URL. Note that this default is the reverse of Python's urllib module. .. versionadded:: 3.1 The ``plus`` argument """ unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote) if encoding is None: return unquote(utf8(value)) else: return unicode_type(unquote(utf8(value)), encoding)
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]