# Source repo: Royal-Society-of-New-Zealand/NZ-ORCID-Hub

@property
def source(self):
    """Gets the source of this EducationSummaryV30.  # noqa: E501

    :return: The source of this EducationSummaryV30.  # noqa: E501
    :rtype: SourceV30
    """
    return self._source
@source.setter
def source(self, source):
    """Sets the source of this EducationSummaryV30.

    :param source: The source of this EducationSummaryV30.  # noqa: E501
    :type: SourceV30
    """
    self._source = source
@property
def put_code(self):
    """Gets the put_code of this EducationSummaryV30.  # noqa: E501

    :return: The put_code of this EducationSummaryV30.  # noqa: E501
    :rtype: int
    """
    return self._put_code
@put_code.setter
def put_code(self, put_code):
    """Sets the put_code of this EducationSummaryV30.

    :param put_code: The put_code of this EducationSummaryV30.  # noqa: E501
    :type: int
    """
    self._put_code = put_code
@property
def department_name(self):
    """Gets the department_name of this EducationSummaryV30.  # noqa: E501

    :return: The department_name of this EducationSummaryV30.  # noqa: E501
    :rtype: str
    """
    return self._department_name
@department_name.setter
def department_name(self, department_name):
    """Sets the department_name of this EducationSummaryV30.

    :param department_name: The department_name of this EducationSummaryV30.  # noqa: E501
    :type: str
    """
    self._department_name = department_name
@property
def role_title(self):
    """Gets the role_title of this EducationSummaryV30.  # noqa: E501

    :return: The role_title of this EducationSummaryV30.  # noqa: E501
    :rtype: str
    """
    return self._role_title
@role_title.setter
def role_title(self, role_title):
    """Sets the role_title of this EducationSummaryV30.

    :param role_title: The role_title of this EducationSummaryV30.  # noqa: E501
    :type: str
    """
    self._role_title = role_title
@property
def start_date(self):
    """Gets the start_date of this EducationSummaryV30.  # noqa: E501

    :return: The start_date of this EducationSummaryV30.  # noqa: E501
    :rtype: FuzzyDateV30
    """
    return self._start_date
@start_date.setter
def start_date(self, start_date):
    """Sets the start_date of this EducationSummaryV30.

    :param start_date: The start_date of this EducationSummaryV30.  # noqa: E501
    :type: FuzzyDateV30
    """
    self._start_date = start_date
@property
def end_date(self):
    """Gets the end_date of this EducationSummaryV30.  # noqa: E501

    :return: The end_date of this EducationSummaryV30.  # noqa: E501
    :rtype: FuzzyDateV30
    """
    return self._end_date
@end_date.setter
def end_date(self, end_date):
    """Sets the end_date of this EducationSummaryV30.

    :param end_date: The end_date of this EducationSummaryV30.  # noqa: E501
    :type: FuzzyDateV30
    """
    self._end_date = end_date
@property
def organization(self):
    """Gets the organization of this EducationSummaryV30.  # noqa: E501

    :return: The organization of this EducationSummaryV30.  # noqa: E501
    :rtype: OrganizationV30
    """
    return self._organization
@organization.setter
def organization(self, organization):
    """Sets the organization of this EducationSummaryV30.

    :param organization: The organization of this EducationSummaryV30.  # noqa: E501
    :type: OrganizationV30
    """
    self._organization = organization
@property
def url(self):
    """Gets the url of this EducationSummaryV30.  # noqa: E501

    :return: The url of this EducationSummaryV30.  # noqa: E501
    :rtype: UrlV30
    """
    return self._url
@url.setter
def url(self, url):
    """Sets the url of this EducationSummaryV30.

    :param url: The url of this EducationSummaryV30.  # noqa: E501
    :type: UrlV30
    """
    self._url = url
@property
def external_ids(self):
    """Gets the external_ids of this EducationSummaryV30.  # noqa: E501

    :return: The external_ids of this EducationSummaryV30.  # noqa: E501
    :rtype: ExternalIDsV30
    """
    return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
    """Sets the external_ids of this EducationSummaryV30.

    :param external_ids: The external_ids of this EducationSummaryV30.  # noqa: E501
    :type: ExternalIDsV30
    """
    self._external_ids = external_ids
@property
def display_index(self):
    """Gets the display_index of this EducationSummaryV30.  # noqa: E501

    :return: The display_index of this EducationSummaryV30.  # noqa: E501
    :rtype: str
    """
    return self._display_index
@display_index.setter
def display_index(self, display_index):
    """Sets the display_index of this EducationSummaryV30.

    :param display_index: The display_index of this EducationSummaryV30.  # noqa: E501
    :type: str
    """
    self._display_index = display_index
@property
def visibility(self):
    """Gets the visibility of this EducationSummaryV30.  # noqa: E501

    :return: The visibility of this EducationSummaryV30.  # noqa: E501
    :rtype: str
    """
    return self._visibility
@visibility.setter
def visibility(self, visibility):
    """Sets the visibility of this EducationSummaryV30.

    :param visibility: The visibility of this EducationSummaryV30.  # noqa: E501
    :type: str
    """
    allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]  # noqa: E501
    if visibility not in allowed_values:
        raise ValueError(
            "Invalid value for `visibility` ({0}), must be one of {1}"  # noqa: E501
            .format(visibility, allowed_values)
        )

    self._visibility = visibility
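Because the setter validates against `allowed_values`, assigning anything outside the four visibility levels fails fast. A quick illustrative use, assuming the generated model can be constructed without arguments:

summary = EducationSummaryV30()
summary.visibility = "PUBLIC"        # accepted
try:
    summary.visibility = "FRIENDS"   # not an allowed value
except ValueError as exc:
    print(exc)  # Invalid value for `visibility` (FRIENDS), must be one of [...]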
@property
def path(self):
    """Gets the path of this EducationSummaryV30.  # noqa: E501

    :return: The path of this EducationSummaryV30.  # noqa: E501
    :rtype: str
    """
    return self._path
@path.setter
def path(self, path):
    """Sets the path of this EducationSummaryV30.

    :param path: The path of this EducationSummaryV30.  # noqa: E501
    :type: str
    """
    self._path = path
def to_str(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())
def __eq__(self, other):
    """Returns true if both objects are equal"""
    if not isinstance(other, EducationSummaryV30):
        return False
    return self.__dict__ == other.__dict__
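These accessor pairs follow the swagger-codegen model layout: each attribute lives in a `_`-prefixed slot and is exposed through a property. A condensed sketch of how they sit inside the generated class (only a few attributes shown, and `to_dict` simplified relative to the generated original, which walks a `swagger_types` mapping):

import pprint

class EducationSummaryV30(object):
    def __init__(self, source=None, put_code=None, visibility=None):
        self._source = source
        self._put_code = put_code
        self._visibility = visibility

    @property
    def put_code(self):
        return self._put_code

    @put_code.setter
    def put_code(self, put_code):
        self._put_code = put_code

    def to_dict(self):
        return {k.lstrip('_'): v for k, v in self.__dict__.items()}

    def to_str(self):
        return pprint.pformat(self.to_dict())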
# Source repo: sonusz/PhasorToolBox

def _kaitai_repr(self):
    _repr_list = []
    for item in vars(self):
        if not item.startswith('_'):
            _r = getattr(self, item)
            if type(_r) in (int, float, str, bytes, bool):
                _repr_list.append("=".join((item, _r.__repr__())))
            else:
                _repr_list.append(item)
    return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def _kaitai_show(self, parent_path=' '):
    if type(self) in (int, float, str, bytes, bool):
        print(" == ".join((parent_path, self.__repr__())))
    elif type(self) == list:
        for i, item in enumerate(self):
            try:
                item.show('{}[{}]'.format(parent_path, i))
            except:
                _kaitai_show(item, '{}[{}]'.format(parent_path, i))
    else:
        for item in sorted(vars(self)):
            if not item.startswith('_'):
                _r = getattr(self, item)
                try:
                    _r.show(parent_path + '.' + item)
                except:
                    _kaitai_show(_r, parent_path + '.' + item)
def __repr__(self):
    _repr_list = ["time=" + str(self.time)] if self.fracsec.fraction_of_second else []
    for item in vars(self):
        if not item.startswith('_'):
            _r = getattr(self, item)
            if type(_r) in (int, float, str, bytes):
                _repr_list.append("=".join((item, _r.__repr__())))
            else:
                _repr_list.append(item)
    return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def __init__(self, _io, _parent=None, _root=None, _mini_cfgs=None):
    self._io = _io
    self._parent = _parent
    self._root = _root if _root else self
    self._pkt_pos = self._io.pos()
    self.sync = self._root.SyncWord(self._io, self, self._root)
    self.framesize = self._io.read_u2be()
    self.idcode = self._io.read_u2be()
    self._mini_cfg = _mini_cfgs.mini_cfg[self.idcode]
    self.soc = self._io.read_u4be()
    self.fracsec = self._root.Fracsec(
        self._io, self, self._root,
        self._mini_cfg.time_base.time_base if self._mini_cfg else None)
    _on = self.sync.frame_type.value
    if _on == 0:  # data frame
        if self._mini_cfg:
            self.data = Data(self._io, _mini_cfg=self._mini_cfg)
        else:
            self.data = self._io.read_bytes((self.framesize - 16))
    elif _on == 3:  # configuration frame 2
        self._raw_data = self._io.read_bytes((self.framesize - 16))
        io = KaitaiStream(BytesIO(self._raw_data))
        self.data = Cfg2(io)
        _mini_cfgs.add_cfg(self.idcode, self.data)
    elif _on == 4:  # command frame
        self._raw_data = self._io.read_bytes((self.framesize - 16))
        io = KaitaiStream(BytesIO(self._raw_data))
        self.data = Command(io)
    elif _on == 5:  # configuration frame 3
        _mini_cfgs.add_cfg(self.raw_pkt)
        self._raw_data = self._io.read_bytes((self.framesize - 16))
        io = KaitaiStream(BytesIO(self._raw_data))
        self.data = Cfg3(io)
    elif _on == 2:  # configuration frame 1 (shares the CFG-2 layout here)
        self._raw_data = self._io.read_bytes((self.framesize - 16))
        io = KaitaiStream(BytesIO(self._raw_data))
        self.data = Cfg2(io)
    elif _on == 1:  # header frame
        self._raw_data = self._io.read_bytes((self.framesize - 16))
        io = KaitaiStream(BytesIO(self._raw_data))
        self.data = Header(io)
    self.chk = self._io.read_u2be()
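The `_on` dispatch in the constructor above follows the IEEE C37.118 sync-word frame types. A condensed summary of the mapping, with the parser classes named as in that constructor (the data/header/configuration semantics are the standard C37.118 assignments, stated here as background rather than taken from this file):

# frame_type value -> payload handling in the constructor above
C37118_FRAME_TYPES = {
    0: 'data frame (Data; needs a previously cached configuration)',
    1: 'header frame (Header)',
    2: 'configuration frame 1 (parsed with Cfg2 here)',
    3: 'configuration frame 2 (Cfg2; cached via _mini_cfgs.add_cfg)',
    4: 'command frame (Command)',
    5: 'configuration frame 3 (Cfg3)',
}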
def __init__(self, _io, _parent=None, _root=None):
    self._io = _io
    self._parent = _parent
    self._root = _root if _root else self
    self.magic = self._io.ensure_fixed_contents(struct.pack('1b', -86))  # -86 == 0xAA sync byte
    self.reserved = self._io.read_bits_int(1) != 0
    self.frame_type = self._root.SyncWord.FrameTypeEnum(
        self._io.read_bits_int(3))
    self.version_number = self._root.SyncWord.VersionNumberEnum(
        self._io.read_bits_int(4))
def __repr__(self):
    _repr_list = ["fraction_of_second=" + str(self.fraction_of_second)] if self.fraction_of_second else []
    for item in vars(self):
        if not item.startswith('_'):
            _r = getattr(self, item)
            if type(_r) in (int, float, str):
                _repr_list.append("=".join((item, _r.__repr__())))
            else:
                _repr_list.append(item)
    return "<" + self.__class__.__name__ + " |" + ", ".join(_repr_list) + ">"
def __init__(self, _io, _parent=None, _root=None, _time_base=None):
    self._io = _io
    self._parent = _parent
    self._root = _root if _root else self
    self._time_base = _time_base
    self.reserved = self._io.read_bits_int(1) != 0
    self.leap_second_direction = self._root.Fracsec.LeapSecondDirectionEnum(
        self._io.read_bits_int(1))
    self.leap_second_occurred = self._io.read_bits_int(1) != 0
    self.leap_second_pending = self._io.read_bits_int(1) != 0
    self.time_quality = self._root.Fracsec.MsgTqEnum(
        self._io.read_bits_int(4))
    self.raw_fraction_of_second = self._io.read_bits_int(24)
@property
def fraction_of_second(self):
    if hasattr(self, '_m_fraction_of_second'):
        return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None

    if self._time_base:
        self._m_fraction_of_second = self.raw_fraction_of_second / self._time_base
    return self._m_fraction_of_second if hasattr(self, '_m_fraction_of_second') else None
@property
def time(self):
    if hasattr(self, '_m_time'):
        return self._m_time if hasattr(self, '_m_time') else None

    self._m_time = self.soc + self.fracsec.fraction_of_second
    return self._m_time if hasattr(self, '_m_time') else None
@property
def chk_body(self):
    if hasattr(self, '_m_chk_body'):
        return self._m_chk_body if hasattr(self, '_m_chk_body') else None

    _pos = self._io.pos()
    self._io.seek(0)
    self._m_chk_body = self._io.read_bytes((self.framesize - 2))
    self._io.seek(_pos)
    return self._m_chk_body if hasattr(self, '_m_chk_body') else None
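The `_m_*` attributes in `fraction_of_second`, `time`, and `chk_body` implement Kaitai Struct's lazy "instance" idiom: compute on first access, cache on the object, and return the cache afterwards. A minimal standalone sketch of the same pattern (class and method names hypothetical):

class Lazy:
    def _expensive_parse(self):
        return 42  # stands in for a real parse or computation

    @property
    def value(self):
        if hasattr(self, '_m_value'):  # computed already? return the cache
            return self._m_value
        self._m_value = self._expensive_parse()
        return self._m_value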
# Source repo: Ingenico-ePayments/connect-sdk-python3

def test_server_meta_data_headers_full(self):
    """Tests that the MetaDataProvider can construct meta_data_headers when supplied with a full shopping cart"""
    shopping_cart_extension = ShoppingCartExtension("Ingenico.creator", "Extension", "1.0", "ExtensionId")
    meta_data_provider = MetaDataProvider("Ingenico", shopping_cart_extension)

    request_headers = meta_data_provider.meta_data_headers
    self.assertEqual(1, len(request_headers))
    self.assertServerMetaInfo(meta_data_provider, "Ingenico", shopping_cart_extension, request_headers[0])
def test_get_server_metadata_headers_no_additional_headers(self):
    """Tests that the MetaDataProvider functions correctly without any additional headers as arguments"""
    meta_data_provider = MetaDataProvider("Ingenico")

    request_headers = meta_data_provider.meta_data_headers
    self.assertEqual(1, len(request_headers))
    self.assertServerMetaInfo(meta_data_provider, "Ingenico", None, request_headers[0])
def test_constructor_with_prohibited_headers(self):
    """Tests that the MetaDataProvider constructor does not accept any headers marked as prohibited"""
    for name in MetaDataProvider.prohibited_headers:
        additional_headers = [RequestHeader("Header1", "Value1"),
                              RequestHeader(name, "should be slashed and burnt"),
                              RequestHeader("Header3", "Value3")]
        with self.assertRaises(Exception) as error:
            MetaDataProvider("Ingenico", None, additional_headers)
        self.assertIn(name, str(error.exception))
# Source repo: pygeo/pycmbs

def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, **kwargs):
    super(JSBACH_BOT, self).__init__(filename, dic_variables, name=name, **kwargs)

    self.experiment = experiment
    self.shift_lon = shift_lon
    self.type = 'JSBACH_BOT'

    self._unique_name = self._get_unique_name()
def get_albedo_data(self, interval='season'):
    """
    get albedo data for JSBACH

    returns Data object
    """
    if interval != 'season':
        raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')

    v = 'var176'
    filename = self.data_dir + 'data/model1/' + self.experiment + '_echam6_BOT_mm_1979-2006_albedo_yseasmean.nc'
    ls_mask = get_T63_landseamask(self.shift_lon)

    albedo = Data(filename, v, read=True,
                  label='MPI-ESM albedo ' + self.experiment, unit='-',
                  lat_name='lat', lon_name='lon',
                  shift_lon=self.shift_lon,
                  mask=ls_mask.data.data)

    return albedo
def get_grass_fraction(self, interval='season'):
    """
    todo implement this for data from a real run !!!
    """
    if interval != 'season':
        raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')

    ls_mask = get_T63_landseamask(self.shift_lon)

    filename = '/home/m300028/shared/dev/svn/trstools-0.0.1/lib/python/pyCMBS/framework/external/vegetation_benchmarking/VEGETATION_COVER_BENCHMARKING/example/historical_r1i1p1-LR_1850-2005_grass_crop_pasture_2001.nc'
    v = 'var12'
    grass = Data(filename, v, read=True,
                 label='MPI-ESM grass fraction ' + self.experiment, unit='-',
                 lat_name='lat', lon_name='lon',
                 #shift_lon=shift_lon,
                 mask=ls_mask.data.data,
                 start_time=pl.num2date(pl.datestr2num('2001-01-01')),
                 stop_time=pl.num2date(pl.datestr2num('2001-12-31')),
                 squeeze=True)

    return grass
def get_rainfall_data(self, interval='season'):
    """
    get rainfall data for JSBACH

    returns Data object
    """
    if interval == 'season':
        pass
    else:
        raise ValueError('Invalid value for interval: %s' % interval)

    # /// PREPROCESSING: seasonal means ///
    s_start_time = str(self.start_time)[0:10]
    s_stop_time = str(self.stop_time)[0:10]

    filename1 = self.data_dir + self.experiment + '_echam6_BOT_mm_1980_sel.nc'
    tmp = pyCDO(filename1, s_start_time, s_stop_time).seldate()
    tmp1 = pyCDO(tmp, s_start_time, s_stop_time).seasmean()
    filename = pyCDO(tmp1, s_start_time, s_stop_time).yseasmean()

    # /// READ DATA ///
    # 1) land / sea mask
    ls_mask = get_T63_landseamask(self.shift_lon)

    # 2) precipitation data
    try:
        v = 'var4'
        rain = Data(filename, v, read=True, scale_factor=86400.,
                    label='MPI-ESM ' + self.experiment, unit='mm/day',
                    lat_name='lat', lon_name='lon',
                    shift_lon=self.shift_lon,
                    mask=ls_mask.data.data)
    except:
        v = 'var142'
        rain = Data(filename, v, read=True, scale_factor=86400.,
                    label='MPI-ESM ' + self.experiment, unit='mm/day',
                    lat_name='lat', lon_name='lon',
                    shift_lon=self.shift_lon,
                    mask=ls_mask.data.data)

    return rain
def __init__(self, filename, dic_variables, experiment, name='',
             shift_lon=False, input_format='grb', raw_outdata='outdata/jsbach/', **kwargs):
    """
    The assignment of certain variables to different input streams is done
    in the routine get_jsbach_data_generic()

    Parameters
    ----------
    input_format : str
        specifies file format of input data ['nc','grb']
    """
    super(JSBACH_RAW2, self).__init__(filename, dic_variables, name=name, **kwargs)
    self.experiment = experiment
    self.shift_lon = shift_lon
    #self.get_data()
    self.type = 'JSBACH_RAW2'

    self.input_format = input_format
    assert self.input_format in ['nc', 'grb']

    self.raw_outdata = raw_outdata

    self._unique_name = self._get_unique_name()

    # do preprocessing of streams (only needed once!) ---
    self.files = {}
    self._preproc_streams()
    #~ self.model_dict = copy.deepcopy(model_dict)

    self.model = 'JSBACH'
def _get_filenames_veg_stream(self):
    return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_veg_mm_*.' + self.input_format
def _get_filenames_surf_stream(self):
    return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_surf_mm_*.' + self.input_format
def _get_filenames_albedo_NIR(self):
    return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_mm_*_NIR_albedo.' + self.input_format
def _preproc_streams(self):
    """
    It is assumed that the standard JSBACH postprocessing scripts have been applied.
    Thus monthly mean data is available for each stream and code tables still
    need to be applied.

    This routine does the following:
    1) merge all times from individual (monthly mean) output files
    2) assign codetables to work with proper variable names
    3) aggregate data from tiles to gridbox values
    """
    print('Preprocessing JSBACH raw data streams (may take a while) ...')

    cdo = Cdo()

    # jsbach stream
    print('   JSBACH stream ...')
    outfile = get_temporary_directory() + self.experiment + '_jsbach_mm_full.nc'
    if os.path.exists(outfile):
        pass
    else:
        codetable = self.data_dir + 'log/' + self.experiment + '_jsbach.codes'
        tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_',
                              dir=get_temporary_directory())  # temporary file
        #~ print self.data_dir
        #~ print self.raw_outdata
        #~ print 'Files: ', self._get_filenames_jsbach_stream()
        #~ stop
        if len(glob.glob(self._get_filenames_jsbach_stream())) > 0:  # check if input files existing at all
            print('Merging the following files:', self._get_filenames_jsbach_stream())
            cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_jsbach_stream())
            if os.path.exists(codetable):
                cdo.monmean(options='-f nc', output=outfile,
                            input='-setpartab,' + codetable + ' ' + tmp)  # monmean needed here, as otherwise interface does not work
            else:
                cdo.monmean(options='-f nc', output=outfile, input=tmp)  # monmean needed here, as otherwise interface does not work
            print('Outfile: ', outfile)
            #~ os.remove(tmp)
            print('Temporary name: ', tmp)
    self.files.update({'jsbach': outfile})

    # veg stream
    print('   VEG stream ...')
    outfile = get_temporary_directory() + self.experiment + '_jsbach_veg_mm_full.nc'
    if os.path.exists(outfile):
        pass
    else:
        codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_veg.codes'
        tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_veg_',
                              dir=get_temporary_directory())  # temporary file
        if len(glob.glob(self._get_filenames_veg_stream())) > 0:  # check if input files existing at all
            cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_veg_stream())
            if os.path.exists(codetable):
                cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)
            else:
                cdo.monmean(options='-f nc', output=outfile, input=tmp)
            os.remove(tmp)
    self.files.update({'veg': outfile})

    # land stream
    print('   LAND stream ...')
    outfile = get_temporary_directory() + self.experiment + '_jsbach_land_mm_full.nc'
    if os.path.exists(outfile):
        pass
    else:
        codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_land.codes'
        tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_land_',
                              dir=get_temporary_directory())  # temporary file
        if len(glob.glob(self._get_filenames_land_stream())) > 0:
            cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_land_stream())
            if os.path.exists(codetable):
                cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)
            else:
                cdo.monmean(options='-f nc', output=outfile, input=tmp)
            os.remove(tmp)
    self.files.update({'land': outfile})

    # surf stream
    print('   SURF stream ...')
    outfile = get_temporary_directory() + self.experiment + '_jsbach_surf_mm_full.nc'
    if os.path.exists(outfile):
        pass
    else:
        codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_surf.codes'
        tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_surf_',
                              dir=get_temporary_directory())  # temporary file
        if len(glob.glob(self._get_filenames_surf_stream())) > 0:
            print(glob.glob(self._get_filenames_surf_stream()))
            cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_surf_stream())
            if os.path.exists(codetable):
                cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)
            else:
                cdo.monmean(options='-f nc', output=outfile, input=tmp)
            os.remove(tmp)
    self.files.update({'surf': outfile})

    # ECHAM BOT stream
    print('   BOT stream ...')
    outfile = get_temporary_directory() + self.experiment + '_echam6_echam_mm_full.nc'
    if os.path.exists(outfile):
        pass
    else:
        codetable = self.data_dir + 'log/' + self.experiment + '_echam6_echam.codes'
        tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_echam6_echam_',
                              dir=get_temporary_directory())  # temporary file
        if len(glob.glob(self._get_filenames_echam_BOT())) > 0:
            cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_echam_BOT())
            if os.path.exists(codetable):
                cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp)
            else:
                cdo.monmean(options='-f nc', output=outfile, input=tmp)
            os.remove(tmp)
    self.files.update({'echam': outfile})

    # ALBEDO files, as preprocessed by a script of Thomas
    print('   ALBEDO VIS stream ...')
    outfile = get_temporary_directory() + self.experiment + '_jsbach_VIS_albedo_mm_full.nc'
    if os.path.exists(outfile):
        pass
    else:
        if len(glob.glob(self._get_filenames_albedo_VIS())) > 0:
            cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_VIS())
    self.files.update({'albedo_vis': outfile})

    print('   ALBEDO NIR stream ...')
    outfile = get_temporary_directory() + self.experiment + '_jsbach_NIR_albedo_mm_full.nc'
    if os.path.exists(outfile):
        pass
    else:
        if len(glob.glob(self._get_filenames_albedo_NIR())) > 0:
            cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_NIR())
    self.files.update({'albedo_nir': outfile})
def get_albedo_data(self, interval='season'):
    """
    calculate albedo as ratio of upward and downwelling fluxes
    first the monthly mean fluxes are used to calculate the albedo,

    This routine uses the definitions of the routines how to read
    upward and downward fluxes
    """
    if self.start_time is None:
        raise ValueError('Start time needs to be specified')
    if self.stop_time is None:
        raise ValueError('Stop time needs to be specified')

    #~ tmpdict = copy.deepcopy(kwargs)
    #~ print self.dic_vars
    routine_up = self.dic_vars['surface_upward_flux']
    routine_down = self.dic_vars['sis']

    #sw_down = self.get_surface_shortwave_radiation_down(interval=interval, **kwargs)
    cmd = 'sw_down = self.' + routine_down
    exec(cmd)

    #sw_up = self.get_surface_shortwave_radiation_up(interval=interval, **kwargs)
    cmd = 'sw_up = self.' + routine_up
    exec(cmd)

    # climatological mean
    alb = sw_up[0].div(sw_down[0])
    alb.label = self.experiment + ' albedo'
    alb.unit = '-'

    # original data
    alb_org = sw_up[1][2].div(sw_down[1][2])
    alb_org.label = self.experiment + ' albedo'
    alb_org.unit = '-'

    retval = (alb_org.time, alb_org.fldmean(), alb_org)

    return alb, retval
def get_albedo_data_nir(self, interval='season', **kwargs):
    """
    This routine retrieves the JSBACH albedo information for NIR
    it requires a preprocessing with a script that aggregates from tile
    to box values!

    Parameters
    ----------
    interval : str
        ['season','monthly']
    """
    #~ tmpdict = copy.deepcopy(self.model_dict['albedo_nir'])
    return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_surface_shortwave_radiation_down(self, interval='season', **kwargs):
    return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_temperature_2m(self, interval='season', **kwargs):
    return self.get_jsbach_data_generic(interval=interval, **kwargs)
def __init__(self, filename, dic_variables, experiment, name='',
             shift_lon=False, model_dict=None, input_format='nc', raw_outdata='', **kwargs):
    super(JSBACH_SPECIAL, self).__init__(filename, dic_variables, experiment,
                                         name=name, shift_lon=shift_lon,
                                         model_dict=model_dict,
                                         input_format=input_format,
                                         raw_outdata=raw_outdata, **kwargs)
def __init__(self, filename, dic_variables, experiment, name='',
             shift_lon=False, intervals='monthly', **kwargs):
    super(JSBACH_RAW, self).__init__(filename, dic_variables, name=name,
                                     intervals=intervals, **kwargs)

    print('WARNING: This model class should be depreciated as it contained a lot of hardcoded dependencies and is only intermediate')
    # TODO: depreciate this class
    stop  # intentional NameError to keep this class from being used

    self.experiment = experiment
    self.shift_lon = shift_lon
    self.type = 'JSBACH_RAW'
    self._unique_name = self._get_unique_name()
def get_temperature_2m(self, interval='monthly', **kwargs):
    """
    get surface temperature (2m) from JSBACH model results

    Parameters
    ----------
    interval : str
        specifies the aggregation interval. Possible options: ['season','monthly']
    """
    locdict = kwargs[self.type]

    y1 = '1980-01-01'  # TODO move this to the JSON dictionary or some parameter file
    y2 = '2010-12-31'
    variable = 'temp2'
    rawfile = self.data_dir + self.experiment + '_echam6_echam_' + variable + '_ALL.nc'
    files = glob.glob(rawfile)
    if len(files) != 1:
        print('Inputfiles: ', files)
        raise ValueError('Something went wrong: Invalid number of input files!')
    else:
        rawfile = files[0]
    mdata, retval = self._do_preprocessing(rawfile, variable, y1, y2,
                                           interval=interval,
                                           valid_mask=locdict['valid_mask'])
    return mdata, retval
def _do_preprocessing(self, rawfile, varname, s_start_time, s_stop_time,
                      interval='monthly', force_calc=False,
                      valid_mask='global', target_grid='t63grid'):
    """
    perform preprocessing
    * selection of variable
    * temporal subsetting
    """
    cdo = Cdo()

    if not os.path.exists(rawfile):
        print('File not existing! %s ' % rawfile)
        return None, None

    # calculate monthly means
    file_monthly = get_temporary_directory() + os.sep + os.path.basename(rawfile[:-3]) + '_' + varname + '_' + s_start_time + '_' + s_stop_time + '_mm.nc'
    if (force_calc) or (not os.path.exists(file_monthly)):
        cdo.monmean(options='-f nc', output=file_monthly,
                    input='-seldate,' + s_start_time + ',' + s_stop_time + ' ' + '-selvar,' + varname + ' ' + rawfile,
                    force=force_calc)
    else:
        pass
    if not os.path.exists(file_monthly):
        raise ValueError('Monthly preprocessing did not work! %s ' % file_monthly)

    # calculate monthly or seasonal climatology
    if interval == 'monthly':
        mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
        mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
        mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
        mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
        cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
        cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
        cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
        cdo.div(options='-f nc', output=mdata_N_file,
                input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc)  # number of samples
    elif interval == 'season':
        mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
        mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
        mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
        mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
        cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
        cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
        cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
        cdo.div(options='-f nc -b 32', output=mdata_N_file,
                input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc)  # number of samples
    else:
        raise ValueError('Unknown temporal interval. Can not perform preprocessing!')

    if not os.path.exists(mdata_clim_file):
        return None

    # read data
    if interval == 'monthly':
        thetime_cylce = 12
    elif interval == 'season':
        thetime_cylce = 4
    else:
        print(interval)
        raise ValueError('Unsupported interval!')

    mdata = Data(mdata_clim_file, varname, read=True, label=self.name,
                 shift_lon=False, time_cycle=thetime_cylce,
                 lat_name='lat', lon_name='lon')
    mdata_std = Data(mdata_clim_std_file, varname, read=True,
                     label=self.name + ' std', unit='-', shift_lon=False,
                     time_cycle=thetime_cylce, lat_name='lat', lon_name='lon')
    mdata.std = mdata_std.data.copy()
    del mdata_std
    mdata_N = Data(mdata_N_file, varname, read=True,
                   label=self.name + ' std', shift_lon=False,
                   lat_name='lat', lon_name='lon')
    mdata.n = mdata_N.data.copy()
    del mdata_N

    # ensure that climatology always starts with January, therefore set date and then sort
    mdata.adjust_time(year=1700, day=15)  # set arbitrary time for climatology
    mdata.timsort()

    # 4) read monthly data
    mdata_all = Data(file_monthly, varname, read=True, label=self.name,
                     shift_lon=False, time_cycle=12,
                     lat_name='lat', lon_name='lon')
    mdata_all.adjust_time(day=15)

    # mask_antarctica masks everything below 60 degree S.
    # here we only mask Antarctica, if only LAND points shall be used
    if valid_mask == 'land':
        mask_antarctica = True
    elif valid_mask == 'ocean':
        mask_antarctica = False
    else:
        mask_antarctica = False

    if target_grid == 't63grid':
        mdata._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
        mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
    else:
        tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid, mask_antarctica=mask_antarctica)
        mdata._apply_mask(tmpmsk)
        mdata_all._apply_mask(tmpmsk)
        del tmpmsk

    mdata_mean = mdata_all.fldmean()

    # return data as a tuple list
    retval = (mdata_all.time, mdata_mean, mdata_all)

    del mdata_all
    return mdata, retval
def get_surface_shortwave_radiation_up(self, interval='monthly', **kwargs):
    """
    get surface shortwave upward radiation data for JSBACH

    Parameters
    ----------
    interval : str
        specifies the aggregation interval. Possible options: ['season','monthly']
    """
    locdict = kwargs[self.type]

    y1 = '1980-01-01'  # TODO: move this to the JSON dictionary or some parameter file
    y2 = '2010-12-31'
    rawfile = self.data_dir + self.experiment + '_jsbach_' + y1[0:4] + '_' + y2[0:4] + '.nc'
    mdata, retval = self._do_preprocessing(rawfile, 'swdown_reflect_acc', y1, y2,
                                           interval=interval,
                                           valid_mask=locdict['valid_mask'])
    return mdata, retval
def get_model_data_generic(self, interval='monthly', **kwargs):
    """
    This is only a wrapper to redirect to individual functions
    for the JSBACH_RAW class

    Currently only the usage for rainfall is supported!
    """
    # HACK: only a wrapper, should be depreciated
    raise ValueError('Rainfall analysis not working yet!')
    self.get_rainfall_data(interval=interval, **kwargs)
def get_gpp_data(self, interval='season'):
    """
    get surface GPP data for JSBACH

    todo temporal aggregation of data --> or leave it to the user!
    """
    cdo = Cdo()
    v = 'var167'
    y1 = str(self.start_time)[0:10]
    y2 = str(self.stop_time)[0:10]
    rawfilename = self.data_dir + 'data/model/' + self.experiment + '_' + y1[0:4] + '-' + y2[0:4] + '.nc'

    times_in_file = int(''.join(cdo.ntime(input=rawfilename)))

    if interval == 'season':
        if times_in_file != 4:
            tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
            cdo.yseasmean(options='-f nc -b 32 -r ',
                          input='-selvar,' + v + ' ' + rawfilename,
                          output=tmp_file[:-3] + '_yseasmean.nc')
            rawfilename = tmp_file[:-3] + '_yseasmean.nc'

    if interval == 'monthly':
        if times_in_file != 12:
            tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
            cdo.ymonmean(options='-f nc -b 32 -r ',
                         input='-selvar,' + v + ' ' + rawfilename,
                         output=tmp_file[:-3] + '_ymonmean.nc')
            rawfilename = tmp_file[:-3] + '_ymonmean.nc'

    if not os.path.exists(rawfilename):
        return None

    filename = rawfilename

    #--- read land-sea mask
    ls_mask = get_T63_landseamask(self.shift_lon)

    #--- read GPP data
    gpp = Data4D(filename, v, read=True,
                 label=self.experiment + ' ' + v, unit='gC m-2 a-1',
                 lat_name='lat', lon_name='lon',
                 shift_lon=self.shift_lon,
                 mask=ls_mask.data.data,
                 scale_factor=3600. * 24. * 30. / 0.083)  # seconds per month / (months as year fraction)

    return gpp.sum_data4D()
# Source repo: propdata/rewp-api

def __init__(self, base_site, auth, listing_type, listing_uuid):
    super(RemoteListingImages, self).__init__(auth)
    self._base_site = base_site
    self._auth = auth
    self._listing_type = listing_type
    self._listing_uuid = listing_uuid
    self._endpoint = base_site + "/api/listings/%s/%s/images/" % (
        listing_type, listing_uuid)
def get(self, uuid):
    """
    Returns a single ListingImage instance, matching uuid.
    Raises a DoesNotExist exception if the object does not exist.
    """
    b = ListingResidential()
    b.branch_name = "Foo"
    return b
def set_fields(self, images):
    self.images = images
# Source repo: Azure/azure-sdk-for-python

def __init__(self, client, config, serializer, deserializer) -> None:
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def get_long_running_output(pipeline_response):
    deserialized = self._deserialize('CalculateExchangeOperationResultResponse', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
# Source repo: qtile/qtile

def reset(cls):
    cls.info = [
        [
            "Keyboard Control:",
            " auto repeat: on key click percent: 0 LED mask: 00000002",
            " XKB indicators:",
            " 00: Caps Lock: off 01: Num Lock: on 02: Scroll Lock: off",
            " 03: Compose: off 04: Kana: off 05: Sleep: off",
        ],
        [
            "Keyboard Control:",
            " auto repeat: on key click percent: 0 LED mask: 00000002",
            " XKB indicators:",
            " 00: Caps Lock: on 01: Num Lock: on 02: Scroll Lock: off",
            " 03: Compose: off 04: Kana: off 05: Sleep: off",
        ],
    ]
    cls.index = 0
    cls.is_error = False
def call_process(cls, cmd):
    if cls.is_error:
        raise subprocess.CalledProcessError(-1, cmd=cmd, output="Couldn't call xset.")
    if cmd[1:] == ["q"]:
        track = cls.info[cls.index]
        output = "\n".join(track)
        return output
def patched_cnli(monkeypatch):
    MockCapsNumLockIndicator.reset()
    monkeypatch.setattr(
        "libqtile.widget.caps_num_lock_indicator.subprocess", MockCapsNumLockIndicator
    )
    monkeypatch.setattr(
        "libqtile.widget.caps_num_lock_indicator.subprocess.CalledProcessError",
        subprocess.CalledProcessError,
    )
    monkeypatch.setattr(
        "libqtile.widget.caps_num_lock_indicator.base.ThreadPoolText.call_process",
        MockCapsNumLockIndicator.call_process,
    )
    return caps_num_lock_indicator
def test_cnli_caps_on(fake_qtile, patched_cnli, fake_window):
    widget = patched_cnli.CapsNumLockIndicator()
    # Simulate Caps on
    MockCapsNumLockIndicator.index = 1
    fakebar = FakeBar([widget], window=fake_window)
    widget._configure(fake_qtile, fakebar)
    text = widget.poll()
    assert text == "Caps on Num on"
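The widget under test shells out to `xset q` and reads the Caps/Num state from the `XKB indicators` block, which is exactly what the canned strings in `MockCapsNumLockIndicator.reset` simulate. A rough sketch of the parsing idea those strings exercise (the function name is hypothetical, not the widget's actual code):

import re

def parse_xset_q(output):
    # pull 'Caps Lock: off' / 'Num Lock: on' out of the indicator lines
    caps = re.search(r"Caps Lock:\s*(\w+)", output).group(1)
    num = re.search(r"Num Lock:\s*(\w+)", output).group(1)
    return "Caps {} Num {}".format(caps, num)

# With cls.index = 1 the mock output yields "Caps on Num on", matching the test.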
# Source repo: Azure/azure-sdk-for-python

def callback(response):
    assert response is not None
    assert response.model_version
    assert response.raw_response is not None
    assert response.statistics.document_count == 5
    assert response.statistics.transaction_count == 4
    assert response.statistics.valid_document_count == 4
    assert response.statistics.erroneous_document_count == 1
def callback(resp):
    language_str = "\"language\": \"fr\""
    language = resp.http_request.body.count(language_str)
    assert language == 3
def callback(resp):
    language_str = "\"language\": \"\""
    language = resp.http_request.body.count(language_str)
    assert language == 3
def callback(resp):
    language_str = "\"language\": \"\""
    language = resp.http_request.body.count(language_str)
    assert language == 2
    language_str = "\"language\": \"en\""
    language = resp.http_request.body.count(language_str)
    assert language == 1
def callback(resp):
    language_str = "\"language\": \"de\""
    language = resp.http_request.body.count(language_str)
    assert language == 3
def callback(resp):
    language_str = "\"language\": \"es\""
    language = resp.http_request.body.count(language_str)
    assert language == 2
    language_str = "\"language\": \"en\""
    language = resp.http_request.body.count(language_str)
    assert language == 1
def callback(resp):
    language_str = "\"language\": \"es\""
    language = resp.http_request.body.count(language_str)
    assert language == 2
    language_str = "\"language\": \"en\""
    language = resp.http_request.body.count(language_str)
    assert language == 1
def callback(resp):
    language_str = "\"language\": \"es\""
    language = resp.http_request.body.count(language_str)
    assert language == 3
def callback(resp):
    assert "azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
        VERSION, platform.python_version(), platform.platform()) in \
        resp.http_request.headers["User-Agent"]
def callback(response):
    language_str = "\"language\": \"es\""
    assert response.http_request.body.count(language_str) == 1
    assert response.model_version is not None
    assert response.statistics is not None
def callback(pipeline_response, deserialized, _):
    return "cls result"
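This three-argument signature matches the `cls` response hook that azure-core operations accept: when provided, it is invoked with the pipeline response, the deserialized model, and the response headers, and its return value replaces the normal result (compare `get_long_running_output` earlier, which calls `cls(pipeline_response, deserialized, {})`). A hedged usage sketch with an illustrative operation name:

# `client.some_operation` is illustrative; any generated operation accepting `cls` works the same way.
result = client.some_operation(
    cls=lambda pipeline_response, deserialized, headers: {
        "status": pipeline_response.http_response.status_code,  # raw transport detail
        "model": deserialized,                                  # the usual return value
    }
)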
def callback(response):
    assert response.http_request.query["stringIndexType"] == "UnicodeCodePoint"
def callback(response):
    assert response.http_request.query["stringIndexType"] == "TextElements_v8"
def callback(resp):
    assert resp.http_request.query['loggingOptOut']
# Source repo: altMITgcm/MITgcm66h

def getattributes(nc, exclude=[]):
    # in order not to rely on implementation, provide fallback
    try:
        a = dict(nc._attributes)
    except AttributeError:
        a = dict((k, getattr(nc, k)) for k in dir(nc)
                 if k[0] != '_' and k not in exclude)
    return a
def __init__(self, fpatt, layout=None, multitime=False):
    fnames = glob.glob(fpatt)
def __getattr__(self, k):
    try:
        return self._attributes[k]
    except KeyError:
        raise AttributeError("'MNC' object has no attribute '" + k + "'")
def close(self):
    """Close tile files"""
    for nc in self.nc:
        nc.close()
@property
def faces(self):
    if self.layout == 'faces':
        return self._fns
    else:
        return None
def __init__(self, mnc, name):
    self._name = name
    self.nc = mnc.nc
    self.layout = mnc.layout
    self._i0 = mnc._i0
    self._ie = mnc._ie
    self._j0 = mnc._j0
    self._je = mnc._je
    self._nf = mnc._nf
    self._fn = mnc._fn
    v0 = mnc.nc[0].variables[name]
    self._attributes = getattributes(v0, _exclude_var)
    self.itemsize = v0.data.itemsize
    self.typecode = v0.typecode
    self.dtype = np.dtype(self.typecode())
    self.dimensions = v0.dimensions
    self.shape = tuple(mnc.dimensions[d] for d in self.dimensions)
    self.isrec = self.shape[0] is None
    if self.isrec:
        self.shape = (mnc.nrec,) + self.shape[1:]

    # which dimensions are tiled
    self._Xdim = None
    self._Ydim = None
    for i, d in enumerate(self.dimensions):
        if d[0] == 'X':
            self._Xdim = i
        if d[0] == 'Y':
            self._Ydim = i
def __dir__(self):
    return list(self.__dict__) + list(self._attributes)
def _getfaces(self, ind):
    res = []
    for f in range(self._nf):
        shape = tuple(np.isscalar(d) and d or d[f] for d in self.shape)
        a = np.zeros(shape, self.typecode())
        res.append(a)
    s = [slice(None) for d in self.shape]
    for i, nc in enumerate(self.nc):
        fn = self._fn[i]
        if self._Xdim is not None:
            s[self._Xdim] = slice(self._i0[i], self._ie[i])
        if self._Ydim is not None:
            s[self._Ydim] = slice(self._j0[i], self._je[i])
        res[fn][tuple(s)] = nc.variables[self._name][:]  # tuple index required by modern numpy
    for f in range(self._nf):
        res[f] = res[f][ind]
    return res
def mnc_files(fpatt, layout=None):
    return MNC(fpatt, layout)
def rdmnc(fpatt, varnames=None, iters=None, slices=Ellipsis, layout=None):
    '''
    Read one or more variables from an mnc file set.
    '''
# Source repo: sqlalchemy/sqlalchemy

def define_tables(cls, metadata):
    Table(
        "table1",
        metadata,
        Column(
            "id", Integer, primary_key=True, test_needs_autoincrement=True
        ),
        Column("data", String(30)),
    )
    Table(
        "table2",
        metadata,
        Column(
            "id", Integer, primary_key=True, test_needs_autoincrement=True
        ),
        Column("someid", None, ForeignKey("table1.id")),
    )
def test_config_errors(self):
    Session = scoped_session(sa.orm.sessionmaker())

    s = Session()  # noqa
    assert_raises_message(
        sa.exc.InvalidRequestError,
        "Scoped session is already present",
        Session,
        bind=testing.db,
    )

    assert_warns_message(
        sa.exc.SAWarning,
        "At least one scoped session is already present. ",
        Session.configure,
        bind=testing.db,
    )
def test_methods_etc(self):
    mock_session = Mock()
    mock_session.bind = "the bind"

    sess = scoped_session(lambda: mock_session)

    sess.add("add")
    sess.delete("delete")

    sess.get("Cls", 5)

    eq_(sess.bind, "the bind")

    eq_(
        mock_session.mock_calls,
        [
            mock.call.add("add", _warn=True),
            mock.call.delete("delete"),
            mock.call.get(
                "Cls",
                5,
                options=None,
                populate_existing=False,
                with_for_update=None,
                identity_token=None,
                execution_options=None,
            ),
        ],
    )

    with mock.patch(
        "sqlalchemy.orm.session.object_session"
    ) as mock_object_session:
        sess.object_session("foo")

    eq_(mock_object_session.mock_calls, [mock.call("foo")])
def test_get_bind_custom_session_subclass(self, style):
    """test #6285"""

    class MySession(Session):
        if style == "style1":

            def get_bind(self, mapper=None, **kwargs):
                return super().get_bind(mapper=mapper, **kwargs)

        elif style == "style2":
            # this was the workaround for #6285, ensure it continues
            # working as well
            def get_bind(self, mapper=None, *args, **kwargs):
                return super().get_bind(mapper, *args, **kwargs)

        elif style == "style3":
            # py2k style
            def get_bind(self, mapper=None, *args, **kwargs):
                return super(MySession, self).get_bind(
                    mapper, *args, **kwargs
                )

        elif style == "style4":
            # py2k style
            def get_bind(self, mapper=None, **kwargs):
                return super(MySession, self).get_bind(
                    mapper=mapper, **kwargs
                )

    s1 = MySession(testing.db)
    is_(s1.get_bind(), testing.db)

    ss = scoped_session(sessionmaker(testing.db, class_=MySession))

    is_(ss.get_bind(), testing.db)
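For context, `scoped_session` wraps a session factory in a registry (thread-local by default), so repeated calls within one scope hand back the same `Session`. A minimal usage sketch (the engine URL is illustrative):

from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine("sqlite://")  # illustrative in-memory database
Session = scoped_session(sessionmaker(bind=engine))

s1 = Session()
s2 = Session()
assert s1 is s2      # same underlying Session within one scope
Session.remove()     # dispose of it when the unit of work ends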
# Source repo: heyman/locust

def test_parse_timespan_invalid_values(self):
    self.assertRaises(ValueError, parse_timespan, None)
    self.assertRaises(ValueError, parse_timespan, "")
    self.assertRaises(ValueError, parse_timespan, "q")
def test_parse_timespan(self):
    self.assertEqual(7, parse_timespan("7"))
    self.assertEqual(7, parse_timespan("7s"))
    self.assertEqual(60, parse_timespan("1m"))
    self.assertEqual(7200, parse_timespan("2h"))
    self.assertEqual(3787, parse_timespan("1h3m7s"))
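Read together, the two tests pin down the contract: bare digits are seconds, `h`/`m`/`s` suffixes combine additively (1h3m7s = 3600 + 180 + 7 = 3787), and `None`, empty, or unparseable strings are rejected. A sketch of a compatible implementation, not Locust's actual code:

import re

def parse_timespan(time_str):
    if not time_str:
        raise ValueError("Invalid timespan")
    if time_str.isdigit():
        return int(time_str)  # plain seconds, e.g. "7"
    m = re.fullmatch(r"((?P<h>\d+)h)?((?P<m>\d+)m)?((?P<s>\d+)s)?", time_str)
    if m is None or not m.group(0):
        raise ValueError("Invalid timespan: %s" % time_str)
    parts = {k: int(v) for k, v in m.groupdict(default="0").items()}
    return parts["h"] * 3600 + parts["m"] * 60 + parts["s"]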
def test_rounding_down(self):
    self.assertEqual(1, proper_round(1.499999999))
    self.assertEqual(5, proper_round(5.499999999))
    self.assertEqual(2, proper_round(2.05))
    self.assertEqual(3, proper_round(3.05))
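These cases describe round-half-up behavior: 1.499999999 and 2.05 fall below the .5 threshold and drop, unlike Python 3's built-in banker's rounding. A compatible sketch for the non-negative inputs exercised here, not necessarily Locust's implementation:

def proper_round(val):
    # round half up: 1.499999999 -> 1, 2.05 -> 2, 2.5 -> 3
    return int(val + 0.5)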
# Source repo: TheAlgorithms/Python

def __init__(self, data: T) -> None:
    self.data = data
    self.parent = self
    self.rank = 0
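This constructor is the node type of a disjoint-set (union-find) structure: every element starts as its own parent with rank 0. A self-contained sketch of the operations that usually accompany it, with a generic `DisjointSetNode` standing in for the class this `__init__` belongs to:

from typing import Generic, TypeVar

T = TypeVar("T")

class DisjointSetNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0

def find(node: DisjointSetNode) -> DisjointSetNode:
    if node.parent is not node:
        node.parent = find(node.parent)  # path compression
    return node.parent

def union(a: DisjointSetNode, b: DisjointSetNode) -> None:
    ra, rb = find(a), find(b)
    if ra is rb:
        return
    if ra.rank < rb.rank:  # union by rank: attach the shorter tree
        ra, rb = rb, ra
    rb.parent = ra
    if ra.rank == rb.rank:
        ra.rank += 1

x, y = DisjointSetNode(1), DisjointSetNode(2)
union(x, y)
assert find(x) is find(y)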