Dataset schema (one record per function):
repo: string (7–55 chars) | path: string (4–127 chars) | func_name: string (1–88 chars) | original_string: string (75–19.8k chars) | language: string (1 class) | code: string (75–19.8k chars) | code_tokens: sequence | docstring: string (3–17.3k chars) | docstring_tokens: sequence | sha: string (40 chars) | url: string (87–242 chars) | partition: string (1 class)

astraw/stdeb | stdeb/util.py | parse_val | python | train
sha: 493ab88e8a60be053b1baef81fb39b45e17ceef5
url: https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L611-L618

def parse_val(cfg,section,option):
    """extract a single value from .cfg"""
    vals = parse_vals(cfg,section,option)
    if len(vals)==0:
        return ''
    else:
        assert len(vals)==1, (section, option, vals, type(vals))
        return vals[0]

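A hedged call-site sketch for the record above; parse_vals is defined elsewhere in stdeb/util.py, and the option name 'Source' is made up for illustration:

# cfg: a ConfigParser object that stdeb filled from stdeb.cfg
source = parse_val(cfg, 'DEFAULT', 'Source')  # hypothetical option name
# returns '' when the option is absent, and asserts if more than one value was parsed
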
astraw/stdeb | stdeb/util.py | check_cfg_files | python | train
sha: 493ab88e8a60be053b1baef81fb39b45e17ceef5
url: https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L667-L686

def check_cfg_files(cfg_files,module_name):
    """check if the configuration files actually specify something

    If config files are given, give warning if they don't contain
    information. This may indicate a wrong module name, for
    example.
    """
    cfg = ConfigParser.SafeConfigParser()
    cfg.read(cfg_files)
    if cfg.has_section(module_name):
        section_items = cfg.items(module_name)
    else:
        section_items = []
    default_items = cfg.items('DEFAULT')

    n_items = len(section_items) + len(default_items)
    if n_items==0:
        log.warn('configuration files were specified, but no options were '
                 'found in "%s" or "DEFAULT" sections.' % (module_name,) )

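The record above uses Python 2 names: ConfigParser.SafeConfigParser was renamed in Python 3's configparser module, deprecated in 3.2, and the alias was removed in 3.12. A hedged sketch of the same parsing with the modern module; the file name is a placeholder:

import configparser

cfg = configparser.ConfigParser()  # SafeConfigParser's behaviour is the default here
cfg.read(['stdeb.cfg'])            # placeholder config file name
print(dict(cfg.items('DEFAULT')))  # empty dict would trigger the warning above
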
astraw/stdeb | stdeb/transport.py | RequestsTransport._build_url | python | train
sha: 493ab88e8a60be053b1baef81fb39b45e17ceef5
url: https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/transport.py#L90-L96

def _build_url(self, host, handler):
    """
    Build a url for our request based on the host, handler and use_https
    property
    """
    scheme = 'https' if self.use_https else 'http'
    return '%s://%s/%s' % (scheme, host, handler)

nwhitehead/pyfluidsynth | fluidsynth.py | Synth.setting | python | train
sha: 9a8ecee996e83a279e8d29d75e8a859aee4aba67
url: https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L421-L429

def setting(self, opt, val):
    """change an arbitrary synth setting, type-smart"""
    opt = opt.encode()
    if isinstance(val, basestring):
        fluid_settings_setstr(self.settings, opt, val)
    elif isinstance(val, int):
        fluid_settings_setint(self.settings, opt, val)
    elif isinstance(val, float):
        fluid_settings_setnum(self.settings, opt, val)

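basestring in the record above is a Python 2 name, so the module is assumed to provide a Python 3 alias. A minimal usage sketch; 'synth.gain' and 'synth.polyphony' are standard FluidSynth setting names, and the values are illustrative:

import fluidsynth

fs = fluidsynth.Synth()
fs.setting('synth.gain', 0.5)      # float routed to fluid_settings_setnum
fs.setting('synth.polyphony', 64)  # int routed to fluid_settings_setint
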
nwhitehead/pyfluidsynth | fluidsynth.py | Synth.start | python | train
sha: 9a8ecee996e83a279e8d29d75e8a859aee4aba67
url: https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L430-L460

def start(self, driver=None, device=None, midi_driver=None):
    """Start audio output driver in separate background thread

    Call this function any time after creating the Synth object.
    If you don't call this function, use get_samples() to generate
    samples.

    Optional keyword argument:
    driver : which audio driver to use for output
        Possible choices:
        'alsa', 'oss', 'jack', 'portaudio'
        'sndmgr', 'coreaudio', 'Direct Sound'
    device: the device to use for audio output

    Not all drivers will be available for every platform, it
    depends on which drivers were compiled into FluidSynth for
    your platform.
    """
    if driver is not None:
        assert (driver in ['alsa', 'oss', 'jack', 'portaudio', 'sndmgr', 'coreaudio', 'Direct Sound', 'pulseaudio'])
        fluid_settings_setstr(self.settings, b'audio.driver', driver.encode())
        if device is not None:
            fluid_settings_setstr(self.settings, str('audio.%s.device' % (driver)).encode(), device.encode())
    self.audio_driver = new_fluid_audio_driver(self.settings, self.synth)
    if midi_driver is not None:
        assert (midi_driver in ['alsa_seq', 'alsa_raw', 'oss', 'winmidi', 'midishare', 'coremidi'])
        fluid_settings_setstr(self.settings, b'midi.driver', midi_driver.encode())
        self.router = new_fluid_midi_router(self.settings, fluid_synth_handle_midi_event, self.synth)
        fluid_synth_set_midi_router(self.synth, self.router)
        self.midi_driver = new_fluid_midi_driver(self.settings, fluid_midi_router_handle_midi_event, self.router)

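A minimal end-to-end sketch with the pyfluidsynth calls from these records; the driver name and 'example.sf2' are placeholders to adapt to your platform:

import fluidsynth

fs = fluidsynth.Synth()
fs.start(driver='alsa')           # e.g. 'coreaudio' on macOS
sfid = fs.sfload('example.sf2')   # placeholder SoundFont path
fs.program_select(0, sfid, 0, 0)  # channel 0, bank 0, preset 0
fs.noteon(0, 60, 100)             # middle C at velocity 100
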
nwhitehead/pyfluidsynth | fluidsynth.py | Synth.sfload | python | train
sha: 9a8ecee996e83a279e8d29d75e8a859aee4aba67
url: https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L466-L468

def sfload(self, filename, update_midi_preset=0):
    """Load SoundFont and return its ID"""
    return fluid_synth_sfload(self.synth, filename.encode(), update_midi_preset)

nwhitehead/pyfluidsynth | fluidsynth.py | Synth.channel_info | python | train
sha: 9a8ecee996e83a279e8d29d75e8a859aee4aba67
url: https://github.com/nwhitehead/pyfluidsynth/blob/9a8ecee996e83a279e8d29d75e8a859aee4aba67/fluidsynth.py#L475-L479

def channel_info(self, chan):
    """get soundfont, bank, prog, preset name of channel"""
    info=fluid_synth_channel_info_t()
    fluid_synth_get_channel_info(self.synth, chan, byref(info))
    return (info.sfont_id, info.bank, info.program, info.name)

scrapinghub/kafka-scanner | kafka_scanner/msg_processor_handlers.py | MsgProcessorHandlers.decompress_messages | python | train
sha: 8a71901012e8c948180f70a485b57f8d2e7e3ec1
url: https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/msg_processor_handlers.py#L85-L91

def decompress_messages(self, partitions_offmsgs):
    """ Decompress pre-defined compressed fields for each message. """
    for pomsg in partitions_offmsgs:
        if pomsg['message']:
            pomsg['message'] = self.decompress_fun(pomsg['message'])
        yield pomsg

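A hedged, self-contained illustration of the per-record transformation the generator applies; zlib stands in for whatever decompress_fun the handler was configured with, and the 'partition'/'offset' keys are guesses at the record layout:

import zlib

records = [{'partition': 0, 'offset': 12, 'message': zlib.compress(b'payload')},
           {'partition': 0, 'offset': 13, 'message': b''}]
for rec in records:
    if rec['message']:  # empty messages pass through untouched
        rec['message'] = zlib.decompress(rec['message'])
print([r['message'] for r in records])  # [b'payload', b'']
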
scrapinghub/kafka-scanner | kafka_scanner/__init__.py | KafkaScanner._init_offsets | python | train
sha: 8a71901012e8c948180f70a485b57f8d2e7e3ec1
url: https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/__init__.py#L275-L318

def _init_offsets(self, batchsize):
    """
    Compute new initial and target offsets and do other maintenance tasks
    """
    upper_offsets = previous_lower_offsets = self._lower_offsets
    if not upper_offsets:
        upper_offsets = self.latest_offsets
    self._upper_offsets = {p: o for p, o in upper_offsets.items() if o > self._min_lower_offsets[p]}

    # remove db dupes not used anymore
    if self._dupes:
        for p in list(six.iterkeys(self._dupes)):
            if p not in self._upper_offsets:
                db = self._dupes.pop(p)
                db.close()
                os.remove(db.filename)

    partition_batchsize = 0
    if self._upper_offsets:
        partition_batchsize = max(int(batchsize * self.__scan_excess), batchsize)
        self._lower_offsets = self._upper_offsets.copy()
        total_offsets_run = 0
        for p in sorted(self._upper_offsets.keys()):
            # readjust partition_batchsize when a partition scan starts from latest offset
            if total_offsets_run > 0 and partition_batchsize > batchsize:
                partition_batchsize = batchsize
            if partition_batchsize > 0:
                self._lower_offsets[p] = max(self._upper_offsets[p] - partition_batchsize, self._min_lower_offsets[p])
                offsets_run = self._upper_offsets[p] - self._lower_offsets[p]
                total_offsets_run += offsets_run
                partition_batchsize = partition_batchsize - offsets_run
            else:
                break
        log.info('Offset run: %d', total_offsets_run)

        # create new consumer if partition list changes
        if previous_lower_offsets is not None and set(previous_lower_offsets.keys()) != set(self._lower_offsets):
            self._create_scan_consumer(self._lower_offsets.keys())

        # consumer must restart from newly computed lower offsets
        self._update_offsets(self._lower_offsets)

    log.info('Initial offsets for topic %s: %s', self._topic, repr(self._lower_offsets))
    log.info('Target offsets for topic %s: %s', self._topic, repr(self._upper_offsets))
    return batchsize

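A standalone, hypothetical walk-through of the backward-batching loop above, with plain dicts in place of scanner state (the scan-excess scaling and the mid-loop readjustment are omitted for brevity):

upper = {0: 1000, 1: 1000}  # hypothetical per-partition latest offsets
min_lower = {0: 0, 1: 0}
budget = 300                # the partition_batchsize to spread across partitions

lower, total = dict(upper), 0
for p in sorted(upper):
    if budget <= 0:
        break
    lower[p] = max(upper[p] - budget, min_lower[p])  # step back at most `budget` offsets
    run = upper[p] - lower[p]
    total += run
    budget -= run
print(lower, total)  # {0: 700, 1: 1000} 300: partition 0 absorbs the whole budget
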
scrapinghub/kafka-scanner | kafka_scanner/__init__.py | KafkaScanner._filter_deleted_records | python | train
sha: 8a71901012e8c948180f70a485b57f8d2e7e3ec1
url: https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/__init__.py#L404-L411

def _filter_deleted_records(self, batches):
    """
    Filter out deleted records
    """
    for batch in batches:
        for record in batch:
            if not self.must_delete_record(record):
                yield record

systemd/python-systemd | systemd/journal.py | get_catalog | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L393-L400

def get_catalog(mid):
    """Return catalog entry for the specified ID.

    `mid` should be either a UUID or a 32 digit hex number.
    """
    if isinstance(mid, _uuid.UUID):
        mid = mid.hex
    return _get_catalog(mid)

systemd/python-systemd | systemd/journal.py | Reader._convert_entry | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L200-L208

def _convert_entry(self, entry):
    """Convert entire journal entry utilising _convert_field."""
    result = {}
    for key, value in entry.items():
        if isinstance(value, list):
            result[key] = [self._convert_field(key, val) for val in value]
        else:
            result[key] = self._convert_field(key, value)
    return result

systemd/python-systemd | systemd/journal.py | Reader.add_match | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L233-L244

def add_match(self, *args, **kwargs):
    """Add one or more matches to filter journal log entries.

    All matches of different fields are combined with logical AND, and
    matches of the same field are automatically combined with logical OR.
    Matches can be passed as strings of form "FIELD=value", or keyword
    arguments FIELD="value".
    """
    args = list(args)
    args.extend(_make_line(key, val) for key, val in kwargs.items())
    for arg in args:
        super(Reader, self).add_match(arg)

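A minimal filtering sketch with the Reader API from these records; 'ssh.service' is a placeholder unit name:

from systemd import journal

j = journal.Reader()
j.this_boot()                             # AND: only the current boot
j.add_match(_SYSTEMD_UNIT='ssh.service')  # AND with the line above
j.add_match(PRIORITY='4')
j.add_match(PRIORITY='6')                 # same field, so OR with the PRIORITY match above
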
systemd/python-systemd | systemd/journal.py | Reader.get_next | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L246-L265

def get_next(self, skip=1):
    r"""Return the next log entry as a dictionary.

    Entries will be processed with converters specified during Reader
    creation.

    Optional `skip` value will return the `skip`-th log entry.

    Currently a standard dictionary of fields is returned, but in the
    future this might be changed to a different mapping type, so the
    calling code should not make assumptions about a specific type.
    """
    if super(Reader, self)._next(skip):
        entry = super(Reader, self)._get_all()
        if entry:
            entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
            entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
            entry['__CURSOR'] = self._get_cursor()
            return self._convert_entry(entry)
    return dict()

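A short consumption sketch; get_next() returns an empty (falsy) dict once the journal is exhausted, which is what terminates the loop below:

from systemd import journal

j = journal.Reader()
j.seek_head()
entry = j.get_next()
while entry:
    print(entry.get('MESSAGE', ''))
    entry = j.get_next()
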
systemd/python-systemd | systemd/journal.py | Reader.query_unique | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L283-L293

def query_unique(self, field):
    """Return a set of unique values appearing in the journal for the given
    `field`.

    Note this does not respect any journal matches.

    Entries will be processed with converters specified during
    Reader creation.
    """
    return set(self._convert_field(field, value)
               for value in super(Reader, self).query_unique(field))

systemd/python-systemd | systemd/journal.py | Reader.wait | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L295-L306

def wait(self, timeout=None):
    """Wait for a change in the journal.

    `timeout` is the maximum time in seconds to wait, or None which
    means to wait forever.

    Returns one of NOP (no change), APPEND (new entries have been added to
    the end of the journal), or INVALIDATE (journal files have been added or
    removed).
    """
    us = -1 if timeout is None else int(timeout * 1000000)
    return super(Reader, self).wait(us)

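A hedged tail-follow sketch built on wait() and the APPEND constant the docstring names; the five-second timeout is arbitrary:

from systemd import journal

j = journal.Reader()
j.seek_tail()
j.get_previous()  # step back so iteration resumes at the newest entry
while True:
    if j.wait(timeout=5) == journal.APPEND:
        for entry in j:  # Reader is iterable over newly appended entries
            print(entry.get('MESSAGE', ''))
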
systemd/python-systemd | systemd/journal.py | Reader.seek_realtime | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L308-L327

def seek_realtime(self, realtime):
    """Seek to a matching journal entry nearest to `timestamp` time.

    Argument `realtime` must be either an integer UNIX timestamp (in
    microseconds since the beginning of the UNIX epoch), or a float UNIX
    timestamp (in seconds since the beginning of the UNIX epoch), or a
    datetime.datetime instance. The integer form is deprecated.

    >>> import time
    >>> from systemd import journal

    >>> yesterday = time.time() - 24 * 60**2
    >>> j = journal.Reader()
    >>> j.seek_realtime(yesterday)
    """
    if isinstance(realtime, _datetime.datetime):
        realtime = int(float(realtime.strftime("%s.%f")) * 1000000)
    elif not isinstance(realtime, int):
        realtime = int(realtime * 1000000)
    return super(Reader, self).seek_realtime(realtime)

systemd/python-systemd | systemd/journal.py | Reader.seek_monotonic | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L329-L342

def seek_monotonic(self, monotonic, bootid=None):
    """Seek to a matching journal entry nearest to `monotonic` time.

    Argument `monotonic` is a timestamp from boot in either seconds or a
    datetime.timedelta instance. Argument `bootid` is a string or UUID
    representing which boot the monotonic time refers to. Defaults to
    current bootid.
    """
    if isinstance(monotonic, _datetime.timedelta):
        monotonic = monotonic.total_seconds()
    monotonic = int(monotonic * 1000000)
    if isinstance(bootid, _uuid.UUID):
        bootid = bootid.hex
    return super(Reader, self).seek_monotonic(monotonic, bootid)

systemd/python-systemd | systemd/journal.py | Reader.log_level | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L344-L351

def log_level(self, level):
    """Set maximum log `level` by setting matches for PRIORITY.
    """
    if 0 <= level <= 7:
        for i in range(level+1):
            self.add_match(PRIORITY="%d" % i)
    else:
        raise ValueError("Log level must be 0 <= level <= 7")

systemd/python-systemd | systemd/journal.py | Reader.messageid_match | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L353-L363

def messageid_match(self, messageid):
    """Add match for log entries with specified `messageid`.

    `messageid` can be a string of hexadecimal digits or a UUID
    instance. Standard message IDs can be found in systemd.id128.

    Equivalent to add_match(MESSAGE_ID=`messageid`).
    """
    if isinstance(messageid, _uuid.UUID):
        messageid = messageid.hex
    self.add_match(MESSAGE_ID=messageid)

systemd/python-systemd | systemd/journal.py | Reader.this_boot | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L365-L376

def this_boot(self, bootid=None):
    """Add match for _BOOT_ID for current boot or the specified boot ID.

    If specified, bootid should be either a UUID or a 32 digit hex number.

    Equivalent to add_match(_BOOT_ID='bootid').
    """
    if bootid is None:
        bootid = _id128.get_boot().hex
    else:
        bootid = getattr(bootid, 'hex', bootid)
    self.add_match(_BOOT_ID=bootid)

systemd/python-systemd | systemd/journal.py | Reader.this_machine | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L378-L390

def this_machine(self, machineid=None):
    """Add match for _MACHINE_ID equal to the ID of this machine.

    If specified, machineid should be either a UUID or a 32 digit hex
    number.

    Equivalent to add_match(_MACHINE_ID='machineid').
    """
    if machineid is None:
        machineid = _id128.get_machine().hex
    else:
        machineid = getattr(machineid, 'hex', machineid)
    self.add_match(_MACHINE_ID=machineid)

systemd/python-systemd | systemd/journal.py | JournalHandler.emit | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/journal.py#L568-L604

def emit(self, record):
    """Write `record` as a journal event.

    MESSAGE is taken from the message provided by the user, and PRIORITY,
    LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended
    automatically. In addition, record.MESSAGE_ID will be used if present.
    """
    try:
        msg = self.format(record)
        pri = self.map_priority(record.levelno)
        # defaults
        extras = self._extra.copy()
        # higher priority
        if record.exc_text:
            extras['EXCEPTION_TEXT'] = record.exc_text
        if record.exc_info:
            extras['EXCEPTION_INFO'] = record.exc_info
        if record.args:
            extras['CODE_ARGS'] = str(record.args)
        # explicit arguments, highest priority
        extras.update(record.__dict__)
        self.send(msg,
                  PRIORITY=format(pri),
                  LOGGER=record.name,
                  THREAD_NAME=record.threadName,
                  PROCESS_NAME=record.processName,
                  CODE_FILE=record.pathname,
                  CODE_LINE=record.lineno,
                  CODE_FUNC=record.funcName,
                  **extras)
    except Exception:
        self.handleError(record)

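A minimal logging setup for the handler above; 'demo' and the extra SYSLOG_IDENTIFIER field are placeholder choices, and keyword fields passed to JournalHandler become the _extra defaults that emit() copies:

import logging
from systemd.journal import JournalHandler

log = logging.getLogger('demo')
log.addHandler(JournalHandler(SYSLOG_IDENTIFIER='demo'))
log.setLevel(logging.INFO)
log.info('hello from %s', 'demo')  # lands in the journal with PRIORITY=6
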
systemd/python-systemd | systemd/daemon.py | listen_fds | python | train
sha: c06c5d401d60ae9175367be0797a6c2b562ac5ba
url: https://github.com/systemd/python-systemd/blob/c06c5d401d60ae9175367be0797a6c2b562ac5ba/systemd/daemon.py#L55-L71

def listen_fds(unset_environment=True):
    """Return a list of socket activated descriptors

    Example::

      (in primary window)
      $ systemd-activate -l 2000 python3 -c \\
          'from systemd.daemon import listen_fds; print(listen_fds())'
      (in another window)
      $ telnet localhost 2000
      (in primary window)
      ...
      Execing python3 (...)
      [3]
    """
    num = _listen_fds(unset_environment)
    return list(range(LISTEN_FDS_START, LISTEN_FDS_START + num))

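A hedged sketch of adopting an activated descriptor; socket.socket(fileno=...) auto-detects the family and type on Python 3.7+, and this assumes the unit passed a listening stream socket:

import socket
from systemd.daemon import listen_fds

fds = listen_fds()
if fds:
    srv = socket.socket(fileno=fds[0])  # adopt the first activated fd
    conn, addr = srv.accept()
    conn.sendall(b'hello\n')
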
earl/beanstalkc | beanstalkc.py | Connection.connect | python | train
sha: 70c2ffc41cc84b0a1ae557e470e1db89b7b61023
url: https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L71-L77

def connect(self):
    """Connect to beanstalkd server."""
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.settimeout(self._connect_timeout)
    SocketError.wrap(self._socket.connect, (self.host, self.port))
    self._socket.settimeout(None)
    self._socket_file = self._socket.makefile('rb')

earl/beanstalkc | beanstalkc.py | Connection.close | python | train
sha: 70c2ffc41cc84b0a1ae557e470e1db89b7b61023
url: https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L79-L88

def close(self):
    """Close connection to server."""
    try:
        self._socket.sendall('quit\r\n')
    except socket.error:
        pass
    try:
        self._socket.close()
    except socket.error:
        pass

earl/beanstalkc | beanstalkc.py | Connection.put | python | train
sha: 70c2ffc41cc84b0a1ae557e470e1db89b7b61023
url: https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L140-L147

def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
    """Put a job into the current tube. Returns job id."""
    assert isinstance(body, str), 'Job body must be a str instance'
    jid = self._interact_value('put %d %d %d %d\r\n%s\r\n' % (
                                   priority, delay, ttr, len(body), body),
                               ['INSERTED'],
                               ['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
    return int(jid)

earl/beanstalkc | beanstalkc.py | Connection.reserve | python | train
sha: 70c2ffc41cc84b0a1ae557e470e1db89b7b61023
url: https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L149-L166

def reserve(self, timeout=None):
    """Reserve a job from one of the watched tubes, with optional timeout
    in seconds. Returns a Job object, or None if the request times out."""
    if timeout is not None:
        command = 'reserve-with-timeout %d\r\n' % timeout
    else:
        command = 'reserve\r\n'
    try:
        return self._interact_job(command,
                                  ['RESERVED'],
                                  ['DEADLINE_SOON', 'TIMED_OUT'])
    except CommandFailed:
        exc = sys.exc_info()[1]
        _, status, results = exc.args
        if status == 'TIMED_OUT':
            return None
        elif status == 'DEADLINE_SOON':
            raise DeadlineSoon(results)

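A minimal producer/consumer round trip with the Connection and Job methods in these records, against a beanstalkd on its default host and port:

import beanstalkc

beanstalk = beanstalkc.Connection(host='localhost', port=11300)
beanstalk.put('hey!')      # produce into the default tube
job = beanstalk.reserve()  # blocks until a job is ready
print(job.body)            # 'hey!'
job.delete()               # acknowledge and remove the job
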
earl/beanstalkc | beanstalkc.py | Connection.release | python | train
sha: 70c2ffc41cc84b0a1ae557e470e1db89b7b61023
url: https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L244-L248

def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
    """Release a reserved job back into the ready queue."""
    self._interact('release %d %d %d\r\n' % (jid, priority, delay),
                   ['RELEASED', 'BURIED'],
                   ['NOT_FOUND'])

earl/beanstalkc | beanstalkc.py | Job.delete | def delete(self):
"""Delete this job."""
self.conn.delete(self.jid)
self.reserved = False | python | def delete(self):
"""Delete this job."""
self.conn.delete(self.jid)
self.reserved = False | [
"def",
"delete",
"(",
"self",
")",
":",
"self",
".",
"conn",
".",
"delete",
"(",
"self",
".",
"jid",
")",
"self",
".",
"reserved",
"=",
"False"
] | Delete this job. | [
"Delete",
"this",
"job",
"."
] | 70c2ffc41cc84b0a1ae557e470e1db89b7b61023 | https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L283-L286 | train |
earl/beanstalkc | beanstalkc.py | Job.release | def release(self, priority=None, delay=0):
"""Release this job back into the ready queue."""
if self.reserved:
self.conn.release(self.jid, priority or self._priority(), delay)
self.reserved = False | python | def release(self, priority=None, delay=0):
"""Release this job back into the ready queue."""
if self.reserved:
self.conn.release(self.jid, priority or self._priority(), delay)
self.reserved = False | [
"def",
"release",
"(",
"self",
",",
"priority",
"=",
"None",
",",
"delay",
"=",
"0",
")",
":",
"if",
"self",
".",
"reserved",
":",
"self",
".",
"conn",
".",
"release",
"(",
"self",
".",
"jid",
",",
"priority",
"or",
"self",
".",
"_priority",
"(",
")",
",",
"delay",
")",
"self",
".",
"reserved",
"=",
"False"
] | Release this job back into the ready queue. | [
"Release",
"this",
"job",
"back",
"into",
"the",
"ready",
"queue",
"."
] | 70c2ffc41cc84b0a1ae557e470e1db89b7b61023 | https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L288-L292 | train |
earl/beanstalkc | beanstalkc.py | Job.bury | def bury(self, priority=None):
"""Bury this job."""
if self.reserved:
self.conn.bury(self.jid, priority or self._priority())
self.reserved = False | python | def bury(self, priority=None):
"""Bury this job."""
if self.reserved:
self.conn.bury(self.jid, priority or self._priority())
self.reserved = False | [
"def",
"bury",
"(",
"self",
",",
"priority",
"=",
"None",
")",
":",
"if",
"self",
".",
"reserved",
":",
"self",
".",
"conn",
".",
"bury",
"(",
"self",
".",
"jid",
",",
"priority",
"or",
"self",
".",
"_priority",
"(",
")",
")",
"self",
".",
"reserved",
"=",
"False"
] | Bury this job. | [
"Bury",
"this",
"job",
"."
] | 70c2ffc41cc84b0a1ae557e470e1db89b7b61023 | https://github.com/earl/beanstalkc/blob/70c2ffc41cc84b0a1ae557e470e1db89b7b61023/beanstalkc.py#L294-L298 | train |
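release() and bury() combine into a simple retry policy. A hedged sketch, reusing conn and process() from the worker sketch above; job.stats() is a standard beanstalkd job command whose result includes a 'releases' counter.

job = conn.reserve()
try:
    process(job.body)
    job.delete()
except Exception:
    if job.stats()['releases'] < 3:
        job.release(delay=30)  # back into the ready queue after 30 s
    else:
        job.bury()             # give up after three release cycles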
fatiando/pooch | pooch/core.py | Pooch.abspath | def abspath(self):
"Absolute path to the local storage"
return Path(os.path.abspath(os.path.expanduser(str(self.path)))) | python | def abspath(self):
"Absolute path to the local storage"
return Path(os.path.abspath(os.path.expanduser(str(self.path)))) | [
"def",
"abspath",
"(",
"self",
")",
":",
"return",
"Path",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"str",
"(",
"self",
".",
"path",
")",
")",
")",
")"
] | Absolute path to the local storage | [
"Absolute",
"path",
"to",
"the",
"local",
"storage"
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L221-L223 | train |
fatiando/pooch | pooch/core.py | Pooch.fetch | def fetch(self, fname, processor=None):
"""
Get the absolute path to a file in the local storage.
If it's not in the local storage, it will be downloaded. If the hash of the file
in local storage doesn't match the one in the registry, will download a new copy
of the file. This is considered a sign that the file was updated in the remote
storage. If the hash of the downloaded file still doesn't match the one in the
registry, will raise an exception to warn of possible file corruption.
Post-processing actions sometimes need to be taken on downloaded files
(unzipping, conversion to a more efficient format, etc). If these actions are
time or memory consuming, it would be best to do this only once when the file is
actually downloaded. Use the *processor* argument to specify a function that is
executed after the download (if required) to perform these actions. See below.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
processor : None or callable
If not None, then a function (or callable object) that will be called
before returning the full path and after the file has been downloaded (if
required). See below.
Returns
-------
full_path : str
The absolute path (including the file name) of the file in the local
storage.
Notes
-----
Processor functions should have the following format:
.. code:: python
def myprocessor(fname, action, pooch):
'''
Processes the downloaded file and returns a new file name.
The function **must** take as arguments (in order):
fname : str
The full path of the file in the local data storage
action : str
Either:
"download" (file doesn't exist and will be downloaded),
"update" (file is outdated and will be downloaded), or
"fetch" (file exists and is updated so no download is necessary).
pooch : pooch.Pooch
The instance of the Pooch class that is calling this function.
The return value can be anything but is usually a full path to a file
(or list of files). This is what will be returned by *fetch* in place of
the original file path.
'''
...
"""
self._assert_file_in_registry(fname)
# Create the local data directory if it doesn't already exist
if not self.abspath.exists():
os.makedirs(str(self.abspath))
full_path = self.abspath / fname
in_storage = full_path.exists()
if not in_storage:
action = "download"
elif in_storage and file_hash(str(full_path)) != self.registry[fname]:
action = "update"
else:
action = "fetch"
if action in ("download", "update"):
action_word = dict(download="Downloading", update="Updating")
warn(
"{} data file '{}' from remote data store '{}' to '{}'.".format(
action_word[action], fname, self.get_url(fname), str(self.path)
)
)
self._download_file(fname)
if processor is not None:
return processor(str(full_path), action, self)
return str(full_path) | python | def fetch(self, fname, processor=None):
"""
Get the absolute path to a file in the local storage.
If it's not in the local storage, it will be downloaded. If the hash of the file
in local storage doesn't match the one in the registry, will download a new copy
of the file. This is considered a sign that the file was updated in the remote
storage. If the hash of the downloaded file still doesn't match the one in the
registry, will raise an exception to warn of possible file corruption.
Post-processing actions sometimes need to be taken on downloaded files
(unzipping, conversion to a more efficient format, etc). If these actions are
time or memory consuming, it would be best to do this only once when the file is
actually downloaded. Use the *processor* argument to specify a function that is
executed after the download (if required) to perform these actions. See below.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
processor : None or callable
If not None, then a function (or callable object) that will be called
before returning the full path and after the file has been downloaded (if
required). See below.
Returns
-------
full_path : str
The absolute path (including the file name) of the file in the local
storage.
Notes
-----
Processor functions should have the following format:
.. code:: python
def myprocessor(fname, action, pooch):
'''
Processes the downloaded file and returns a new file name.
The function **must** take as arguments (in order):
fname : str
The full path of the file in the local data storage
action : str
Either:
"download" (file doesn't exist and will be downloaded),
"update" (file is outdated and will be downloaded), or
"fetch" (file exists and is updated so no download is necessary).
pooch : pooch.Pooch
The instance of the Pooch class that is calling this function.
The return value can be anything but is usually a full path to a file
(or list of files). This is what will be returned by *fetch* in place of
the original file path.
'''
...
"""
self._assert_file_in_registry(fname)
# Create the local data directory if it doesn't already exist
if not self.abspath.exists():
os.makedirs(str(self.abspath))
full_path = self.abspath / fname
in_storage = full_path.exists()
if not in_storage:
action = "download"
elif in_storage and file_hash(str(full_path)) != self.registry[fname]:
action = "update"
else:
action = "fetch"
if action in ("download", "update"):
action_word = dict(download="Downloading", update="Updating")
warn(
"{} data file '{}' from remote data store '{}' to '{}'.".format(
action_word[action], fname, self.get_url(fname), str(self.path)
)
)
self._download_file(fname)
if processor is not None:
return processor(str(full_path), action, self)
return str(full_path) | [
"def",
"fetch",
"(",
"self",
",",
"fname",
",",
"processor",
"=",
"None",
")",
":",
"self",
".",
"_assert_file_in_registry",
"(",
"fname",
")",
"# Create the local data directory if it doesn't already exist",
"if",
"not",
"self",
".",
"abspath",
".",
"exists",
"(",
")",
":",
"os",
".",
"makedirs",
"(",
"str",
"(",
"self",
".",
"abspath",
")",
")",
"full_path",
"=",
"self",
".",
"abspath",
"/",
"fname",
"in_storage",
"=",
"full_path",
".",
"exists",
"(",
")",
"if",
"not",
"in_storage",
":",
"action",
"=",
"\"download\"",
"elif",
"in_storage",
"and",
"file_hash",
"(",
"str",
"(",
"full_path",
")",
")",
"!=",
"self",
".",
"registry",
"[",
"fname",
"]",
":",
"action",
"=",
"\"update\"",
"else",
":",
"action",
"=",
"\"fetch\"",
"if",
"action",
"in",
"(",
"\"download\"",
",",
"\"update\"",
")",
":",
"action_word",
"=",
"dict",
"(",
"download",
"=",
"\"Downloading\"",
",",
"update",
"=",
"\"Updating\"",
")",
"warn",
"(",
"\"{} data file '{}' from remote data store '{}' to '{}'.\"",
".",
"format",
"(",
"action_word",
"[",
"action",
"]",
",",
"fname",
",",
"self",
".",
"get_url",
"(",
"fname",
")",
",",
"str",
"(",
"self",
".",
"path",
")",
")",
")",
"self",
".",
"_download_file",
"(",
"fname",
")",
"if",
"processor",
"is",
"not",
"None",
":",
"return",
"processor",
"(",
"str",
"(",
"full_path",
")",
",",
"action",
",",
"self",
")",
"return",
"str",
"(",
"full_path",
")"
] | Get the absolute path to a file in the local storage.
If it's not in the local storage, it will be downloaded. If the hash of the file
in local storage doesn't match the one in the registry, will download a new copy
of the file. This is considered a sign that the file was updated in the remote
storage. If the hash of the downloaded file still doesn't match the one in the
registry, will raise an exception to warn of possible file corruption.
Post-processing actions sometimes need to be taken on downloaded files
(unzipping, conversion to a more efficient format, etc). If these actions are
time or memory consuming, it would be best to do this only once when the file is
actually downloaded. Use the *processor* argument to specify a function that is
executed after the download (if required) to perform these actions. See below.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
processor : None or callable
If not None, then a function (or callable object) that will be called
before returning the full path and after the file has been downloaded (if
required). See below.
Returns
-------
full_path : str
The absolute path (including the file name) of the file in the local
storage.
Notes
-----
Processor functions should have the following format:
.. code:: python
def myprocessor(fname, action, pooch):
'''
Processes the downloaded file and returns a new file name.
The function **must** take as arguments (in order):
fname : str
The full path of the file in the local data storage
action : str
Either:
"download" (file doesn't exist and will be downloaded),
"update" (file is outdated and will be downloaded), or
"fetch" (file exists and is updated so no download is necessary).
pooch : pooch.Pooch
The instance of the Pooch class that is calling this function.
The return value can be anything but is usually a full path to a file
(or list of files). This is what will be returned by *fetch* in place of
the original file path.
'''
... | [
"Get",
"the",
"absolute",
"path",
"to",
"a",
"file",
"in",
"the",
"local",
"storage",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L230-L320 | train |
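A hedged sketch of the processor hook documented above. The project name, base URL, and registry hash are placeholders; pooch.create and pooch.os_cache are assumed from the same package. The processor runs on every fetch() call but only does work when a fresh copy was just downloaded or updated.

import gzip
import pooch

def decompress(fname, action, pooch_obj):
    out = fname[:-3]                      # strip the ".gz" suffix
    if action in ("download", "update"):  # fresh copy just arrived
        with gzip.open(fname, "rb") as fin, open(out, "wb") as fout:
            fout.write(fin.read())
    return out                            # fetch() returns this path instead

pup = pooch.create(
    path=pooch.os_cache("myproject"),          # placeholder project name
    base_url="https://example.com/data/",      # placeholder URL
    registry={"samples.csv.gz": "c7a8f1..."},  # placeholder SHA256
)
fname = pup.fetch("samples.csv.gz", processor=decompress)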
fatiando/pooch | pooch/core.py | Pooch.get_url | def get_url(self, fname):
"""
Get the full URL to download a file in the registry.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
"""
self._assert_file_in_registry(fname)
return self.urls.get(fname, "".join([self.base_url, fname])) | python | def get_url(self, fname):
"""
Get the full URL to download a file in the registry.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
"""
self._assert_file_in_registry(fname)
return self.urls.get(fname, "".join([self.base_url, fname])) | [
"def",
"get_url",
"(",
"self",
",",
"fname",
")",
":",
"self",
".",
"_assert_file_in_registry",
"(",
"fname",
")",
"return",
"self",
".",
"urls",
".",
"get",
"(",
"fname",
",",
"\"\"",
".",
"join",
"(",
"[",
"self",
".",
"base_url",
",",
"fname",
"]",
")",
")"
] | Get the full URL to download a file in the registry.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage. | [
"Get",
"the",
"full",
"URL",
"to",
"download",
"a",
"file",
"in",
"the",
"registry",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L329-L341 | train |
fatiando/pooch | pooch/core.py | Pooch._download_file | def _download_file(self, fname):
"""
Download a file from the remote data storage to the local storage.
Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
Raises
------
ValueError
If the hash of the downloaded file doesn't match the hash in the registry.
"""
destination = self.abspath / fname
source = self.get_url(fname)
# Stream the file to a temporary so that we can safely check its hash before
# overwriting the original
fout = tempfile.NamedTemporaryFile(delete=False, dir=str(self.abspath))
try:
with fout:
response = requests.get(source, stream=True)
response.raise_for_status()
for chunk in response.iter_content(chunk_size=1024):
if chunk:
fout.write(chunk)
tmphash = file_hash(fout.name)
if tmphash != self.registry[fname]:
raise ValueError(
"Hash of downloaded file '{}' doesn't match the entry in the registry:"
" Expected '{}' and got '{}'.".format(
fout.name, self.registry[fname], tmphash
)
)
# Make sure the parent directory exists in case the file is in a subdirectory.
# Otherwise, move will cause an error.
if not os.path.exists(str(destination.parent)):
os.makedirs(str(destination.parent))
shutil.move(fout.name, str(destination))
except Exception:
os.remove(fout.name)
raise | python | def _download_file(self, fname):
"""
Download a file from the remote data storage to the local storage.
Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
Raises
------
ValueError
If the hash of the downloaded file doesn't match the hash in the registry.
"""
destination = self.abspath / fname
source = self.get_url(fname)
# Stream the file to a temporary so that we can safely check its hash before
# overwriting the original
fout = tempfile.NamedTemporaryFile(delete=False, dir=str(self.abspath))
try:
with fout:
response = requests.get(source, stream=True)
response.raise_for_status()
for chunk in response.iter_content(chunk_size=1024):
if chunk:
fout.write(chunk)
tmphash = file_hash(fout.name)
if tmphash != self.registry[fname]:
raise ValueError(
"Hash of downloaded file '{}' doesn't match the entry in the registry:"
" Expected '{}' and got '{}'.".format(
fout.name, self.registry[fname], tmphash
)
)
# Make sure the parent directory exists in case the file is in a subdirectory.
# Otherwise, move will cause an error.
if not os.path.exists(str(destination.parent)):
os.makedirs(str(destination.parent))
shutil.move(fout.name, str(destination))
except Exception:
os.remove(fout.name)
raise | [
"def",
"_download_file",
"(",
"self",
",",
"fname",
")",
":",
"destination",
"=",
"self",
".",
"abspath",
"/",
"fname",
"source",
"=",
"self",
".",
"get_url",
"(",
"fname",
")",
"# Stream the file to a temporary so that we can safely check its hash before",
"# overwriting the original",
"fout",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
",",
"dir",
"=",
"str",
"(",
"self",
".",
"abspath",
")",
")",
"try",
":",
"with",
"fout",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"source",
",",
"stream",
"=",
"True",
")",
"response",
".",
"raise_for_status",
"(",
")",
"for",
"chunk",
"in",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"1024",
")",
":",
"if",
"chunk",
":",
"fout",
".",
"write",
"(",
"chunk",
")",
"tmphash",
"=",
"file_hash",
"(",
"fout",
".",
"name",
")",
"if",
"tmphash",
"!=",
"self",
".",
"registry",
"[",
"fname",
"]",
":",
"raise",
"ValueError",
"(",
"\"Hash of downloaded file '{}' doesn't match the entry in the registry:\"",
"\" Expected '{}' and got '{}'.\"",
".",
"format",
"(",
"fout",
".",
"name",
",",
"self",
".",
"registry",
"[",
"fname",
"]",
",",
"tmphash",
")",
")",
"# Make sure the parent directory exists in case the file is in a subdirectory.",
"# Otherwise, move will cause an error.",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"str",
"(",
"destination",
".",
"parent",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"str",
"(",
"destination",
".",
"parent",
")",
")",
"shutil",
".",
"move",
"(",
"fout",
".",
"name",
",",
"str",
"(",
"destination",
")",
")",
"except",
"Exception",
":",
"os",
".",
"remove",
"(",
"fout",
".",
"name",
")",
"raise"
] | Download a file from the remote data storage to the local storage.
Used by :meth:`~pooch.Pooch.fetch` to do the actual downloading.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
Raises
------
ValueError
If the hash of the downloaded file doesn't match the hash in the registry. | [
"Download",
"a",
"file",
"from",
"the",
"remote",
"data",
"storage",
"to",
"the",
"local",
"storage",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L343-L388 | train |
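The stream-to-temp, verify, then move pattern above is reusable on its own; a minimal standalone sketch (function name and arguments are invented) that hashes while streaming instead of rereading the file:

import hashlib
import os
import shutil
import tempfile

import requests

def download_verified(url, dest, expected_sha256):
    tmp = tempfile.NamedTemporaryFile(
        delete=False, dir=os.path.dirname(dest) or ".")
    try:
        hasher = hashlib.sha256()
        with tmp:
            response = requests.get(url, stream=True)
            response.raise_for_status()
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    tmp.write(chunk)
                    hasher.update(chunk)      # hash while streaming
        if hasher.hexdigest() != expected_sha256:
            raise ValueError("hash mismatch for %s" % url)
        shutil.move(tmp.name, dest)           # replace dest only once verified
    except Exception:
        os.remove(tmp.name)
        raise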
fatiando/pooch | pooch/core.py | Pooch.load_registry | def load_registry(self, fname):
"""
Load entries from a file and add them to the registry.
Use this if you are managing many files.
Each line of the file should have a file name and its SHA256 hash separated by a
space. Only one file per line is allowed. Custom download URLs for individual
files can be specified as a third element on the line.
Parameters
----------
fname : str
File name and path to the registry file.
"""
with open(fname) as fin:
for linenum, line in enumerate(fin):
elements = line.strip().split()
if len(elements) > 3 or len(elements) < 2:
raise IOError(
"Expected 2 or 3 elements in line {} but got {}.".format(
linenum, len(elements)
)
)
file_name = elements[0]
file_sha256 = elements[1]
if len(elements) == 3:
file_url = elements[2]
self.urls[file_name] = file_url
self.registry[file_name] = file_sha256 | python | def load_registry(self, fname):
"""
Load entries from a file and add them to the registry.
Use this if you are managing many files.
Each line of the file should have a file name and its SHA256 hash separated by a
space. Only one file per line is allowed. Custom download URLs for individual
files can be specified as a third element on the line.
Parameters
----------
fname : str
File name and path to the registry file.
"""
with open(fname) as fin:
for linenum, line in enumerate(fin):
elements = line.strip().split()
if len(elements) > 3 or len(elements) < 2:
raise IOError(
"Expected 2 or 3 elements in line {} but got {}.".format(
linenum, len(elements)
)
)
file_name = elements[0]
file_sha256 = elements[1]
if len(elements) == 3:
file_url = elements[2]
self.urls[file_name] = file_url
self.registry[file_name] = file_sha256 | [
"def",
"load_registry",
"(",
"self",
",",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
")",
"as",
"fin",
":",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"fin",
")",
":",
"elements",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"len",
"(",
"elements",
")",
">",
"3",
"or",
"len",
"(",
"elements",
")",
"<",
"2",
":",
"raise",
"IOError",
"(",
"\"Expected 2 or 3 elements in line {} but got {}.\"",
".",
"format",
"(",
"linenum",
",",
"len",
"(",
"elements",
")",
")",
")",
"file_name",
"=",
"elements",
"[",
"0",
"]",
"file_sha256",
"=",
"elements",
"[",
"1",
"]",
"if",
"len",
"(",
"elements",
")",
"==",
"3",
":",
"file_url",
"=",
"elements",
"[",
"2",
"]",
"self",
".",
"urls",
"[",
"file_name",
"]",
"=",
"file_url",
"self",
".",
"registry",
"[",
"file_name",
"]",
"=",
"file_sha256"
] | Load entries from a file and add them to the registry.
Use this if you are managing many files.
Each line of the file should have a file name and its SHA256 hash separated by a
space. Only one file per line is allowed. Custom download URLs for individual
files can be specified as a third element on the line.
Parameters
----------
fname : str
File name and path to the registry file. | [
"Load",
"entries",
"from",
"a",
"file",
"and",
"add",
"them",
"to",
"the",
"registry",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L390-L420 | train |
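A hedged sketch of the registry format load_registry() parses: one "name hash" pair per line, with an optional third element for a custom URL. File names, hashes, and the URL are invented.

import pooch

# Placeholder entries; real hashes are 64-character SHA256 hex digests.
with open("registry.txt", "w") as f:
    f.write("data/levels.nc 6d8e9aPLACEHOLDER\n")
    f.write("big-file.zip 0f1c2bPLACEHOLDER https://example.com/big-file.zip\n")

pup = pooch.create(path=".", base_url="https://example.com/", registry=None)
pup.load_registry("registry.txt")
print(pup.registry)  # {'data/levels.nc': '6d8e9a...', 'big-file.zip': '0f1c2b...'}
print(pup.urls)      # {'big-file.zip': 'https://example.com/big-file.zip'}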
fatiando/pooch | pooch/core.py | Pooch.is_available | def is_available(self, fname):
"""
Check availability of a remote file without downloading it.
Use this method when working with large files to check if they are available for
download.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
Returns
-------
status : bool
True if the file is available for download. False otherwise.
"""
self._assert_file_in_registry(fname)
source = self.get_url(fname)
response = requests.head(source, allow_redirects=True)
return bool(response.status_code == 200) | python | def is_available(self, fname):
"""
Check availability of a remote file without downloading it.
Use this method when working with large files to check if they are available for
download.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
Returns
-------
status : bool
True if the file is available for download. False otherwise.
"""
self._assert_file_in_registry(fname)
source = self.get_url(fname)
response = requests.head(source, allow_redirects=True)
return bool(response.status_code == 200) | [
"def",
"is_available",
"(",
"self",
",",
"fname",
")",
":",
"self",
".",
"_assert_file_in_registry",
"(",
"fname",
")",
"source",
"=",
"self",
".",
"get_url",
"(",
"fname",
")",
"response",
"=",
"requests",
".",
"head",
"(",
"source",
",",
"allow_redirects",
"=",
"True",
")",
"return",
"bool",
"(",
"response",
".",
"status_code",
"==",
"200",
")"
] | Check availability of a remote file without downloading it.
Use this method when working with large files to check if they are available for
download.
Parameters
----------
fname : str
The file name (relative to the *base_url* of the remote data storage) to
fetch from the local storage.
Returns
-------
status : bool
True if the file is available for download. False otherwise. | [
"Check",
"availability",
"of",
"a",
"remote",
"file",
"without",
"downloading",
"it",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/core.py#L422-L444 | train |
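The probe reduces to an HTTP HEAD plus a status check, so the same test works without a Pooch instance (URL is a placeholder):

import requests

resp = requests.head("https://example.com/data/file.csv", allow_redirects=True)
available = resp.status_code == 200  # mirrors the is_available() test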
fatiando/pooch | pooch/utils.py | file_hash | def file_hash(fname):
"""
Calculate the SHA256 hash of a given file.
Useful for checking if a file has changed or been corrupted.
Parameters
----------
fname : str
The name of the file.
Returns
-------
hash : str
The hash of the file.
Examples
--------
>>> fname = "test-file-for-hash.txt"
>>> with open(fname, "w") as f:
... __ = f.write("content of the file")
>>> print(file_hash(fname))
0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
>>> import os
>>> os.remove(fname)
"""
# Calculate the hash in chunks to avoid overloading the memory
chunksize = 65536
hasher = hashlib.sha256()
with open(fname, "rb") as fin:
buff = fin.read(chunksize)
while buff:
hasher.update(buff)
buff = fin.read(chunksize)
return hasher.hexdigest() | python | def file_hash(fname):
"""
Calculate the SHA256 hash of a given file.
Useful for checking if a file has changed or been corrupted.
Parameters
----------
fname : str
The name of the file.
Returns
-------
hash : str
The hash of the file.
Examples
--------
>>> fname = "test-file-for-hash.txt"
>>> with open(fname, "w") as f:
... __ = f.write("content of the file")
>>> print(file_hash(fname))
0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
>>> import os
>>> os.remove(fname)
"""
# Calculate the hash in chunks to avoid overloading the memory
chunksize = 65536
hasher = hashlib.sha256()
with open(fname, "rb") as fin:
buff = fin.read(chunksize)
while buff:
hasher.update(buff)
buff = fin.read(chunksize)
return hasher.hexdigest() | [
"def",
"file_hash",
"(",
"fname",
")",
":",
"# Calculate the hash in chunks to avoid overloading the memory",
"chunksize",
"=",
"65536",
"hasher",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"with",
"open",
"(",
"fname",
",",
"\"rb\"",
")",
"as",
"fin",
":",
"buff",
"=",
"fin",
".",
"read",
"(",
"chunksize",
")",
"while",
"buff",
":",
"hasher",
".",
"update",
"(",
"buff",
")",
"buff",
"=",
"fin",
".",
"read",
"(",
"chunksize",
")",
"return",
"hasher",
".",
"hexdigest",
"(",
")"
] | Calculate the SHA256 hash of a given file.
Useful for checking if a file has changed or been corrupted.
Parameters
----------
fname : str
The name of the file.
Returns
-------
hash : str
The hash of the file.
Examples
--------
>>> fname = "test-file-for-hash.txt"
>>> with open(fname, "w") as f:
... __ = f.write("content of the file")
>>> print(file_hash(fname))
0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
>>> import os
>>> os.remove(fname) | [
"Calculate",
"the",
"SHA256",
"hash",
"of",
"a",
"given",
"file",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/utils.py#L39-L75 | train |
fatiando/pooch | pooch/utils.py | check_version | def check_version(version, fallback="master"):
"""
Check that a version string is PEP440 compliant and there are no unreleased changes.
For example, ``version = "0.1"`` will be returned as is but
``version = "0.1+10.8dl8dh9"`` will return the fallback. This is the convention used
by `versioneer <https://github.com/warner/python-versioneer>`__ to mark that this
version is 10 commits ahead of the last release.
Parameters
----------
version : str
A version string.
fallback : str
What to return if the version string has unreleased changes.
Returns
-------
version : str
If *version* is PEP440 compliant and there are no unreleased changes, then return
*version*. Otherwise, return *fallback*.
Raises
------
InvalidVersion
If *version* is not PEP440 compliant.
Examples
--------
>>> check_version("0.1")
'0.1'
>>> check_version("0.1a10")
'0.1a10'
>>> check_version("0.1+111.9hdg36")
'master'
>>> check_version("0.1+111.9hdg36", fallback="dev")
'dev'
"""
parse = Version(version)
if parse.local is not None:
return fallback
return version | python | def check_version(version, fallback="master"):
"""
Check that a version string is PEP440 compliant and there are no unreleased changes.
For example, ``version = "0.1"`` will be returned as is but
``version = "0.1+10.8dl8dh9"`` will return the fallback. This is the convention used
by `versioneer <https://github.com/warner/python-versioneer>`__ to mark that this
version is 10 commits ahead of the last release.
Parameters
----------
version : str
A version string.
fallback : str
What to return if the version string has unreleased changes.
Returns
-------
version : str
If *version* is PEP440 compliant and there are no unreleased changes, then return
*version*. Otherwise, return *fallback*.
Raises
------
InvalidVersion
If *version* is not PEP440 compliant.
Examples
--------
>>> check_version("0.1")
'0.1'
>>> check_version("0.1a10")
'0.1a10'
>>> check_version("0.1+111.9hdg36")
'master'
>>> check_version("0.1+111.9hdg36", fallback="dev")
'dev'
"""
parse = Version(version)
if parse.local is not None:
return fallback
return version | [
"def",
"check_version",
"(",
"version",
",",
"fallback",
"=",
"\"master\"",
")",
":",
"parse",
"=",
"Version",
"(",
"version",
")",
"if",
"parse",
".",
"local",
"is",
"not",
"None",
":",
"return",
"fallback",
"return",
"version"
] | Check that a version string is PEP440 compliant and there are no unreleased changes.
For example, ``version = "0.1"`` will be returned as is but
``version = "0.1+10.8dl8dh9"`` will return the fallback. This is the convention used
by `versioneer <https://github.com/warner/python-versioneer>`__ to mark that this
version is 10 commits ahead of the last release.
Parameters
----------
version : str
A version string.
fallback : str
What to return if the version string has unreleased changes.
Returns
-------
version : str
If *version* is PEP440 compliant and there are no unreleased changes, then return
*version*. Otherwise, return *fallback*.
Raises
------
InvalidVersion
If *version* is not PEP440 compliant.
Examples
--------
>>> check_version("0.1")
'0.1'
>>> check_version("0.1a10")
'0.1a10'
>>> check_version("0.1+111.9hdg36")
'master'
>>> check_version("0.1+111.9hdg36", fallback="dev")
'dev' | [
"Check",
"that",
"a",
"version",
"string",
"is",
"PEP440",
"compliant",
"and",
"there",
"are",
"no",
"unreleased",
"changes",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/utils.py#L78-L121 | train |
fatiando/pooch | pooch/utils.py | make_registry | def make_registry(directory, output, recursive=True):
"""
Make a registry of files and hashes for the given directory.
This is helpful if you have many files in your test dataset as it keeps you
from needing to manually update the registry.
Parameters
----------
directory : str
Directory of the test data to put in the registry. All file names in the
registry will be relative to this directory.
output : str
Name of the output registry file.
recursive : bool
If True, will recursively look for files in subdirectories of *directory*.
"""
directory = Path(directory)
if recursive:
pattern = "**/*"
else:
pattern = "*"
files = sorted(
[
str(path.relative_to(directory))
for path in directory.glob(pattern)
if path.is_file()
]
)
hashes = [file_hash(str(directory / fname)) for fname in files]
with open(output, "w") as outfile:
for fname, fhash in zip(files, hashes):
# Only use Unix separators for the registry so that we don't go insane
# dealing with file paths.
outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash)) | python | def make_registry(directory, output, recursive=True):
"""
Make a registry of files and hashes for the given directory.
This is helpful if you have many files in your test dataset as it keeps you
from needing to manually update the registry.
Parameters
----------
directory : str
Directory of the test data to put in the registry. All file names in the
registry will be relative to this directory.
output : str
Name of the output registry file.
recursive : bool
If True, will recursively look for files in subdirectories of *directory*.
"""
directory = Path(directory)
if recursive:
pattern = "**/*"
else:
pattern = "*"
files = sorted(
[
str(path.relative_to(directory))
for path in directory.glob(pattern)
if path.is_file()
]
)
hashes = [file_hash(str(directory / fname)) for fname in files]
with open(output, "w") as outfile:
for fname, fhash in zip(files, hashes):
# Only use Unix separators for the registry so that we don't go insane
# dealing with file paths.
outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash)) | [
"def",
"make_registry",
"(",
"directory",
",",
"output",
",",
"recursive",
"=",
"True",
")",
":",
"directory",
"=",
"Path",
"(",
"directory",
")",
"if",
"recursive",
":",
"pattern",
"=",
"\"**/*\"",
"else",
":",
"pattern",
"=",
"\"*\"",
"files",
"=",
"sorted",
"(",
"[",
"str",
"(",
"path",
".",
"relative_to",
"(",
"directory",
")",
")",
"for",
"path",
"in",
"directory",
".",
"glob",
"(",
"pattern",
")",
"if",
"path",
".",
"is_file",
"(",
")",
"]",
")",
"hashes",
"=",
"[",
"file_hash",
"(",
"str",
"(",
"directory",
"/",
"fname",
")",
")",
"for",
"fname",
"in",
"files",
"]",
"with",
"open",
"(",
"output",
",",
"\"w\"",
")",
"as",
"outfile",
":",
"for",
"fname",
",",
"fhash",
"in",
"zip",
"(",
"files",
",",
"hashes",
")",
":",
"# Only use Unix separators for the registry so that we don't go insane",
"# dealing with file paths.",
"outfile",
".",
"write",
"(",
"\"{} {}\\n\"",
".",
"format",
"(",
"fname",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
",",
"fhash",
")",
")"
] | Make a registry of files and hashes for the given directory.
This is helpful if you have many files in your test dataset as it keeps you
from needing to manually update the registry.
Parameters
----------
directory : str
Directory of the test data to put in the registry. All file names in the
registry will be relative to this directory.
output : str
Name of the output registry file.
recursive : bool
If True, will recursively look for files in subdirectories of *directory*. | [
"Make",
"a",
"registry",
"of",
"files",
"and",
"hashes",
"for",
"the",
"given",
"directory",
"."
] | fc38601d2d32809b4df75d0715922025740c869a | https://github.com/fatiando/pooch/blob/fc38601d2d32809b4df75d0715922025740c869a/pooch/utils.py#L124-L162 | train |
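Typical one-off usage: hash every file under a test-data directory and write the "name hash" lines the Pooch loader expects. Paths are placeholders, and make_registry is assumed to be importable from the package top level.

from pooch import make_registry

make_registry("mypackage/tests/data",          # directory to walk recursively
              "mypackage/tests/registry.txt")  # output registry file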
kennethreitz/omnijson | omnijson/core.py | loads | def loads(s, **kwargs):
"""Loads JSON object."""
try:
return _engine[0](s)
except _engine[2]:
# except_clause: 'except' [test ['as' NAME]] # grammar for py3x
# except_clause: 'except' [test [('as' | ',') test]] # grammar for py2x
why = sys.exc_info()[1]
raise JSONError(why) | python | def loads(s, **kwargs):
"""Loads JSON object."""
try:
return _engine[0](s)
except _engine[2]:
# except_clause: 'except' [test ['as' NAME]] # grammar for py3x
# except_clause: 'except' [test [('as' | ',') test]] # grammar for py2x
why = sys.exc_info()[1]
raise JSONError(why) | [
"def",
"loads",
"(",
"s",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"_engine",
"[",
"0",
"]",
"(",
"s",
")",
"except",
"_engine",
"[",
"2",
"]",
":",
"# except_clause: 'except' [test ['as' NAME]] # grammar for py3x",
"# except_clause: 'except' [test [('as' | ',') test]] # grammar for py2x",
"why",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"raise",
"JSONError",
"(",
"why",
")"
] | Loads JSON object. | [
"Loads",
"JSON",
"object",
"."
] | a5890a51a59ad76f78a61f5bf91fa86b784cf694 | https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/core.py#L41-L51 | train |
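A hedged round-trip through the engine-agnostic API above; loads, dumps, and JSONError are assumed to be re-exported at the package top level.

import omnijson as json

data = json.loads('{"answer": 42}')
assert data == {"answer": 42}
s = json.dumps(data)             # back to a JSON string

try:
    json.loads("{broken")        # malformed input
except json.JSONError:           # one error type regardless of backend
    pass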
kennethreitz/omnijson | omnijson/core.py | dumps | def dumps(o, **kwargs):
"""Dumps JSON object."""
try:
return _engine[1](o)
except:
ExceptionClass, why = sys.exc_info()[:2]
if any([(issubclass(ExceptionClass, e)) for e in _engine[2]]):
raise JSONError(why)
else:
raise why | python | def dumps(o, **kwargs):
"""Dumps JSON object."""
try:
return _engine[1](o)
except:
ExceptionClass, why = sys.exc_info()[:2]
if any([(issubclass(ExceptionClass, e)) for e in _engine[2]]):
raise JSONError(why)
else:
raise why | [
"def",
"dumps",
"(",
"o",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"_engine",
"[",
"1",
"]",
"(",
"o",
")",
"except",
":",
"ExceptionClass",
",",
"why",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
":",
"2",
"]",
"if",
"any",
"(",
"[",
"(",
"issubclass",
"(",
"ExceptionClass",
",",
"e",
")",
")",
"for",
"e",
"in",
"_engine",
"[",
"2",
"]",
"]",
")",
":",
"raise",
"JSONError",
"(",
"why",
")",
"else",
":",
"raise",
"why"
] | Dumps JSON object. | [
"Dumps",
"JSON",
"object",
"."
] | a5890a51a59ad76f78a61f5bf91fa86b784cf694 | https://github.com/kennethreitz/omnijson/blob/a5890a51a59ad76f78a61f5bf91fa86b784cf694/omnijson/core.py#L54-L66 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/pt.py | from_table | def from_table(table, engine, limit=None):
"""
Select data in a database table and put into prettytable.
Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.
Put the data from the database table into a prettytable.
"""
sql = select([table])
if limit is not None:
sql = sql.limit(limit)
result_proxy = engine.execute(sql)
return from_db_cursor(result_proxy.cursor) | python | def from_table(table, engine, limit=None):
"""
Select data in a database table and put into prettytable.
Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.
Put the data from the database table into a prettytable.
"""
sql = select([table])
if limit is not None:
sql = sql.limit(limit)
result_proxy = engine.execute(sql)
return from_db_cursor(result_proxy.cursor) | [
"def",
"from_table",
"(",
"table",
",",
"engine",
",",
"limit",
"=",
"None",
")",
":",
"sql",
"=",
"select",
"(",
"[",
"table",
"]",
")",
"if",
"limit",
"is",
"not",
"None",
":",
"sql",
"=",
"sql",
".",
"limit",
"(",
"limit",
")",
"result_proxy",
"=",
"engine",
".",
"execute",
"(",
"sql",
")",
"return",
"from_db_cursor",
"(",
"result_proxy",
".",
"cursor",
")"
] | Select data in a database table and put into prettytable.
Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.
Put the data from the database table into a prettytable. | [
"Select",
"data",
"in",
"a",
"database",
"table",
"and",
"put",
"into",
"prettytable",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/pt.py#L68-L82 | train |
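A hedged end-to-end sketch of from_table() against an in-memory SQLite table; the table definition and rows are invented, and the SQLAlchemy 1.x engine.execute() style matches the code above.

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine)

engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
users = Table("users", metadata,
              Column("id", Integer, primary_key=True),
              Column("name", String))
metadata.create_all(engine)
engine.execute(users.insert(), [{"id": 1, "name": "Alice"},
                                {"id": 2, "name": "Bob"}])

print(from_table(users, engine, limit=10))  # rows rendered as a PrettyTable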
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/pt.py | from_data | def from_data(data):
"""
Construct a PrettyTable from a list of rows.
"""
if len(data) == 0: # pragma: no cover
return None
else:
ptable = PrettyTable()
ptable.field_names = data[0].keys()
for row in data:
ptable.add_row(row)
return ptable | python | def from_data(data):
"""
Construct a PrettyTable from a list of rows.
"""
if len(data) == 0: # pragma: no cover
return None
else:
ptable = PrettyTable()
ptable.field_names = data[0].keys()
for row in data:
ptable.add_row(row)
return ptable | [
"def",
"from_data",
"(",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"# pragma: no cover",
"return",
"None",
"else",
":",
"ptable",
"=",
"PrettyTable",
"(",
")",
"ptable",
".",
"field_names",
"=",
"data",
"[",
"0",
"]",
".",
"keys",
"(",
")",
"for",
"row",
"in",
"data",
":",
"ptable",
".",
"add_row",
"(",
"row",
")",
"return",
"ptable"
] | Construct a PrettyTable from a list of rows. | [
"Construct",
"a",
"Prettytable",
"from",
"list",
"of",
"rows",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/pt.py#L117-L128 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/pkg/prettytable/factory.py | TableHandler.generate_table | def generate_table(self, rows):
"""
Generates a PrettyTable object from a list of rows.
"""
table = PrettyTable(**self.kwargs)
for row in self.rows:
if len(row[0]) < self.max_row_width:
appends = self.max_row_width - len(row[0])
for i in range(1, appends):
row[0].append("-")
if row[1] is True:
self.make_fields_unique(row[0])
table.field_names = row[0]
else:
table.add_row(row[0])
return table | python | def generate_table(self, rows):
"""
Generates a PrettyTable object from a list of rows.
"""
table = PrettyTable(**self.kwargs)
for row in self.rows:
if len(row[0]) < self.max_row_width:
appends = self.max_row_width - len(row[0])
for i in range(1, appends):
row[0].append("-")
if row[1] is True:
self.make_fields_unique(row[0])
table.field_names = row[0]
else:
table.add_row(row[0])
return table | [
"def",
"generate_table",
"(",
"self",
",",
"rows",
")",
":",
"table",
"=",
"PrettyTable",
"(",
"*",
"*",
"self",
".",
"kwargs",
")",
"for",
"row",
"in",
"self",
".",
"rows",
":",
"if",
"len",
"(",
"row",
"[",
"0",
"]",
")",
"<",
"self",
".",
"max_row_width",
":",
"appends",
"=",
"self",
".",
"max_row_width",
"-",
"len",
"(",
"row",
"[",
"0",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"appends",
")",
":",
"row",
"[",
"0",
"]",
".",
"append",
"(",
"\"-\"",
")",
"if",
"row",
"[",
"1",
"]",
"is",
"True",
":",
"self",
".",
"make_fields_unique",
"(",
"row",
"[",
"0",
"]",
")",
"table",
".",
"field_names",
"=",
"row",
"[",
"0",
"]",
"else",
":",
"table",
".",
"add_row",
"(",
"row",
"[",
"0",
"]",
")",
"return",
"table"
] | Generates a PrettyTable object from a list of rows. | [
"Generates",
"from",
"a",
"list",
"of",
"rows",
"a",
"PrettyTable",
"object",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/pkg/prettytable/factory.py#L99-L115 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/io.py | sql_to_csv | def sql_to_csv(sql, engine, filepath, chunksize=1000, overwrite=False):
"""
Export sql result to csv file.
:param sql: :class:`sqlalchemy.sql.selectable.Select` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows written to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
Write all of the data from the sql result to the csv file in a generator
fashion, holding only a small chunk of the result in memory at a time.
"""
if overwrite: # pragma: no cover
if os.path.exists(filepath):
raise Exception("'%s' already exists!" % filepath)
import pandas as pd
columns = [str(column.name) for column in sql.columns]
with open(filepath, "w") as f:
# write header
df = pd.DataFrame([], columns=columns)
df.to_csv(f, header=True, index=False)
# iterate big database table
result_proxy = engine.execute(sql)
while True:
data = result_proxy.fetchmany(chunksize)
if len(data) == 0:
break
else:
df = pd.DataFrame(data, columns=columns)
df.to_csv(f, header=False, index=False) | python | def sql_to_csv(sql, engine, filepath, chunksize=1000, overwrite=False):
"""
Export sql result to csv file.
:param sql: :class:`sqlalchemy.sql.selectable.Select` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows written to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
Write all of the data from the sql result to the csv file in a generator
fashion, holding only a small chunk of the result in memory at a time.
"""
if overwrite: # pragma: no cover
if os.path.exists(filepath):
raise Exception("'%s' already exists!" % filepath)
import pandas as pd
columns = [str(column.name) for column in sql.columns]
with open(filepath, "w") as f:
# write header
df = pd.DataFrame([], columns=columns)
df.to_csv(f, header=True, index=False)
# iterate big database table
result_proxy = engine.execute(sql)
while True:
data = result_proxy.fetchmany(chunksize)
if len(data) == 0:
break
else:
df = pd.DataFrame(data, columns=columns)
df.to_csv(f, header=False, index=False) | [
"def",
"sql_to_csv",
"(",
"sql",
",",
"engine",
",",
"filepath",
",",
"chunksize",
"=",
"1000",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"overwrite",
":",
"# pragma: no cover",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filepath",
")",
":",
"raise",
"Exception",
"(",
"\"'%s' already exists!\"",
"%",
"filepath",
")",
"import",
"pandas",
"as",
"pd",
"columns",
"=",
"[",
"str",
"(",
"column",
".",
"name",
")",
"for",
"column",
"in",
"sql",
".",
"columns",
"]",
"with",
"open",
"(",
"filepath",
",",
"\"w\"",
")",
"as",
"f",
":",
"# write header",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"[",
"]",
",",
"columns",
"=",
"columns",
")",
"df",
".",
"to_csv",
"(",
"f",
",",
"header",
"=",
"True",
",",
"index",
"=",
"False",
")",
"# iterate big database table",
"result_proxy",
"=",
"engine",
".",
"execute",
"(",
"sql",
")",
"while",
"True",
":",
"data",
"=",
"result_proxy",
".",
"fetchmany",
"(",
"chunksize",
")",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"break",
"else",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"columns",
"=",
"columns",
")",
"df",
".",
"to_csv",
"(",
"f",
",",
"header",
"=",
"False",
",",
"index",
"=",
"False",
")"
] | Export sql result to csv file.
:param sql: :class:`sqlalchemy.sql.selectable.Select` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows written to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
Write all of the data from the sql result to the csv file in a generator
fashion, holding only a small chunk of the result in memory at a time. | [
"Export",
"sql",
"result",
"to",
"csv",
"file",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/io.py#L12-L47 | train |
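A hedged sketch of the chunked export: only one 1000-row chunk of the result set is materialized at a time, so arbitrarily large tables fit in constant memory. Table and file names are invented; pandas must be installed.

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, select)

engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
users = Table("users", metadata,
              Column("id", Integer, primary_key=True),
              Column("name", String))
metadata.create_all(engine)
engine.execute(users.insert(),
               [{"id": i, "name": "user%d" % i} for i in range(5000)])

sql_to_csv(select([users]), engine, "users.csv", chunksize=1000)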
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/io.py | table_to_csv | def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False):
"""
Export entire table to a csv file.
:param table: :class:`sqlalchemy.Table` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows written to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
Write all of the data in the table to a csv file.
"""
sql = select([table])
sql_to_csv(sql, engine, filepath, chunksize) | python | def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False):
"""
Export entire table to a csv file.
:param table: :class:`sqlalchemy.Table` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows written to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
Write all of the data in the table to a csv file.
"""
sql = select([table])
sql_to_csv(sql, engine, filepath, chunksize) | [
"def",
"table_to_csv",
"(",
"table",
",",
"engine",
",",
"filepath",
",",
"chunksize",
"=",
"1000",
",",
"overwrite",
"=",
"False",
")",
":",
"sql",
"=",
"select",
"(",
"[",
"table",
"]",
")",
"sql_to_csv",
"(",
"sql",
",",
"engine",
",",
"filepath",
",",
"chunksize",
")"
] | Export entire table to a csv file.
:param table: :class:`sqlalchemy.Table` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows written to csv each time.
:param overwrite: bool, if True, avoid overwriting an existing file.
Write all of the data in the table to a csv file. | [
"Export",
"entire",
"table",
"to",
"a",
"csv",
"file",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/io.py#L50-L65 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/crud/updating.py | update_all | def update_all(engine, table, data, upsert=False):
"""
Update data by its primary_key column(s).
"""
data = ensure_list(data)
ins = table.insert()
upd = table.update()
# Find all primary key columns
pk_cols = OrderedDict()
for column in table._columns:
if column.primary_key:
pk_cols[column.name] = column
data_to_insert = list()
# Multiple primary key column
if len(pk_cols) >= 2:
for row in data:
result = engine.execute(
upd.
where(
and_(
*[col == row[name] for name, col in pk_cols.items()]
)
).
values(**row)
)
if result.rowcount == 0:
data_to_insert.append(row)
# Single primary key column
elif len(pk_cols) == 1:
for row in data:
result = engine.execute(
upd.
where(
[col == row[name] for name, col in pk_cols.items()][0]
).
values(**row)
)
if result.rowcount == 0:
data_to_insert.append(row)
else: # pragma: no cover
data_to_insert = data
# Insert rest of data
if upsert:
if len(data_to_insert):
engine.execute(ins, data_to_insert) | python | def update_all(engine, table, data, upsert=False):
"""
Update data by its primary_key column(s).
"""
data = ensure_list(data)
ins = table.insert()
upd = table.update()
# Find all primary key columns
pk_cols = OrderedDict()
for column in table._columns:
if column.primary_key:
pk_cols[column.name] = column
data_to_insert = list()
# Multiple primary key column
if len(pk_cols) >= 2:
for row in data:
result = engine.execute(
upd.
where(
and_(
*[col == row[name] for name, col in pk_cols.items()]
)
).
values(**row)
)
if result.rowcount == 0:
data_to_insert.append(row)
# Single primary key column
elif len(pk_cols) == 1:
for row in data:
result = engine.execute(
upd.
where(
[col == row[name] for name, col in pk_cols.items()][0]
).
values(**row)
)
if result.rowcount == 0:
data_to_insert.append(row)
else: # pragma: no cover
data_to_insert = data
# Insert rest of data
if upsert:
if len(data_to_insert):
engine.execute(ins, data_to_insert) | [
"def",
"update_all",
"(",
"engine",
",",
"table",
",",
"data",
",",
"upsert",
"=",
"False",
")",
":",
"data",
"=",
"ensure_list",
"(",
"data",
")",
"ins",
"=",
"table",
".",
"insert",
"(",
")",
"upd",
"=",
"table",
".",
"update",
"(",
")",
"# Find all primary key columns",
"pk_cols",
"=",
"OrderedDict",
"(",
")",
"for",
"column",
"in",
"table",
".",
"_columns",
":",
"if",
"column",
".",
"primary_key",
":",
"pk_cols",
"[",
"column",
".",
"name",
"]",
"=",
"column",
"data_to_insert",
"=",
"list",
"(",
")",
"# Multiple primary key column",
"if",
"len",
"(",
"pk_cols",
")",
">=",
"2",
":",
"for",
"row",
"in",
"data",
":",
"result",
"=",
"engine",
".",
"execute",
"(",
"upd",
".",
"where",
"(",
"and_",
"(",
"*",
"[",
"col",
"==",
"row",
"[",
"name",
"]",
"for",
"name",
",",
"col",
"in",
"pk_cols",
".",
"items",
"(",
")",
"]",
")",
")",
".",
"values",
"(",
"*",
"*",
"row",
")",
")",
"if",
"result",
".",
"rowcount",
"==",
"0",
":",
"data_to_insert",
".",
"append",
"(",
"row",
")",
"# Single primary key column",
"elif",
"len",
"(",
"pk_cols",
")",
"==",
"1",
":",
"for",
"row",
"in",
"data",
":",
"result",
"=",
"engine",
".",
"execute",
"(",
"upd",
".",
"where",
"(",
"[",
"col",
"==",
"row",
"[",
"name",
"]",
"for",
"name",
",",
"col",
"in",
"pk_cols",
".",
"items",
"(",
")",
"]",
"[",
"0",
"]",
")",
".",
"values",
"(",
"*",
"*",
"row",
")",
")",
"if",
"result",
".",
"rowcount",
"==",
"0",
":",
"data_to_insert",
".",
"append",
"(",
"row",
")",
"else",
":",
"# pragma: no cover",
"data_to_insert",
"=",
"data",
"# Insert rest of data",
"if",
"upsert",
":",
"if",
"len",
"(",
"data_to_insert",
")",
":",
"engine",
".",
"execute",
"(",
"ins",
",",
"data_to_insert",
")"
] | Update data by its primary_key column(s). | [
"Update",
"data",
"by",
"its",
"primary_key",
"column",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/updating.py#L13-L64 | train |
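The rowcount-based flow is easiest to see with upsert off and then on; a hedged sketch with an invented table.

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine)

engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
t_user = Table("users", metadata,
               Column("id", Integer, primary_key=True),
               Column("name", String))
metadata.create_all(engine)
engine.execute(t_user.insert(), [{"id": 1, "name": "Alice"}])

rows = [{"id": 1, "name": "Bob"},    # UPDATE matches -> rowcount 1
        {"id": 2, "name": "Cathy"}]  # UPDATE matches nothing -> rowcount 0
update_all(engine, t_user, rows)               # id 2 is silently skipped
update_all(engine, t_user, rows, upsert=True)  # id 2 is now bulk-inserted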
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/crud/updating.py | upsert_all | def upsert_all(engine, table, data):
"""
Update data by primary key columns. If not able to update, do insert.
Example::
# suppose in database we already have {"id": 1, "name": "Alice"}
>>> data = [
... {"id": 1, "name": "Bob"}, # this will be updated
... {"id": 2, "name": "Cathy"}, # this will be added
... ]
>>> upsert_all(engine, table_user, data)
>>> engine.execute(select([table_user])).fetchall()
[{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}]
Bulk-update rows: if the table defines a primary key, the where clause is
constrained by that key; rows the where clause cannot match are bulk inserted.
"""
update_all(engine, table, data, upsert=True) | python | def upsert_all(engine, table, data):
"""
Update data by primary key columns. If not able to update, do insert.
Example::
# suppose in database we already have {"id": 1, "name": "Alice"}
>>> data = [
... {"id": 1, "name": "Bob"}, # this will be updated
... {"id": 2, "name": "Cathy"}, # this will be added
... ]
>>> upsert_all(engine, table_user, data)
>>> engine.execute(select([table_user])).fetchall()
[{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}]
Bulk-update rows: if the table defines a primary key, the where clause is
constrained by that key; rows the where clause cannot match are bulk inserted.
"""
update_all(engine, table, data, upsert=True) | [
"def",
"upsert_all",
"(",
"engine",
",",
"table",
",",
"data",
")",
":",
"update_all",
"(",
"engine",
",",
"table",
",",
"data",
",",
"upsert",
"=",
"True",
")"
] | Update data by primary key columns. If not able to update, do insert.
Example::
# suppose in database we already have {"id": 1, "name": "Alice"}
>>> data = [
... {"id": 1, "name": "Bob"}, # this will be updated
... {"id": 2, "name": "Cathy"}, # this will be added
... ]
>>> upsert_all(engine, table_user, data)
>>> engine.execute(select([table_user])).fetchall()
[{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}]
**中文文档**
批量更新文档. 如果该表格定义了Primary Key, 则用Primary Key约束where语句. 对于
where语句无法找到的行, 自动进行批量bulk insert. | [
"Update",
"data",
"by",
"primary",
"key",
"columns",
".",
"If",
"not",
"able",
"to",
"update",
"do",
"insert",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/updating.py#L67-L87 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.pk_names | def pk_names(cls):
"""
Primary key column name list.
"""
if cls._cache_pk_names is None:
cls._cache_pk_names = cls._get_primary_key_names()
return cls._cache_pk_names | python | def pk_names(cls):
"""
Primary key column name list.
"""
if cls._cache_pk_names is None:
cls._cache_pk_names = cls._get_primary_key_names()
return cls._cache_pk_names | [
"def",
"pk_names",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"_cache_pk_names",
"is",
"None",
":",
"cls",
".",
"_cache_pk_names",
"=",
"cls",
".",
"_get_primary_key_names",
"(",
")",
"return",
"cls",
".",
"_cache_pk_names"
] | Primary key column name list. | [
"Primary",
"key",
"column",
"name",
"list",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L62-L68 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.id_field_name | def id_field_name(cls):
"""
If there is only one primary key column, return its name. Otherwise, raise ValueError.
"""
if cls._cache_id_field_name is None:
pk_names = cls.pk_names()
if len(pk_names) == 1:
cls._cache_id_field_name = pk_names[0]
else: # pragma: no cover
raise ValueError(
"{classname} has more than 1 primary key!"
.format(classname=cls.__name__)
)
return cls._cache_id_field_name | python | def id_field_name(cls):
"""
If there is only one primary key column, return its name. Otherwise, raise ValueError.
"""
if cls._cache_id_field_name is None:
pk_names = cls.pk_names()
if len(pk_names) == 1:
cls._cache_id_field_name = pk_names[0]
else: # pragma: no cover
raise ValueError(
"{classname} has more than 1 primary key!"
.format(classname=cls.__name__)
)
return cls._cache_id_field_name | [
"def",
"id_field_name",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"_cache_id_field_name",
"is",
"None",
":",
"pk_names",
"=",
"cls",
".",
"pk_names",
"(",
")",
"if",
"len",
"(",
"pk_names",
")",
"==",
"1",
":",
"cls",
".",
"_cache_id_field_name",
"=",
"pk_names",
"[",
"0",
"]",
"else",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"\"{classname} has more than 1 primary key!\"",
".",
"format",
"(",
"classname",
"=",
"cls",
".",
"__name__",
")",
")",
"return",
"cls",
".",
"_cache_id_field_name"
] | If there is only one primary key column, return its name. Otherwise, raise ValueError. | [
"If",
"only",
"one",
"primary_key",
"then",
"return",
"it",
".",
"Otherwise",
"raise",
"ValueError",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L77-L90 | train |
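Since pk_names() and id_field_name() are caching classmethods, here is a hedged sketch of their behavior on a single-primary-key model (the sqlalchemy_mate import path is an assumption based on this repo's vendored package):

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_mate import ExtendedBase  # import path is an assumption

Base = declarative_base()

class User(Base, ExtendedBase):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

print(User.pk_names())       # the primary key name list, here just 'id' (cached afterwards)
print(User.id_field_name())  # 'id' -- exactly one primary key, so no ValueError

With a composite primary key (two or more primary_key=True columns), id_field_name() raises the ValueError shown above.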
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.values | def values(self):
"""
return list of value of all declared columns.
"""
return [getattr(self, c.name, None) for c in self.__table__._columns] | python | def values(self):
"""
return list of value of all declared columns.
"""
return [getattr(self, c.name, None) for c in self.__table__._columns] | [
"def",
"values",
"(",
"self",
")",
":",
"return",
"[",
"getattr",
"(",
"self",
",",
"c",
".",
"name",
",",
"None",
")",
"for",
"c",
"in",
"self",
".",
"__table__",
".",
"_columns",
"]"
] | return list of value of all declared columns. | [
"return",
"list",
"of",
"value",
"of",
"all",
"declared",
"columns",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L101-L105 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.items | def items(self):
"""
return list of pair of name and value of all declared columns.
"""
return [
(c.name, getattr(self, c.name, None))
for c in self.__table__._columns
] | python | def items(self):
"""
return list of pair of name and value of all declared columns.
"""
return [
(c.name, getattr(self, c.name, None))
for c in self.__table__._columns
] | [
"def",
"items",
"(",
"self",
")",
":",
"return",
"[",
"(",
"c",
".",
"name",
",",
"getattr",
"(",
"self",
",",
"c",
".",
"name",
",",
"None",
")",
")",
"for",
"c",
"in",
"self",
".",
"__table__",
".",
"_columns",
"]"
] | return list of pair of name and value of all declared columns. | [
"return",
"list",
"of",
"pair",
"of",
"name",
"and",
"value",
"of",
"all",
"declared",
"columns",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L107-L114 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.to_dict | def to_dict(self, include_null=True):
"""
Convert to dict.
"""
if include_null:
return dict(self.items())
else:
return {
attr: value
for attr, value in self.__dict__.items()
if not attr.startswith("_sa_")
} | python | def to_dict(self, include_null=True):
"""
Convert to dict.
"""
if include_null:
return dict(self.items())
else:
return {
attr: value
for attr, value in self.__dict__.items()
if not attr.startswith("_sa_")
} | [
"def",
"to_dict",
"(",
"self",
",",
"include_null",
"=",
"True",
")",
":",
"if",
"include_null",
":",
"return",
"dict",
"(",
"self",
".",
"items",
"(",
")",
")",
"else",
":",
"return",
"{",
"attr",
":",
"value",
"for",
"attr",
",",
"value",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
"if",
"not",
"attr",
".",
"startswith",
"(",
"\"_sa_\"",
")",
"}"
] | Convert to dict. | [
"Convert",
"to",
"dict",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L125-L136 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.to_OrderedDict | def to_OrderedDict(self, include_null=True):
"""
Convert to OrderedDict.
"""
if include_null:
return OrderedDict(self.items())
else:
items = list()
for c in self.__table__._columns:
try:
items.append((c.name, self.__dict__[c.name]))
except KeyError:
pass
return OrderedDict(items) | python | def to_OrderedDict(self, include_null=True):
"""
Convert to OrderedDict.
"""
if include_null:
return OrderedDict(self.items())
else:
items = list()
for c in self.__table__._columns:
try:
items.append((c.name, self.__dict__[c.name]))
except KeyError:
pass
return OrderedDict(items) | [
"def",
"to_OrderedDict",
"(",
"self",
",",
"include_null",
"=",
"True",
")",
":",
"if",
"include_null",
":",
"return",
"OrderedDict",
"(",
"self",
".",
"items",
"(",
")",
")",
"else",
":",
"items",
"=",
"list",
"(",
")",
"for",
"c",
"in",
"self",
".",
"__table__",
".",
"_columns",
":",
"try",
":",
"items",
".",
"append",
"(",
"(",
"c",
".",
"name",
",",
"self",
".",
"__dict__",
"[",
"c",
".",
"name",
"]",
")",
")",
"except",
"KeyError",
":",
"pass",
"return",
"OrderedDict",
"(",
"items",
")"
] | Convert to OrderedDict. | [
"Convert",
"to",
"OrderedDict",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L138-L151 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.by_id | def by_id(cls, _id, engine_or_session):
"""
Get one object by primary_key value.
"""
ses, auto_close = ensure_session(engine_or_session)
obj = ses.query(cls).get(_id)
if auto_close:
ses.close()
return obj | python | def by_id(cls, _id, engine_or_session):
"""
Get one object by primary_key value.
"""
ses, auto_close = ensure_session(engine_or_session)
obj = ses.query(cls).get(_id)
if auto_close:
ses.close()
return obj | [
"def",
"by_id",
"(",
"cls",
",",
"_id",
",",
"engine_or_session",
")",
":",
"ses",
",",
"auto_close",
"=",
"ensure_session",
"(",
"engine_or_session",
")",
"obj",
"=",
"ses",
".",
"query",
"(",
"cls",
")",
".",
"get",
"(",
"_id",
")",
"if",
"auto_close",
":",
"ses",
".",
"close",
"(",
")",
"return",
"obj"
] | Get one object by primary_key value. | [
"Get",
"one",
"object",
"by",
"primary_key",
"value",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L185-L193 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py | ExtendedBase.by_sql | def by_sql(cls, sql, engine_or_session):
"""
Query with sql statement or textual sql.
"""
ses, auto_close = ensure_session(engine_or_session)
result = ses.query(cls).from_statement(sql).all()
if auto_close:
ses.close()
return result | python | def by_sql(cls, sql, engine_or_session):
"""
Query with sql statement or textual sql.
"""
ses, auto_close = ensure_session(engine_or_session)
result = ses.query(cls).from_statement(sql).all()
if auto_close:
ses.close()
return result | [
"def",
"by_sql",
"(",
"cls",
",",
"sql",
",",
"engine_or_session",
")",
":",
"ses",
",",
"auto_close",
"=",
"ensure_session",
"(",
"engine_or_session",
")",
"result",
"=",
"ses",
".",
"query",
"(",
"cls",
")",
".",
"from_statement",
"(",
"sql",
")",
".",
"all",
"(",
")",
"if",
"auto_close",
":",
"ses",
".",
"close",
"(",
")",
"return",
"result"
] | Query with sql statement or textual sql. | [
"Query",
"with",
"sql",
"statement",
"or",
"texture",
"sql",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/orm/extended_declarative_base.py#L196-L204 | train |
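A hedged usage sketch for by_sql with a textual statement built via sqlalchemy.text(); SQLAlchemy 1.x style, and the sqlalchemy_mate import path is again an assumption:

from sqlalchemy import Column, Integer, String, create_engine, text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_mate import ExtendedBase  # import path is an assumption

Base = declarative_base()

class User(Base, ExtendedBase):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
engine.execute(User.__table__.insert(),
               [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}])

sql = text("SELECT * FROM users WHERE id >= :min_id").bindparams(min_id=2)
for user in User.by_sql(sql, engine):  # the engine is wrapped in a throwaway session
    print(user.id, user.name)          # 2 Bob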
MacHu-GWU/uszipcode-project | fixcode.py | fixcode | def fixcode(**kwargs):
"""
auto pep8 format all python files in ``source code`` and ``tests`` dir.
"""
# repository directory
repo_dir = Path(__file__).parent.absolute()
# source code directory
source_dir = Path(repo_dir, package.__name__)
if source_dir.exists():
print("Source code locate at: '%s'." % source_dir)
print("Auto pep8 all python file ...")
source_dir.autopep8(**kwargs)
else:
print("Source code directory not found!")
# unittest code directory
unittest_dir = Path(repo_dir, "tests")
if unittest_dir.exists():
print("Unittest code locate at: '%s'." % unittest_dir)
print("Auto pep8 all python file ...")
unittest_dir.autopep8(**kwargs)
else:
print("Unittest code directory not found!")
print("Complete!") | python | def fixcode(**kwargs):
"""
auto pep8 format all python files in ``source code`` and ``tests`` dir.
"""
# repository directory
repo_dir = Path(__file__).parent.absolute()
# source code directory
source_dir = Path(repo_dir, package.__name__)
if source_dir.exists():
print("Source code locate at: '%s'." % source_dir)
print("Auto pep8 all python file ...")
source_dir.autopep8(**kwargs)
else:
print("Source code directory not found!")
# unittest code directory
unittest_dir = Path(repo_dir, "tests")
if unittest_dir.exists():
print("Unittest code locate at: '%s'." % unittest_dir)
print("Auto pep8 all python file ...")
unittest_dir.autopep8(**kwargs)
else:
print("Unittest code directory not found!")
print("Complete!") | [
"def",
"fixcode",
"(",
"*",
"*",
"kwargs",
")",
":",
"# repository direcotry",
"repo_dir",
"=",
"Path",
"(",
"__file__",
")",
".",
"parent",
".",
"absolute",
"(",
")",
"# source code directory",
"source_dir",
"=",
"Path",
"(",
"repo_dir",
",",
"package",
".",
"__name__",
")",
"if",
"source_dir",
".",
"exists",
"(",
")",
":",
"print",
"(",
"\"Source code locate at: '%s'.\"",
"%",
"source_dir",
")",
"print",
"(",
"\"Auto pep8 all python file ...\"",
")",
"source_dir",
".",
"autopep8",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"print",
"(",
"\"Source code directory not found!\"",
")",
"# unittest code directory",
"unittest_dir",
"=",
"Path",
"(",
"repo_dir",
",",
"\"tests\"",
")",
"if",
"unittest_dir",
".",
"exists",
"(",
")",
":",
"print",
"(",
"\"Unittest code locate at: '%s'.\"",
"%",
"unittest_dir",
")",
"print",
"(",
"\"Auto pep8 all python file ...\"",
")",
"unittest_dir",
".",
"autopep8",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"print",
"(",
"\"Unittest code directory not found!\"",
")",
"print",
"(",
"\"Complete!\"",
")"
] | auto pep8 format all python files in ``source code`` and ``tests`` dir. | [
"auto",
"pep8",
"format",
"all",
"python",
"file",
"in",
"source",
"code",
"and",
"tests",
"dir",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/fixcode.py#L13-L39 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/pkg/prettytable/prettytable.py | PrettyTable._get_rows | def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
if options["oldsortslice"]:
rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
else:
rows = copy.deepcopy(self._rows)
# Sort
if options["sortby"]:
sortindex = self._field_names.index(options["sortby"])
# Decorate
rows = [[row[sortindex]] + row for row in rows]
# Sort
rows.sort(reverse=options["reversesort"], key=options["sort_key"])
# Undecorate
rows = [row[1:] for row in rows]
# Slice if necessary
if not options["oldsortslice"]:
rows = rows[options["start"]:options["end"]]
return rows | python | def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
if options["oldsortslice"]:
rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
else:
rows = copy.deepcopy(self._rows)
# Sort
if options["sortby"]:
sortindex = self._field_names.index(options["sortby"])
# Decorate
rows = [[row[sortindex]] + row for row in rows]
# Sort
rows.sort(reverse=options["reversesort"], key=options["sort_key"])
# Undecorate
rows = [row[1:] for row in rows]
# Slice if necessary
if not options["oldsortslice"]:
rows = rows[options["start"]:options["end"]]
return rows | [
"def",
"_get_rows",
"(",
"self",
",",
"options",
")",
":",
"if",
"options",
"[",
"\"oldsortslice\"",
"]",
":",
"rows",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_rows",
"[",
"options",
"[",
"\"start\"",
"]",
":",
"options",
"[",
"\"end\"",
"]",
"]",
")",
"else",
":",
"rows",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_rows",
")",
"# Sort",
"if",
"options",
"[",
"\"sortby\"",
"]",
":",
"sortindex",
"=",
"self",
".",
"_field_names",
".",
"index",
"(",
"options",
"[",
"\"sortby\"",
"]",
")",
"# Decorate",
"rows",
"=",
"[",
"[",
"row",
"[",
"sortindex",
"]",
"]",
"+",
"row",
"for",
"row",
"in",
"rows",
"]",
"# Sort",
"rows",
".",
"sort",
"(",
"reverse",
"=",
"options",
"[",
"\"reversesort\"",
"]",
",",
"key",
"=",
"options",
"[",
"\"sort_key\"",
"]",
")",
"# Undecorate",
"rows",
"=",
"[",
"row",
"[",
"1",
":",
"]",
"for",
"row",
"in",
"rows",
"]",
"# Slice if necessary",
"if",
"not",
"options",
"[",
"\"oldsortslice\"",
"]",
":",
"rows",
"=",
"rows",
"[",
"options",
"[",
"\"start\"",
"]",
":",
"options",
"[",
"\"end\"",
"]",
"]",
"return",
"rows"
] | Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings. | [
"Return",
"only",
"those",
"data",
"rows",
"that",
"should",
"be",
"printed",
"based",
"on",
"slicing",
"and",
"sorting",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/pkg/prettytable/prettytable.py#L1080-L1106 | train |
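The sort above is the classic decorate-sort-undecorate idiom: prepend the sort key, sort, strip it off. A standalone illustration with made-up rows, sorting on field index 1:

rows = [["banana", 2], ["apple", 3], ["cherry", 1]]
sortindex = 1
decorated = [[row[sortindex]] + row for row in rows]  # prepend the sort key
decorated.sort(reverse=False)                         # compare on the prepended key
rows = [row[1:] for row in decorated]                 # strip the key back off
print(rows)  # [['cherry', 1], ['banana', 2], ['apple', 3]]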
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_postgresql_pg8000 | def create_postgresql_pg8000(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using pg8000.
"""
return create_engine(
_create_postgresql_pg8000(username, password, host, port, database),
**kwargs
) | python | def create_postgresql_pg8000(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using pg8000.
"""
return create_engine(
_create_postgresql_pg8000(username, password, host, port, database),
**kwargs
) | [
"def",
"create_postgresql_pg8000",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_postgresql_pg8000",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a postgresql database using pg8000. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"postgresql",
"database",
"using",
"pg8000",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L94-L101 | train |
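The private helper _create_postgresql_pg8000 is not shown in this row; presumably it just builds the standard SQLAlchemy URL for the pg8000 driver, along these lines (a hedged reconstruction, not the library's code):

def build_pg8000_url(username, password, host, port, database):
    # standard SQLAlchemy URL format: dialect+driver://user:password@host:port/db
    return "postgresql+pg8000://{}:{}@{}:{}/{}".format(
        username, password, host, port, database)

print(build_pg8000_url("alice", "secret", "localhost", 5432, "mydb"))
# postgresql+pg8000://alice:secret@localhost:5432/mydb

The other create_* wrappers below follow the same pattern with their respective driver names (pygresql, psycopg2cffi, pymysql, pyodbc, and so on).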
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_postgresql_pygresql | def create_postgresql_pygresql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using pygresql.
"""
return create_engine(
_create_postgresql_pygresql(username, password, host, port, database),
**kwargs
) | python | def create_postgresql_pygresql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using pygresql.
"""
return create_engine(
_create_postgresql_pygresql(username, password, host, port, database),
**kwargs
) | [
"def",
"create_postgresql_pygresql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_postgresql_pygresql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a postgresql database using pygresql. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"postgresql",
"database",
"using",
"pygresql",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L110-L117 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_postgresql_psycopg2cffi | def create_postgresql_psycopg2cffi(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using psycopg2cffi.
"""
return create_engine(
_create_postgresql_psycopg2cffi(
username, password, host, port, database),
**kwargs
) | python | def create_postgresql_psycopg2cffi(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using psycopg2cffi.
"""
return create_engine(
_create_postgresql_psycopg2cffi(
username, password, host, port, database),
**kwargs
) | [
"def",
"create_postgresql_psycopg2cffi",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_postgresql_psycopg2cffi",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a postgresql database using psycopg2cffi. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"postgresql",
"database",
"using",
"psycopg2cffi",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L126-L134 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_postgresql_pypostgresql | def create_postgresql_pypostgresql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using pypostgresql.
"""
return create_engine(
_create_postgresql_pypostgresql(
username, password, host, port, database),
**kwargs
) | python | def create_postgresql_pypostgresql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a postgresql database using pypostgresql.
"""
return create_engine(
_create_postgresql_pypostgresql(
username, password, host, port, database),
**kwargs
) | [
"def",
"create_postgresql_pypostgresql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_postgresql_pypostgresql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a postgresql database using pypostgresql. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"postgresql",
"database",
"using",
"pypostgresql",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L143-L151 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_mysql_mysqlconnector | def create_mysql_mysqlconnector(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using mysqlconnector.
"""
return create_engine(
_create_mysql_mysqlconnector(username, password, host, port, database),
**kwargs
) | python | def create_mysql_mysqlconnector(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using mysqlconnector.
"""
return create_engine(
_create_mysql_mysqlconnector(username, password, host, port, database),
**kwargs
) | [
"def",
"create_mysql_mysqlconnector",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_mysql_mysqlconnector",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a mysql database using mysqlconnector. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"mysql",
"database",
"using",
"mysqlconnector",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L194-L201 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_mysql_oursql | def create_mysql_oursql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using oursql.
"""
return create_engine(
_create_mysql_oursql(username, password, host, port, database),
**kwargs
) | python | def create_mysql_oursql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using oursql.
"""
return create_engine(
_create_mysql_oursql(username, password, host, port, database),
**kwargs
) | [
"def",
"create_mysql_oursql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_mysql_oursql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a mysql database using oursql. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"mysql",
"database",
"using",
"oursql",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L210-L217 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_mysql_pymysql | def create_mysql_pymysql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using pymysql.
"""
return create_engine(
_create_mysql_pymysql(username, password, host, port, database),
**kwargs
) | python | def create_mysql_pymysql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using pymysql.
"""
return create_engine(
_create_mysql_pymysql(username, password, host, port, database),
**kwargs
) | [
"def",
"create_mysql_pymysql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_mysql_pymysql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a mysql database using pymysql. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"mysql",
"database",
"using",
"pymysql",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L226-L233 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_mysql_cymysql | def create_mysql_cymysql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using cymysql.
"""
return create_engine(
_create_mysql_cymysql(username, password, host, port, database),
**kwargs
) | python | def create_mysql_cymysql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using cymysql.
"""
return create_engine(
_create_mysql_cymysql(username, password, host, port, database),
**kwargs
) | [
"def",
"create_mysql_cymysql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_mysql_cymysql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a mysql database using cymysql. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"mysql",
"database",
"using",
"cymysql",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L242-L249 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_mssql_pyodbc | def create_mssql_pyodbc(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mssql database using pyodbc.
"""
return create_engine(
_create_mssql_pyodbc(username, password, host, port, database),
**kwargs
) | python | def create_mssql_pyodbc(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mssql database using pyodbc.
"""
return create_engine(
_create_mssql_pyodbc(username, password, host, port, database),
**kwargs
) | [
"def",
"create_mssql_pyodbc",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_mssql_pyodbc",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a mssql database using pyodbc. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"mssql",
"database",
"using",
"pyodbc",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L294-L301 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/engine_creator.py | create_mssql_pymssql | def create_mssql_pymssql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mssql database using pymssql.
"""
return create_engine(
_create_mssql_pymssql(username, password, host, port, database),
**kwargs
) | python | def create_mssql_pymssql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mssql database using pymssql.
"""
return create_engine(
_create_mssql_pymssql(username, password, host, port, database),
**kwargs
) | [
"def",
"create_mssql_pymssql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"return",
"create_engine",
"(",
"_create_mssql_pymssql",
"(",
"username",
",",
"password",
",",
"host",
",",
"port",
",",
"database",
")",
",",
"*",
"*",
"kwargs",
")"
] | create an engine connected to a mssql database using pymssql. | [
"create",
"an",
"engine",
"connected",
"to",
"a",
"mssql",
"database",
"using",
"pymssql",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/engine_creator.py#L310-L317 | train |
MacHu-GWU/uszipcode-project | dataset/step2_merge_zipcode_data.py | titleize | def titleize(text):
"""Capitalizes all the words and replaces some characters in the string
to create a nicer looking title.
"""
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk[0].upper() + chunk[1:] for chunk in text.split(" ") if len(chunk) >= 1]
return " ".join(chunks) | python | def titleize(text):
"""Capitalizes all the words and replaces some characters in the string
to create a nicer looking title.
"""
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk[0].upper() + chunk[1:] for chunk in text.split(" ") if len(chunk) >= 1]
return " ".join(chunks) | [
"def",
"titleize",
"(",
"text",
")",
":",
"if",
"len",
"(",
"text",
")",
"==",
"0",
":",
"# if empty string, return it",
"return",
"text",
"else",
":",
"text",
"=",
"text",
".",
"lower",
"(",
")",
"# lower all char",
"# delete redundant empty space ",
"chunks",
"=",
"[",
"chunk",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"chunk",
"[",
"1",
":",
"]",
"for",
"chunk",
"in",
"text",
".",
"split",
"(",
"\" \"",
")",
"if",
"len",
"(",
"chunk",
")",
">=",
"1",
"]",
"return",
"\" \"",
".",
"join",
"(",
"chunks",
")"
] | Capitalizes all the words and replaces some characters in the string
to create a nicer looking title. | [
"Capitalizes",
"all",
"the",
"words",
"and",
"replaces",
"some",
"characters",
"in",
"the",
"string",
"to",
"create",
"a",
"nicer",
"looking",
"title",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/dataset/step2_merge_zipcode_data.py#L14-L24 | train |
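A quick worked example; the function body is copied from above so the snippet runs standalone:

def titleize(text):
    if len(text) == 0:
        return text
    text = text.lower()
    chunks = [c[0].upper() + c[1:] for c in text.split(" ") if len(c) >= 1]
    return " ".join(chunks)

print(titleize("NEW  YORK city"))  # 'New York City' -- the double space is collapsed
print(titleize(""))                # '' -- empty input is returned unchanged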
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/utils.py | grouper_list | def grouper_list(l, n):
"""Evenly divide list into fixed-length piece, no filled value if chunk
size smaller than fixed-length.
Example::
>>> list(grouper_list(range(10), n=3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
**Notes**
Pack a list into chunks of size n in order, emitting however many items remain
without forcing each chunk to be filled up to n. The implementations below are
ranked from fastest to slowest:
- Method 1: keep a counter; compare it with n while appending elements to the
chunk and yield when they match, then yield any leftover items at the end.
- Method 2: keep a list, append one element at a time, and check its size.
- Method 3: call the grouper() function, then clean out the None elements.
"""
chunk = list()
counter = 0
for item in l:
counter += 1
chunk.append(item)
if counter == n:
yield chunk
chunk = list()
counter = 0
if len(chunk) > 0:
yield chunk | python | def grouper_list(l, n):
"""Evenly divide list into fixed-length piece, no filled value if chunk
size smaller than fixed-length.
Example::
>>> list(grouper_list(range(10), n=3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
**Notes**
Pack a list into chunks of size n in order, emitting however many items remain
without forcing each chunk to be filled up to n. The implementations below are
ranked from fastest to slowest:
- Method 1: keep a counter; compare it with n while appending elements to the
chunk and yield when they match, then yield any leftover items at the end.
- Method 2: keep a list, append one element at a time, and check its size.
- Method 3: call the grouper() function, then clean out the None elements.
"""
chunk = list()
counter = 0
for item in l:
counter += 1
chunk.append(item)
if counter == n:
yield chunk
chunk = list()
counter = 0
if len(chunk) > 0:
yield chunk | [
"def",
"grouper_list",
"(",
"l",
",",
"n",
")",
":",
"chunk",
"=",
"list",
"(",
")",
"counter",
"=",
"0",
"for",
"item",
"in",
"l",
":",
"counter",
"+=",
"1",
"chunk",
".",
"append",
"(",
"item",
")",
"if",
"counter",
"==",
"n",
":",
"yield",
"chunk",
"chunk",
"=",
"list",
"(",
")",
"counter",
"=",
"0",
"if",
"len",
"(",
"chunk",
")",
">",
"0",
":",
"yield",
"chunk"
] | Evenly divide list into fixed-length piece, no filled value if chunk
size smaller than fixed-length.
Example::
>>> list(grouper_list(range(10), n=3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
**Notes**
Pack a list into chunks of size n in order, emitting however many items remain
without forcing each chunk to be filled up to n. The implementations below are
ranked from fastest to slowest:
- Method 1: keep a counter; compare it with n while appending elements to the
chunk and yield when they match, then yield any leftover items at the end.
- Method 2: keep a list, append one element at a time, and check its size.
- Method 3: call the grouper() function, then clean out the None elements. | [
"Evenly",
"divide",
"list",
"into",
"fixed",
"-",
"length",
"piece",
"no",
"filled",
"value",
"if",
"chunk",
"size",
"smaller",
"than",
"fixed",
"-",
"length",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/utils.py#L16-L46 | train |
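A runnable copy demonstrating the short final chunk (body copied from above):

def grouper_list(l, n):
    chunk, counter = [], 0
    for item in l:
        counter += 1
        chunk.append(item)
        if counter == n:     # chunk is full: emit it and start over
            yield chunk
            chunk, counter = [], 0
    if chunk:                # leftover items form a short final chunk
        yield chunk

print(list(grouper_list(range(10), 3)))  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]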
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/utils.py | convert_query_to_sql_statement | def convert_query_to_sql_statement(query):
"""
Convert a Query object created from orm query into executable sql statement.
:param query: :class:`sqlalchemy.orm.Query`
:return: :class:`sqlalchemy.sql.selectable.Select`
"""
context = query._compile_context()
context.statement.use_labels = False
return context.statement | python | def convert_query_to_sql_statement(query):
"""
Convert a Query object created from orm query into executable sql statement.
:param query: :class:`sqlalchemy.orm.Query`
:return: :class:`sqlalchemy.sql.selectable.Select`
"""
context = query._compile_context()
context.statement.use_labels = False
return context.statement | [
"def",
"convert_query_to_sql_statement",
"(",
"query",
")",
":",
"context",
"=",
"query",
".",
"_compile_context",
"(",
")",
"context",
".",
"statement",
".",
"use_labels",
"=",
"False",
"return",
"context",
".",
"statement"
] | Convert a Query object created from orm query into executable sql statement.
:param query: :class:`sqlalchemy.orm.Query`
:return: :class:`sqlalchemy.sql.selectable.Select` | [
"Convert",
"a",
"Query",
"object",
"created",
"from",
"orm",
"query",
"into",
"executable",
"sql",
"statement",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/utils.py#L49-L59 | train |
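A hedged usage sketch; note that _compile_context is private SQLAlchemy 1.x API and may differ across versions (the function body is copied from above):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

def convert_query_to_sql_statement(query):  # copied from above
    context = query._compile_context()
    context.statement.use_labels = False
    return context.statement

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

stmt = convert_query_to_sql_statement(session.query(User).filter(User.id >= 1))
print(stmt)  # roughly: SELECT users.id, users.name FROM users WHERE users.id >= :id_1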
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/utils.py | execute_query_return_result_proxy | def execute_query_return_result_proxy(query):
"""
Execute a query, yield result proxy.
:param query: :class:`sqlalchemy.orm.Query`,
has to be created from ``session.query(Object)``
:return: :class:`sqlalchemy.engine.result.ResultProxy`
"""
context = query._compile_context()
context.statement.use_labels = False
if query._autoflush and not query._populate_existing:
query.session._autoflush()
conn = query._get_bind_args(
context,
query._connection_from_session,
close_with_result=True)
return conn.execute(context.statement, query._params) | python | def execute_query_return_result_proxy(query):
"""
Execute a query, yield result proxy.
:param query: :class:`sqlalchemy.orm.Query`,
has to be created from ``session.query(Object)``
:return: :class:`sqlalchemy.engine.result.ResultProxy`
"""
context = query._compile_context()
context.statement.use_labels = False
if query._autoflush and not query._populate_existing:
query.session._autoflush()
conn = query._get_bind_args(
context,
query._connection_from_session,
close_with_result=True)
return conn.execute(context.statement, query._params) | [
"def",
"execute_query_return_result_proxy",
"(",
"query",
")",
":",
"context",
"=",
"query",
".",
"_compile_context",
"(",
")",
"context",
".",
"statement",
".",
"use_labels",
"=",
"False",
"if",
"query",
".",
"_autoflush",
"and",
"not",
"query",
".",
"_populate_existing",
":",
"query",
".",
"session",
".",
"_autoflush",
"(",
")",
"conn",
"=",
"query",
".",
"_get_bind_args",
"(",
"context",
",",
"query",
".",
"_connection_from_session",
",",
"close_with_result",
"=",
"True",
")",
"return",
"conn",
".",
"execute",
"(",
"context",
".",
"statement",
",",
"query",
".",
"_params",
")"
] | Execute a query, yield result proxy.
:param query: :class:`sqlalchemy.orm.Query`,
has to be created from ``session.query(Object)``
:return: :class:`sqlalchemy.engine.result.ResultProxy` | [
"Execute",
"a",
"query",
"yield",
"result",
"proxy",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/utils.py#L62-L81 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.find_state | def find_state(self, state, best_match=True, min_similarity=70):
"""
Fuzzy search correct state.
:param best_match: bool, when True, only the best matched state
will be returned. Otherwise, all matching states will be returned.
"""
result_state_short_list = list()
# check if it is an abbreviated name
if state.upper() in STATE_ABBR_SHORT_TO_LONG:
result_state_short_list.append(state.upper())
# if not, find out which state the user is looking for
else:
if best_match:
state_long, confidence = extractOne(state, self.state_list)
if confidence >= min_similarity:
result_state_short_list.append(
STATE_ABBR_LONG_TO_SHORT[state_long])
else:
for state_long, confidence in extract(state, self.state_list):
if confidence >= min_similarity:
result_state_short_list.append(
STATE_ABBR_LONG_TO_SHORT[state_long])
if len(result_state_short_list) == 0:
message = ("'%s' is not a valid state name, use 2 letter "
"short name or correct full name please.")
raise ValueError(message % state)
return result_state_short_list | python | def find_state(self, state, best_match=True, min_similarity=70):
"""
Fuzzy search correct state.
:param best_match: bool, when True, only the best matched state
will be returned. Otherwise, all matching states will be returned.
"""
result_state_short_list = list()
# check if it is an abbreviated name
if state.upper() in STATE_ABBR_SHORT_TO_LONG:
result_state_short_list.append(state.upper())
# if not, find out which state the user is looking for
else:
if best_match:
state_long, confidence = extractOne(state, self.state_list)
if confidence >= min_similarity:
result_state_short_list.append(
STATE_ABBR_LONG_TO_SHORT[state_long])
else:
for state_long, confidence in extract(state, self.state_list):
if confidence >= min_similarity:
result_state_short_list.append(
STATE_ABBR_LONG_TO_SHORT[state_long])
if len(result_state_short_list) == 0:
message = ("'%s' is not a valid state name, use 2 letter "
"short name or correct full name please.")
raise ValueError(message % state)
return result_state_short_list | [
"def",
"find_state",
"(",
"self",
",",
"state",
",",
"best_match",
"=",
"True",
",",
"min_similarity",
"=",
"70",
")",
":",
"result_state_short_list",
"=",
"list",
"(",
")",
"# check if it is a abbreviate name",
"if",
"state",
".",
"upper",
"(",
")",
"in",
"STATE_ABBR_SHORT_TO_LONG",
":",
"result_state_short_list",
".",
"append",
"(",
"state",
".",
"upper",
"(",
")",
")",
"# if not, find out what is the state that user looking for",
"else",
":",
"if",
"best_match",
":",
"state_long",
",",
"confidence",
"=",
"extractOne",
"(",
"state",
",",
"self",
".",
"state_list",
")",
"if",
"confidence",
">=",
"min_similarity",
":",
"result_state_short_list",
".",
"append",
"(",
"STATE_ABBR_LONG_TO_SHORT",
"[",
"state_long",
"]",
")",
"else",
":",
"for",
"state_long",
",",
"confidence",
"in",
"extract",
"(",
"state",
",",
"self",
".",
"state_list",
")",
":",
"if",
"confidence",
">=",
"min_similarity",
":",
"result_state_short_list",
".",
"append",
"(",
"STATE_ABBR_LONG_TO_SHORT",
"[",
"state_long",
"]",
")",
"if",
"len",
"(",
"result_state_short_list",
")",
"==",
"0",
":",
"message",
"=",
"(",
"\"'%s' is not a valid state name, use 2 letter \"",
"\"short name or correct full name please.\"",
")",
"raise",
"ValueError",
"(",
"message",
"%",
"state",
")",
"return",
"result_state_short_list"
] | Fuzzy search correct state.
:param best_match: bool, when True, only the best matched state
will be returned. Otherwise, all matching states will be returned. | [
"Fuzzy",
"search",
"correct",
"state",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L178-L209 | train |
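The fuzzy-match core comes from fuzzywuzzy. A standalone illustration of the best_match branch, assuming fuzzywuzzy is installed; extractOne returns a (choice, score) pair scored 0-100:

from fuzzywuzzy.process import extractOne

state_list = ["Maryland", "Virginia", "California"]
state_long, confidence = extractOne("virgnia", state_list)  # note the typo
if confidence >= 70:   # same threshold as min_similarity above
    print(state_long)  # 'Virginia'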
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.find_city | def find_city(self, city, state=None, best_match=True, min_similarity=70):
"""
Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will be returned. Otherwise, all matching cities will be returned.
**Notes**
If state is given, search only among cities in that state; otherwise, search among all cities nationwide.
"""
# find out which city the user is looking for
if state:
state_sort = self.find_state(state, best_match=True)[0]
city_pool = self.state_to_city_mapper[state_sort.upper()]
else:
city_pool = self.city_list
result_city_list = list()
if best_match:
city, confidence = extractOne(city, city_pool)
if confidence >= min_similarity:
result_city_list.append(city)
else:
for city, confidence in extract(city, city_pool):
if confidence >= min_similarity:
result_city_list.append(city)
if len(result_city_list) == 0:
raise ValueError("'%s' is not a valid city name" % city)
return result_city_list | python | def find_city(self, city, state=None, best_match=True, min_similarity=70):
"""
Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will be returned. Otherwise, all matching cities will be returned.
**Notes**
If state is given, search only among cities in that state; otherwise, search among all cities nationwide.
"""
# find out which city the user is looking for
if state:
state_sort = self.find_state(state, best_match=True)[0]
city_pool = self.state_to_city_mapper[state_sort.upper()]
else:
city_pool = self.city_list
result_city_list = list()
if best_match:
city, confidence = extractOne(city, city_pool)
if confidence >= min_similarity:
result_city_list.append(city)
else:
for city, confidence in extract(city, city_pool):
if confidence >= min_similarity:
result_city_list.append(city)
if len(result_city_list) == 0:
raise ValueError("'%s' is not a valid city name" % city)
return result_city_list | [
"def",
"find_city",
"(",
"self",
",",
"city",
",",
"state",
"=",
"None",
",",
"best_match",
"=",
"True",
",",
"min_similarity",
"=",
"70",
")",
":",
"# find out what is the city that user looking for",
"if",
"state",
":",
"state_sort",
"=",
"self",
".",
"find_state",
"(",
"state",
",",
"best_match",
"=",
"True",
")",
"[",
"0",
"]",
"city_pool",
"=",
"self",
".",
"state_to_city_mapper",
"[",
"state_sort",
".",
"upper",
"(",
")",
"]",
"else",
":",
"city_pool",
"=",
"self",
".",
"city_list",
"result_city_list",
"=",
"list",
"(",
")",
"if",
"best_match",
":",
"city",
",",
"confidence",
"=",
"extractOne",
"(",
"city",
",",
"city_pool",
")",
"if",
"confidence",
">=",
"min_similarity",
":",
"result_city_list",
".",
"append",
"(",
"city",
")",
"else",
":",
"for",
"city",
",",
"confidence",
"in",
"extract",
"(",
"city",
",",
"city_pool",
")",
":",
"if",
"confidence",
">=",
"min_similarity",
":",
"result_city_list",
".",
"append",
"(",
"city",
")",
"if",
"len",
"(",
"result_city_list",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"'%s' is not a valid city name\"",
"%",
"city",
")",
"return",
"result_city_list"
] | Fuzzy search correct city.
:param city: city name.
:param state: search city in specified state.
:param best_match: bool, when True, only the best matched city
will be returned. Otherwise, all matching cities will be returned.
**Notes**
If state is given, search only among cities in that state; otherwise, search among all cities nationwide. | [
"Fuzzy",
"search",
"correct",
"city",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L211-L245 | train |
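The non-best-match branch uses extract(), which yields several (choice, score) pairs that find_city filters by min_similarity. Another standalone sketch with made-up city names:

from fuzzywuzzy.process import extract

city_pool = ["Springfield", "Spring Valley", "Columbia"]
matches = [city for city, score in extract("springfeld", city_pool) if score >= 70]
print(matches)  # 'Springfield' scores highest; which others pass depends on the scorer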
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine._resolve_sort_by | def _resolve_sort_by(sort_by, flag_radius_query):
"""
Resolve ``sort_by`` argument.
:param sort_by: str, or sqlalchemy ORM attribute.
:param flag_radius_query:
:return:
"""
if sort_by is None:
if flag_radius_query:
sort_by = SORT_BY_DIST
elif isinstance(sort_by, string_types):
if sort_by.lower() == SORT_BY_DIST:
if flag_radius_query is False:
msg = "`sort_by` arg can be 'dist' only under distance based query!"
raise ValueError(msg)
sort_by = SORT_BY_DIST
elif sort_by not in SimpleZipcode.__table__.columns:
msg = "`sort_by` arg has to be one of the Zipcode attribute or 'dist'!"
raise ValueError(msg)
else:
sort_by = sort_by.name
return sort_by | python | def _resolve_sort_by(sort_by, flag_radius_query):
"""
Resolve ``sort_by`` argument.
:param sort_by: str, or sqlalchemy ORM attribute.
:param flag_radius_query:
:return:
"""
if sort_by is None:
if flag_radius_query:
sort_by = SORT_BY_DIST
elif isinstance(sort_by, string_types):
if sort_by.lower() == SORT_BY_DIST:
if flag_radius_query is False:
msg = "`sort_by` arg can be 'dist' only under distance based query!"
raise ValueError(msg)
sort_by = SORT_BY_DIST
elif sort_by not in SimpleZipcode.__table__.columns:
msg = "`sort_by` arg has to be one of the Zipcode attribute or 'dist'!"
raise ValueError(msg)
else:
sort_by = sort_by.name
return sort_by | [
"def",
"_resolve_sort_by",
"(",
"sort_by",
",",
"flag_radius_query",
")",
":",
"if",
"sort_by",
"is",
"None",
":",
"if",
"flag_radius_query",
":",
"sort_by",
"=",
"SORT_BY_DIST",
"elif",
"isinstance",
"(",
"sort_by",
",",
"string_types",
")",
":",
"if",
"sort_by",
".",
"lower",
"(",
")",
"==",
"SORT_BY_DIST",
":",
"if",
"flag_radius_query",
"is",
"False",
":",
"msg",
"=",
"\"`sort_by` arg can be 'dist' only under distance based query!\"",
"raise",
"ValueError",
"(",
"msg",
")",
"sort_by",
"=",
"SORT_BY_DIST",
"elif",
"sort_by",
"not",
"in",
"SimpleZipcode",
".",
"__table__",
".",
"columns",
":",
"msg",
"=",
"\"`sort_by` arg has to be one of the Zipcode attribute or 'dist'!\"",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"sort_by",
"=",
"sort_by",
".",
"name",
"return",
"sort_by"
] | Resolve ``sort_by`` argument.
:param sort_by: str, or sqlalchemy ORM attribute.
:param flag_radius_query:
:return: | [
"Result",
"sort_by",
"argument",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L248-L271 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_zipcode | def by_zipcode(self,
zipcode,
zipcode_type=None,
zero_padding=True):
"""
Search zipcode by exact 5 digits zipcode. No zero padding is needed.
:param zipcode: int or str, the zipcode will be automatically
zero-padded to 5 digits.
:param zipcode_type: str or :class:`~uszipcode.model.ZipcodeType` attribute.
by default, it returns any zipcode type.
:param zero_padding: bool, toggle on and off automatic zero padding.
"""
if zero_padding:
zipcode = str(zipcode).zfill(5)
else: # pragma: no cover
zipcode = str(zipcode)
res = self.query(
zipcode=zipcode,
sort_by=None,
returns=1,
zipcode_type=zipcode_type,
)
if len(res):
return res[0]
else:
return self.zip_klass() | python | def by_zipcode(self,
zipcode,
zipcode_type=None,
zero_padding=True):
"""
Search zipcode by exact 5 digits zipcode. No zero padding is needed.
:param zipcode: int or str, the zipcode will be automatically
zero-padded to 5 digits.
:param zipcode_type: str or :class:`~uszipcode.model.ZipcodeType` attribute.
by default, it returns any zipcode type.
:param zero_padding: bool, toggle on and off automatic zero padding.
"""
if zero_padding:
zipcode = str(zipcode).zfill(5)
else: # pragma: no cover
zipcode = str(zipcode)
res = self.query(
zipcode=zipcode,
sort_by=None,
returns=1,
zipcode_type=zipcode_type,
)
if len(res):
return res[0]
else:
return self.zip_klass() | [
"def",
"by_zipcode",
"(",
"self",
",",
"zipcode",
",",
"zipcode_type",
"=",
"None",
",",
"zero_padding",
"=",
"True",
")",
":",
"if",
"zero_padding",
":",
"zipcode",
"=",
"str",
"(",
"zipcode",
")",
".",
"zfill",
"(",
"5",
")",
"else",
":",
"# pragma: no cover",
"zipcode",
"=",
"str",
"(",
"zipcode",
")",
"res",
"=",
"self",
".",
"query",
"(",
"zipcode",
"=",
"zipcode",
",",
"sort_by",
"=",
"None",
",",
"returns",
"=",
"1",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
")",
"if",
"len",
"(",
"res",
")",
":",
"return",
"res",
"[",
"0",
"]",
"else",
":",
"return",
"self",
".",
"zip_klass",
"(",
")"
] | Search zipcode by exact 5 digits zipcode. No zero padding is needed.
:param zipcode: int or str, the zipcode will be automatically
zero-padded to 5 digits.
:param zipcode_type: str or :class:`~uszipcode.model.ZipcodeType` attribute.
by default, it returns any zipcode type.
:param zero_padding: bool, toggle on and off automatic zero padding. | [
"Search",
"zipcode",
"by",
"exact",
"5",
"digits",
"zipcode",
".",
"No",
"zero",
"padding",
"is",
"needed",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L536-L563 | train |
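A hedged end-to-end usage sketch: SearchEngine downloads its SQLite database on first use, and the attribute names are assumed from the SimpleZipcode model used above:

from uszipcode import SearchEngine

search = SearchEngine()       # simple zipcode database by default
z = search.by_zipcode(10001)  # int works too: it is zero-padded to 5 digits
print(z.zipcode, z.major_city, z.state)  # expected: 10001 New York NY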
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_prefix | def by_prefix(self,
prefix,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by first N digits.
Returns multiple results.
"""
return self.query(
prefix=prefix,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_prefix(self,
prefix,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by first N digits.
Returns multiple results.
"""
return self.query(
prefix=prefix,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_prefix",
"(",
"self",
",",
"prefix",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"zipcode",
".",
"name",
",",
"ascending",
"=",
"True",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"prefix",
"=",
"prefix",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by first N digits.
Returns multiple results. | [
"Search",
"zipcode",
"information",
"by",
"first",
"N",
"digits",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L565-L580 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_pattern | def by_pattern(self,
pattern,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode by wildcard.
Returns multiple results.
"""
return self.query(
pattern=pattern,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_pattern(self,
pattern,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode by wildcard.
Returns multiple results.
"""
return self.query(
pattern=pattern,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_pattern",
"(",
"self",
",",
"pattern",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"zipcode",
".",
"name",
",",
"ascending",
"=",
"True",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"pattern",
"=",
"pattern",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode by wildcard.
Returns multiple results. | [
"Search",
"zipcode",
"by",
"wildcard",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L582-L597 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_state | def by_state(self,
state,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by fuzzy State name.
The engine uses fuzzy matching to guess which state you want.
"""
return self.query(
state=state,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_state(self,
state,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by fuzzy State name.
The engine uses fuzzy matching to guess which state you want.
"""
return self.query(
state=state,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_state",
"(",
"self",
",",
"state",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"zipcode",
".",
"name",
",",
"ascending",
"=",
"True",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"state",
"=",
"state",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by fuzzy State name.
The engine uses fuzzy matching to guess which state you want. | [
"Search",
"zipcode",
"information",
"by",
"fuzzy",
"State",
"name",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L616-L631 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_coordinates | def by_coordinates(self,
lat,
lng,
radius=25.0,
zipcode_type=ZipcodeType.Standard,
sort_by=SORT_BY_DIST,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information near a coordinate on a map.
Returns multiple results.
:param lat: center latitude.
:param lng: center longitude.
:param radius: only returns zipcode within X miles from ``lat``, ``lng``.
**Notes**
1. At the center coordinates, compute how many miles one degree of longitude
and one degree of latitude each represent.
2. Draw a rectangle centered on the given coordinates, with width and height a
little more than twice the radius, and find all zipcodes inside it.
3. Compute the distance to each of these zipcodes and sort by distance,
discarding any whose distance exceeds the given radius.
"""
return self.query(
lat=lat, lng=lng, radius=radius,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_coordinates(self,
lat,
lng,
radius=25.0,
zipcode_type=ZipcodeType.Standard,
sort_by=SORT_BY_DIST,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information near a coordinate on a map.
Returns multiple results.
:param lat: center latitude.
:param lng: center longitude.
:param radius: only returns zipcode within X miles from ``lat``, ``lng``.
**Notes**
1. At the center coordinates, compute how many miles one degree of longitude
and one degree of latitude each represent.
2. Draw a rectangle centered on the given coordinates, with width and height a
little more than twice the radius, and find all zipcodes inside it.
3. Compute the distance to each of these zipcodes and sort by distance,
discarding any whose distance exceeds the given radius.
"""
return self.query(
lat=lat, lng=lng, radius=radius,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_coordinates",
"(",
"self",
",",
"lat",
",",
"lng",
",",
"radius",
"=",
"25.0",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SORT_BY_DIST",
",",
"ascending",
"=",
"True",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"lat",
"=",
"lat",
",",
"lng",
"=",
"lng",
",",
"radius",
"=",
"radius",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information near a coordinate on a map.
Returns multiple results.
:param lat: center latitude.
:param lng: center longitude.
:param radius: only returns zipcode within X miles from ``lat``, ``lng``.
**Notes**
1. At the center coordinates, compute how many miles one degree of longitude
and one degree of latitude each represent.
2. Draw a rectangle centered on the given coordinates, with width and height a
little more than twice the radius, and find all zipcodes inside it.
3. Compute the distance to each of these zipcodes and sort by distance,
discarding any whose distance exceeds the given radius. | [
"Search",
"zipcode",
"information",
"near",
"a",
"coordinates",
"on",
"a",
"map",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L652-L681 | train |
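A standalone sketch of the three-step radius search described in the notes above: a bounding-box prefilter, exact great-circle distances, then sort and cut at the radius. The constants and sample points are illustrative, not the library's code:

from math import radians, cos, sin, asin, sqrt

def haversine_miles(lat1, lng1, lat2, lng2):
    lat1, lng1, lat2, lng2 = map(radians, (lat1, lng1, lat2, lng2))
    a = (sin((lat2 - lat1) / 2) ** 2
         + cos(lat1) * cos(lat2) * sin((lng2 - lng1) / 2) ** 2)
    return 3959.0 * 2 * asin(sqrt(a))  # mean Earth radius in miles

def radius_search(points, lat, lng, radius):
    dlat = radius / 69.0                                   # ~69 miles per degree of latitude
    dlng = radius / (69.0 * max(cos(radians(lat)), 0.01))  # longitude degrees shrink with latitude
    box = [p for p in points
           if abs(p["lat"] - lat) <= dlat and abs(p["lng"] - lng) <= dlng]
    scored = [(haversine_miles(lat, lng, p["lat"], p["lng"]), p) for p in box]
    return sorted((pair for pair in scored if pair[0] <= radius),
                  key=lambda pair: pair[0])

pts = [{"zip": "20001", "lat": 38.912, "lng": -77.017},
       {"zip": "10001", "lat": 40.751, "lng": -73.997}]
print(radius_search(pts, 38.9, -77.0, 25.0))  # only the DC point is within 25 miles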
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_population | def by_population(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.population.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by population range.
"""
return self.query(
population_lower=lower,
population_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_population(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.population.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by population range.
"""
return self.query(
population_lower=lower,
population_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_population",
"(",
"self",
",",
"lower",
"=",
"-",
"1",
",",
"upper",
"=",
"2",
"**",
"31",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"population",
".",
"name",
",",
"ascending",
"=",
"False",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"population_lower",
"=",
"lower",
",",
"population_upper",
"=",
"upper",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by population range. | [
"Search",
"zipcode",
"information",
"by",
"population",
"range",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L683-L698 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_population_density | def by_population_density(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.population_density.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by population density range.
`population density` is `population per square mile of land`
"""
return self.query(
population_density_lower=lower,
population_density_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_population_density(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.population_density.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by population density range.
`population density` is `population per square mile of land`
"""
return self.query(
population_density_lower=lower,
population_density_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_population_density",
"(",
"self",
",",
"lower",
"=",
"-",
"1",
",",
"upper",
"=",
"2",
"**",
"31",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"population_density",
".",
"name",
",",
"ascending",
"=",
"False",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"population_density_lower",
"=",
"lower",
",",
"population_density_upper",
"=",
"upper",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by population density range.
`population density` is `population per square mile of land` | [
"Search",
"zipcode",
"information",
"by",
"population",
"density",
"range",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L700-L717 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_housing_units | def by_housing_units(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.housing_units.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by housing units.
"""
return self.query(
housing_units_lower=lower,
housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_housing_units(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.housing_units.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by housing units.
"""
return self.query(
housing_units_lower=lower,
housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_housing_units",
"(",
"self",
",",
"lower",
"=",
"-",
"1",
",",
"upper",
"=",
"2",
"**",
"31",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"housing_units",
".",
"name",
",",
"ascending",
"=",
"False",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"housing_units_lower",
"=",
"lower",
",",
"housing_units_upper",
"=",
"upper",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by housing units. | [
"Search",
"zipcode",
"information",
"by",
"house",
"of",
"units",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L753-L768 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_occupied_housing_units | def by_occupied_housing_units(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.occupied_housing_units.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by occupied housing units.
"""
return self.query(
occupied_housing_units_lower=lower,
occupied_housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_occupied_housing_units(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.occupied_housing_units.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by occupied housing units.
"""
return self.query(
occupied_housing_units_lower=lower,
occupied_housing_units_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_occupied_housing_units",
"(",
"self",
",",
"lower",
"=",
"-",
"1",
",",
"upper",
"=",
"2",
"**",
"31",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"occupied_housing_units",
".",
"name",
",",
"ascending",
"=",
"False",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"occupied_housing_units_lower",
"=",
"lower",
",",
"occupied_housing_units_upper",
"=",
"upper",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by occupied housing units. | [
"Search",
"zipcode",
"information",
"by",
"occupied",
"house",
"of",
"units",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L770-L785 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_median_home_value | def by_median_home_value(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_home_value.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median home value.
"""
return self.query(
median_home_value_lower=lower,
median_home_value_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_median_home_value(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_home_value.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median home value.
"""
return self.query(
median_home_value_lower=lower,
median_home_value_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_median_home_value",
"(",
"self",
",",
"lower",
"=",
"-",
"1",
",",
"upper",
"=",
"2",
"**",
"31",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"median_home_value",
".",
"name",
",",
"ascending",
"=",
"False",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"median_home_value_lower",
"=",
"lower",
",",
"median_home_value_upper",
"=",
"upper",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by median home value. | [
"Search",
"zipcode",
"information",
"by",
"median",
"home",
"value",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L787-L802 | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | SearchEngine.by_median_household_income | def by_median_household_income(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_household_income.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median household income.
"""
return self.query(
median_household_income_lower=lower,
median_household_income_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | python | def by_median_household_income(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_household_income.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median household income.
"""
return self.query(
median_household_income_lower=lower,
median_household_income_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_median_household_income",
"(",
"self",
",",
"lower",
"=",
"-",
"1",
",",
"upper",
"=",
"2",
"**",
"31",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"median_household_income",
".",
"name",
",",
"ascending",
"=",
"False",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"return",
"self",
".",
"query",
"(",
"median_household_income_lower",
"=",
"lower",
",",
"median_household_income_upper",
"=",
"upper",
",",
"sort_by",
"=",
"sort_by",
",",
"zipcode_type",
"=",
"zipcode_type",
",",
"ascending",
"=",
"ascending",
",",
"returns",
"=",
"returns",
",",
")"
] | Search zipcode information by median household income. | [
"Search",
"zipcode",
"information",
"by",
"median",
"household",
"income",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L804-L819 | train |
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/crud/selecting.py | select_single_column | def select_single_column(engine, column):
"""
Select data from a single column.
Example::
>>> select_single_column(engine, table_user.c.id)
("id", [1, 2, 3])
>>> select_single_column(engine, table_user.c.name)
("name", ["Alice", "Bob", "Cathy"])
"""
s = select([column])
return column.name, [row[0] for row in engine.execute(s)] | python | def select_single_column(engine, column):
"""
Select data from a single column.
Example::
>>> select_single_column(engine, table_user.c.id)
("id", [1, 2, 3])
>>> select_single_column(engine, table_user.c.name)
("name", ["Alice", "Bob", "Cathy"])
"""
s = select([column])
return column.name, [row[0] for row in engine.execute(s)] | [
"def",
"select_single_column",
"(",
"engine",
",",
"column",
")",
":",
"s",
"=",
"select",
"(",
"[",
"column",
"]",
")",
"return",
"column",
".",
"name",
",",
"[",
"row",
"[",
"0",
"]",
"for",
"row",
"in",
"engine",
".",
"execute",
"(",
"s",
")",
"]"
] | Select data from a single column.
Example::
>>> select_single_column(engine, table_user.c.id)
("id", [1, 2, 3])
>>> select_single_column(engine, table_user.c.name)
("name", ["Alice", "Bob", "Cathy"]) | [
"Select",
"data",
"from",
"single",
"column",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/selecting.py#L49-L62 | train |
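A self-contained sketch of the helper above against an in-memory SQLite table, using the SQLAlchemy 1.x engine.execute() style that the code itself relies on; the import path is inferred from this record's path column:

from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from uszipcode.pkg.sqlalchemy_mate.crud.selecting import select_single_column

engine = create_engine("sqlite:///:memory:")
metadata = MetaData()
table_user = Table(
    "user", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)
metadata.create_all(engine)
engine.execute(table_user.insert(), [{"id": 1, "name": "Alice"},
                                     {"id": 2, "name": "Bob"}])

print(select_single_column(engine, table_user.c.id))  # -> ("id", [1, 2])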
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/crud/selecting.py | select_many_column | def select_many_column(engine, *columns):
"""
Select data from multiple columns.
Example::
>>> select_many_column(engine, table_user.c.id, table_user.c.name)
:param columns: list of sqlalchemy.Column instances
:returns headers: headers
:returns data: list of rows
Returns the data from multiple columns.
"""
if isinstance(columns[0], Column):
pass
elif isinstance(columns[0], (list, tuple)):
columns = columns[0]
s = select(columns)
headers = [str(column) for column in columns]
data = [tuple(row) for row in engine.execute(s)]
return headers, data | python | def select_many_column(engine, *columns):
"""
Select data from multiple columns.
Example::
>>> select_many_column(engine, table_user.c.id, table_user.c.name)
:param columns: list of sqlalchemy.Column instances
:returns headers: headers
:returns data: list of rows
Returns the data from multiple columns.
"""
if isinstance(columns[0], Column):
pass
elif isinstance(columns[0], (list, tuple)):
columns = columns[0]
s = select(columns)
headers = [str(column) for column in columns]
data = [tuple(row) for row in engine.execute(s)]
return headers, data | [
"def",
"select_many_column",
"(",
"engine",
",",
"*",
"columns",
")",
":",
"if",
"isinstance",
"(",
"columns",
"[",
"0",
"]",
",",
"Column",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"columns",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"columns",
"=",
"columns",
"[",
"0",
"]",
"s",
"=",
"select",
"(",
"columns",
")",
"headers",
"=",
"[",
"str",
"(",
"column",
")",
"for",
"column",
"in",
"columns",
"]",
"data",
"=",
"[",
"tuple",
"(",
"row",
")",
"for",
"row",
"in",
"engine",
".",
"execute",
"(",
"s",
")",
"]",
"return",
"headers",
",",
"data"
] | Select data from multiple columns.
Example::
>>> select_many_column(engine, table_user.c.id, table_user.c.name)
:param columns: list of sqlalchemy.Column instances
:returns headers: headers
:returns data: list of rows
Returns the data from multiple columns. | [
"Select",
"data",
"from",
"multiple",
"columns",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/selecting.py#L65-L90 | train |
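With the engine and table_user from the previous sketch, the multi-column variant returns the stringified column names alongside the row tuples:

from uszipcode.pkg.sqlalchemy_mate.crud.selecting import select_many_column

headers, data = select_many_column(engine, table_user.c.id, table_user.c.name)
# headers -> ["user.id", "user.name"]; data -> [(1, "Alice"), (2, "Bob")]
# a single list or tuple of columns is also accepted:
headers, data = select_many_column(engine, [table_user.c.id, table_user.c.name])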
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/crud/selecting.py | select_random | def select_random(engine, table_or_columns, limit=5):
"""
Randomly select some rows from a table.
"""
s = select(table_or_columns).order_by(func.random()).limit(limit)
return engine.execute(s).fetchall() | python | def select_random(engine, table_or_columns, limit=5):
"""
Randomly select some rows from a table.
"""
s = select(table_or_columns).order_by(func.random()).limit(limit)
return engine.execute(s).fetchall() | [
"def",
"select_random",
"(",
"engine",
",",
"table_or_columns",
",",
"limit",
"=",
"5",
")",
":",
"s",
"=",
"select",
"(",
"table_or_columns",
")",
".",
"order_by",
"(",
"func",
".",
"random",
"(",
")",
")",
".",
"limit",
"(",
"limit",
")",
"return",
"engine",
".",
"execute",
"(",
"s",
")",
".",
"fetchall",
"(",
")"
] | Randomly select some rows from a table. | [
"Randomly",
"select",
"some",
"rows",
"from",
"table",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/selecting.py#L114-L119 | train |
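Same setup again. Note that func.random() compiles to the RANDOM() SQL function, which SQLite and PostgreSQL provide but MySQL spells RAND(), so portability depends on the backend:

from uszipcode.pkg.sqlalchemy_mate.crud.selecting import select_random

rows = select_random(engine, [table_user], limit=1)  # one random row as a list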
MacHu-GWU/uszipcode-project | uszipcode/pkg/sqlalchemy_mate/crud/inserting.py | smart_insert | def smart_insert(engine, table, data, minimal_size=5):
"""
An optimized Insert strategy. Guarantees successful and highest insertion
speed. But ATOMIC WRITE IS NOT ENSURED IF THE PROGRAM IS INTERRUPTED.
For inserts, when it is known in advance that no IntegrityError can occur,
a bulk insert is far faster than inserting row by row. When that cannot be
known in advance, the following strategy is used:
1. Try a bulk insert; because nothing is committed until the end, it is
very fast.
2. If it fails, split the data into chunks of about sqrt(n) rows and
repeat this logic on each chunk.
3. If a chunk still fails, keep splitting; once a chunk is small enough,
insert its rows one by one until everything succeeds.
This insert strategy needs an extra sqrt(nbytes) of memory, tiny compared
to the original data, and its running time is on average the best across
all scenarios.
"""
insert = table.insert()
if isinstance(data, list):
# first, try a bulk insert
try:
engine.execute(insert, data)
# the bulk insert failed
except IntegrityError:
# check how many rows there are
n = len(data)
# if there are more rows than the threshold
if n >= minimal_size ** 2:
# split the data into chunks
n_chunk = math.floor(math.sqrt(n))
for chunk in grouper_list(data, n_chunk):
smart_insert(engine, table, chunk, minimal_size)
# otherwise, insert the rows one by one
else:
for row in data:
try:
engine.execute(insert, row)
except IntegrityError:
pass
else:
try:
engine.execute(insert, data)
except IntegrityError:
pass | python | def smart_insert(engine, table, data, minimal_size=5):
"""
An optimized Insert strategy. Guarantees successful and highest insertion
speed. But ATOMIC WRITE IS NOT ENSURED IF THE PROGRAM IS INTERRUPTED.
For inserts, when it is known in advance that no IntegrityError can occur,
a bulk insert is far faster than inserting row by row. When that cannot be
known in advance, the following strategy is used:
1. Try a bulk insert; because nothing is committed until the end, it is
very fast.
2. If it fails, split the data into chunks of about sqrt(n) rows and
repeat this logic on each chunk.
3. If a chunk still fails, keep splitting; once a chunk is small enough,
insert its rows one by one until everything succeeds.
This insert strategy needs an extra sqrt(nbytes) of memory, tiny compared
to the original data, and its running time is on average the best across
all scenarios.
"""
insert = table.insert()
if isinstance(data, list):
# first, try a bulk insert
try:
engine.execute(insert, data)
# the bulk insert failed
except IntegrityError:
# check how many rows there are
n = len(data)
# if there are more rows than the threshold
if n >= minimal_size ** 2:
# split the data into chunks
n_chunk = math.floor(math.sqrt(n))
for chunk in grouper_list(data, n_chunk):
smart_insert(engine, table, chunk, minimal_size)
# otherwise, insert the rows one by one
else:
for row in data:
try:
engine.execute(insert, row)
except IntegrityError:
pass
else:
try:
engine.execute(insert, data)
except IntegrityError:
pass | [
"def",
"smart_insert",
"(",
"engine",
",",
"table",
",",
"data",
",",
"minimal_size",
"=",
"5",
")",
":",
"insert",
"=",
"table",
".",
"insert",
"(",
")",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"# 首先进行尝试bulk insert",
"try",
":",
"engine",
".",
"execute",
"(",
"insert",
",",
"data",
")",
"# 失败了",
"except",
"IntegrityError",
":",
"# 分析数据量",
"n",
"=",
"len",
"(",
"data",
")",
"# 如果数据条数多于一定数量",
"if",
"n",
">=",
"minimal_size",
"**",
"2",
":",
"# 则进行分包",
"n_chunk",
"=",
"math",
".",
"floor",
"(",
"math",
".",
"sqrt",
"(",
"n",
")",
")",
"for",
"chunk",
"in",
"grouper_list",
"(",
"data",
",",
"n_chunk",
")",
":",
"smart_insert",
"(",
"engine",
",",
"table",
",",
"chunk",
",",
"minimal_size",
")",
"# 否则则一条条地逐条插入",
"else",
":",
"for",
"row",
"in",
"data",
":",
"try",
":",
"engine",
".",
"execute",
"(",
"insert",
",",
"row",
")",
"except",
"IntegrityError",
":",
"pass",
"else",
":",
"try",
":",
"engine",
".",
"execute",
"(",
"insert",
",",
"data",
")",
"except",
"IntegrityError",
":",
"pass"
] | An optimized Insert strategy. Guarantees successful and highest insertion
speed. But ATOMIC WRITE IS NOT ENSURED IF THE PROGRAM IS INTERRUPTED.
For inserts, when it is known in advance that no IntegrityError can occur,
a bulk insert is far faster than inserting row by row. When that cannot be
known in advance, the following strategy is used:
1. Try a bulk insert; because nothing is committed until the end, it is
very fast.
2. If it fails, split the data into chunks of about sqrt(n) rows and
repeat this logic on each chunk.
3. If a chunk still fails, keep splitting; once a chunk is small enough,
insert its rows one by one until everything succeeds.
This insert strategy needs an extra sqrt(nbytes) of memory, tiny compared
to the original data, and its running time is on average the best across
all scenarios. | [
"An",
"optimized",
"Insert",
"strategy",
".",
"Guarantee",
"successful",
"and",
"highest",
"insertion",
"speed",
".",
"But",
"ATOMIC",
"WRITE",
"IS",
"NOT",
"ENSURED",
"IF",
"THE",
"PROGRAM",
"IS",
"INTERRUPTED",
"."
] | 96282b779a3efb422802de83c48ca284598ba952 | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/pkg/sqlalchemy_mate/crud/inserting.py#L16-L61 | train |
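A usage sketch for the strategy above: with n rows, a failed bulk insert is split into chunks of roughly sqrt(n) rows (10,000 rows give about 100 chunks of 100 each), recursing until a chunk is small enough to insert row by row. The import path is inferred from this record's path column, and table_user is the table from the selecting sketches:

from uszipcode.pkg.sqlalchemy_mate.crud.inserting import smart_insert

data = [{"id": i, "name": "user_%d" % i} for i in range(1, 1001)]
data[500]["id"] = 1  # plant a duplicate primary key to force an IntegrityError

# rows whose primary key already exists are silently skipped; everything
# else ends up inserted
smart_insert(engine, table_user, data, minimal_size=5)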
Hironsan/HateSonar | hatesonar/crawler/twitter.py | load_keys | def load_keys():
"""Loads Twitter keys.
Returns:
tuple: consumer_key, consumer_secret, access_token, access_token_secret
"""
consumer_key = os.environ.get('CONSUMER_KEY')
consumer_secret = os.environ.get('CONSUMER_SECRET')
access_token = os.environ.get('ACCESS_TOKEN')
access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')
return consumer_key, consumer_secret, access_token, access_token_secret | python | def load_keys():
"""Loads Twitter keys.
Returns:
tuple: consumer_key, consumer_secret, access_token, access_token_secret
"""
consumer_key = os.environ.get('CONSUMER_KEY')
consumer_secret = os.environ.get('CONSUMER_SECRET')
access_token = os.environ.get('ACCESS_TOKEN')
access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')
return consumer_key, consumer_secret, access_token, access_token_secret | [
"def",
"load_keys",
"(",
")",
":",
"consumer_key",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'CONSUMER_KEY'",
")",
"consumer_secret",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'CONSUMER_SECRET'",
")",
"access_token",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'ACCESS_TOKEN'",
")",
"access_token_secret",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'ACCESS_TOKEN_SECRET'",
")",
"return",
"consumer_key",
",",
"consumer_secret",
",",
"access_token",
",",
"access_token_secret"
] | Loads Twitter keys.
Returns:
tuple: consumer_key, consumer_secret, access_token, access_token_secret | [
"Loads",
"Twitter",
"keys",
"."
] | 39ede274119bb128ac32ba3e6d7d58f6104d2354 | https://github.com/Hironsan/HateSonar/blob/39ede274119bb128ac32ba3e6d7d58f6104d2354/hatesonar/crawler/twitter.py#L9-L20 | train |
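A sketch of the intended call pattern. The variables are normally exported in the shell before the process starts; os.environ.get() returns None for any key that is missing:

import os

# stand-in values; real keys come from the Twitter developer console
os.environ["CONSUMER_KEY"] = "xxx"
os.environ["CONSUMER_SECRET"] = "xxx"
os.environ["ACCESS_TOKEN"] = "xxx"
os.environ["ACCESS_TOKEN_SECRET"] = "xxx"

from hatesonar.crawler.twitter import load_keys

consumer_key, consumer_secret, access_token, access_token_secret = load_keys()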
Hironsan/HateSonar | hatesonar/crawler/twitter.py | TwitterAPI.search | def search(self, q):
"""Search tweets by keyword.
Args:
q: keyword
Returns:
list: tweet list
"""
results = self._api.search(q=q)
return results | python | def search(self, q):
"""Search tweets by keyword.
Args:
q: keyword
Returns:
list: tweet list
"""
results = self._api.search(q=q)
return results | [
"def",
"search",
"(",
"self",
",",
"q",
")",
":",
"results",
"=",
"self",
".",
"_api",
".",
"search",
"(",
"q",
"=",
"q",
")",
"return",
"results"
] | Search tweets by keyword.
Args:
q: keyword
Returns:
list: tweet list | [
"Search",
"tweets",
"by",
"keyword",
"."
] | 39ede274119bb128ac32ba3e6d7d58f6104d2354 | https://github.com/Hironsan/HateSonar/blob/39ede274119bb128ac32ba3e6d7d58f6104d2354/hatesonar/crawler/twitter.py#L30-L41 | train |
Hironsan/HateSonar | hatesonar/crawler/twitter.py | TwitterAPI.search_by_user | def search_by_user(self, screen_name, count=100):
"""Search tweets by user.
Args:
screen_name: screen name
count: the number of tweets
Returns:
list: tweet list
"""
results = self._api.user_timeline(screen_name=screen_name, count=count)
return results | python | def search_by_user(self, screen_name, count=100):
"""Search tweets by user.
Args:
screen_name: screen name
count: the number of tweets
Returns:
list: tweet list
"""
results = self._api.user_timeline(screen_name=screen_name, count=count)
return results | [
"def",
"search_by_user",
"(",
"self",
",",
"screen_name",
",",
"count",
"=",
"100",
")",
":",
"results",
"=",
"self",
".",
"_api",
".",
"user_timeline",
"(",
"screen_name",
"=",
"screen_name",
",",
"count",
"=",
"count",
")",
"return",
"results"
] | Search tweets by user.
Args:
screen_name: screen name
count: the number of tweets
Returns:
list: tweet list | [
"Search",
"tweets",
"by",
"user",
"."
] | 39ede274119bb128ac32ba3e6d7d58f6104d2354 | https://github.com/Hironsan/HateSonar/blob/39ede274119bb128ac32ba3e6d7d58f6104d2354/hatesonar/crawler/twitter.py#L43-L55 | train |
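Putting the two methods together. This assumes TwitterAPI's constructor, which is not shown in these records, wires the keys from load_keys() into a tweepy client behind self._api:

from hatesonar.crawler.twitter import TwitterAPI

api = TwitterAPI()  # assumed no-argument construction; keys read from the environment
for tweet in api.search(q="hate speech detection"):
    print(tweet.text)
for tweet in api.search_by_user(screen_name="some_user", count=10):
    print(tweet.text)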
YosaiProject/yosai | yosai/core/mgt/mgt.py | AbstractRememberMeManager.on_successful_login | def on_successful_login(self, subject, authc_token, account_id):
"""
Reacts to the successful login attempt by first always
forgetting any previously stored identity. Then if the authc_token
is a ``RememberMe`` type of token, the associated identity
will be remembered for later retrieval during a new user session.
:param subject: the subject whose identifying attributes are being
remembered
:param authc_token: the token that resulted in a successful
authentication attempt
:param account_id: id of authenticated account
"""
# always clear any previous identity:
self.forget_identity(subject)
# now save the new identity:
if authc_token.is_remember_me:
self.remember_identity(subject, authc_token, account_id)
else:
msg = ("AuthenticationToken did not indicate that RememberMe is "
"requested. RememberMe functionality will not be executed "
"for corresponding account.")
logger.debug(msg) | python | def on_successful_login(self, subject, authc_token, account_id):
"""
Reacts to the successful login attempt by first always
forgetting any previously stored identity. Then if the authc_token
is a ``RememberMe`` type of token, the associated identity
will be remembered for later retrieval during a new user session.
:param subject: the subject whose identifying attributes are being
remembered
:param authc_token: the token that resulted in a successful
authentication attempt
:param account_id: id of authenticated account
"""
# always clear any previous identity:
self.forget_identity(subject)
# now save the new identity:
if authc_token.is_remember_me:
self.remember_identity(subject, authc_token, account_id)
else:
msg = ("AuthenticationToken did not indicate that RememberMe is "
"requested. RememberMe functionality will not be executed "
"for corresponding account.")
logger.debug(msg) | [
"def",
"on_successful_login",
"(",
"self",
",",
"subject",
",",
"authc_token",
",",
"account_id",
")",
":",
"# always clear any previous identity:",
"self",
".",
"forget_identity",
"(",
"subject",
")",
"# now save the new identity:",
"if",
"authc_token",
".",
"is_remember_me",
":",
"self",
".",
"remember_identity",
"(",
"subject",
",",
"authc_token",
",",
"account_id",
")",
"else",
":",
"msg",
"=",
"(",
"\"AuthenticationToken did not indicate that RememberMe is \"",
"\"requested. RememberMe functionality will not be executed \"",
"\"for corresponding account.\"",
")",
"logger",
".",
"debug",
"(",
"msg",
")"
] | Reacts to the successful login attempt by first always
forgetting any previously stored identity. Then if the authc_token
is a ``RememberMe`` type of token, the associated identity
will be remembered for later retrieval during a new user session.
:param subject: the subject whose identifying attributes are being
remembered
:param authc_token: the token that resulted in a successful
authentication attempt
:param account_id: id of authenticated account | [
"Reacts",
"to",
"the",
"successful",
"login",
"attempt",
"by",
"first",
"always",
"forgetting",
"any",
"previously",
"stored",
"identity",
".",
"Then",
"if",
"the",
"authc_token",
"is",
"a",
"RememberMe",
"type",
"of",
"token",
"the",
"associated",
"identity",
"will",
"be",
"remembered",
"for",
"later",
"retrieval",
"during",
"a",
"new",
"user",
"session",
"."
] | 7f96aa6b837ceae9bf3d7387cd7e35f5ab032575 | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L137-L160 | train |
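A toy model of the hook's control flow: always forget first, then remember only when the token opts in. Nothing here is Yosai API; the callables are stand-ins:

class FakeToken:
    def __init__(self, is_remember_me):
        self.is_remember_me = is_remember_me

def on_successful_login(forget, remember, subject, authc_token, account_id):
    forget(subject)  # always clear any previously stored identity
    if authc_token.is_remember_me:  # persist only when the token requests it
        remember(subject, authc_token, account_id)

on_successful_login(
    forget=lambda s: print("forgot", s),
    remember=lambda s, t, a: print("remembered", s, "as", a),
    subject="subject-1", authc_token=FakeToken(True), account_id="acct-9",
)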
YosaiProject/yosai | yosai/core/mgt/mgt.py | AbstractRememberMeManager.remember_identity | def remember_identity(self, subject, authc_token, account_id):
"""
Yosai consolidates rememberIdentity, an overloaded method in Java,
to a method that will use an identifier-else-account logic.
Remembers a subject-unique identity for retrieval later. This
implementation first resolves the exact identifying attributes to
remember. It then remembers these identifying attributes by calling
remember_identity(Subject, IdentifierCollection)
:param subject: the subject for which the identifying attributes are
being remembered
:param authc_token: ignored in the AbstractRememberMeManager
:param account_id: the account id of authenticated account
"""
try:
identifiers = self.get_identity_to_remember(subject, account_id)
except AttributeError:
msg = "Neither account_id nor identifier arguments passed"
raise AttributeError(msg)
encrypted = self.convert_identifiers_to_bytes(identifiers)
self.remember_encrypted_identity(subject, encrypted) | python | def remember_identity(self, subject, authc_token, account_id):
"""
Yosai consolidates rememberIdentity, an overloaded method in Java,
to a method that will use an identifier-else-account logic.
Remembers a subject-unique identity for retrieval later. This
implementation first resolves the exact identifying attributes to
remember. It then remembers these identifying attributes by calling
remember_identity(Subject, IdentifierCollection)
:param subject: the subject for which the identifying attributes are
being remembered
:param authc_token: ignored in the AbstractRememberMeManager
:param account_id: the account id of authenticated account
"""
try:
identifiers = self.get_identity_to_remember(subject, account_id)
except AttributeError:
msg = "Neither account_id nor identifier arguments passed"
raise AttributeError(msg)
encrypted = self.convert_identifiers_to_bytes(identifiers)
self.remember_encrypted_identity(subject, encrypted) | [
"def",
"remember_identity",
"(",
"self",
",",
"subject",
",",
"authc_token",
",",
"account_id",
")",
":",
"try",
":",
"identifiers",
"=",
"self",
".",
"get_identity_to_remember",
"(",
"subject",
",",
"account_id",
")",
"except",
"AttributeError",
":",
"msg",
"=",
"\"Neither account_id nor identifier arguments passed\"",
"raise",
"AttributeError",
"(",
"msg",
")",
"encrypted",
"=",
"self",
".",
"convert_identifiers_to_bytes",
"(",
"identifiers",
")",
"self",
".",
"remember_encrypted_identity",
"(",
"subject",
",",
"encrypted",
")"
] | Yosai consolidates rememberIdentity, an overloaded method in Java,
to a method that will use an identifier-else-account logic.
Remembers a subject-unique identity for retrieval later. This
implementation first resolves the exact identifying attributes to
remember. It then remembers these identifying attributes by calling
remember_identity(Subject, IdentifierCollection)
:param subject: the subject for which the identifying attributes are
being remembered
:param authc_token: ignored in the AbstractRememberMeManager
:param account_id: the account id of authenticated account | [
"Yosai",
"consolidates",
"rememberIdentity",
"an",
"overloaded",
"method",
"in",
"java",
"to",
"a",
"method",
"that",
"will",
"use",
"an",
"identifier",
"-",
"else",
"-",
"account",
"logic",
"."
] | 7f96aa6b837ceae9bf3d7387cd7e35f5ab032575 | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L162-L183 | train |
YosaiProject/yosai | yosai/core/mgt/mgt.py | AbstractRememberMeManager.convert_bytes_to_identifiers | def convert_bytes_to_identifiers(self, encrypted, subject_context):
"""
If a cipher_service is available, it will be used to first decrypt the
serialized message. Then, the bytes are deserialized and returned.
:param encrypted: the bytes to decrypt and then deserialize
:param subject_context: the contextual data that is being
used to construct a Subject instance
:returns: the de-serialized identifier
"""
# unlike Shiro, Yosai assumes that the message is encrypted:
decrypted = self.decrypt(encrypted)
return self.serialization_manager.deserialize(decrypted) | python | def convert_bytes_to_identifiers(self, encrypted, subject_context):
"""
If a cipher_service is available, it will be used to first decrypt the
serialized message. Then, the bytes are deserialized and returned.
:param encrypted: the bytes to decrypt and then deserialize
:param subject_context: the contextual data that is being
used to construct a Subject instance
:returns: the de-serialized identifier
"""
# unlike Shiro, Yosai assumes that the message is encrypted:
decrypted = self.decrypt(encrypted)
return self.serialization_manager.deserialize(decrypted) | [
"def",
"convert_bytes_to_identifiers",
"(",
"self",
",",
"encrypted",
",",
"subject_context",
")",
":",
"# unlike Shiro, Yosai assumes that the message is encrypted:",
"decrypted",
"=",
"self",
".",
"decrypt",
"(",
"encrypted",
")",
"return",
"self",
".",
"serialization_manager",
".",
"deserialize",
"(",
"decrypted",
")"
] | If a cipher_service is available, it will be used to first decrypt the
serialized message. Then, the bytes are deserialized and returned.
:param encrypted: the bytes to decrypt and then deserialize
:param subject_context: the contextual data that is being
used to construct a Subject instance
:returns: the de-serialized identifier | [
"If",
"a",
"cipher_service",
"is",
"available",
"it",
"will",
"be",
"used",
"to",
"first",
"decrypt",
"the",
"serialized",
"message",
".",
"Then",
"the",
"bytes",
"are",
"deserialized",
"and",
"returned",
"."
] | 7f96aa6b837ceae9bf3d7387cd7e35f5ab032575 | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L245-L259 | train |
YosaiProject/yosai | yosai/core/mgt/mgt.py | AbstractRememberMeManager.encrypt | def encrypt(self, serialized):
"""
Encrypts the serialized message using Fernet
:param serialized: the serialized object to encrypt
:type serialized: bytes
:returns: the encrypted bytes returned by Fernet
"""
fernet = Fernet(self.encryption_cipher_key)
return fernet.encrypt(serialized) | python | def encrypt(self, serialized):
"""
Encrypts the serialized message using Fernet
:param serialized: the serialized object to encrypt
:type serialized: bytes
:returns: the encrypted bytes returned by Fernet
"""
fernet = Fernet(self.encryption_cipher_key)
return fernet.encrypt(serialized) | [
"def",
"encrypt",
"(",
"self",
",",
"serialized",
")",
":",
"fernet",
"=",
"Fernet",
"(",
"self",
".",
"encryption_cipher_key",
")",
"return",
"fernet",
".",
"encrypt",
"(",
"serialized",
")"
] | Encrypts the serialized message using Fernet
:param serialized: the serialized object to encrypt
:type serialized: bytes
:returns: the encrypted bytes returned by Fernet | [
"Encrypts",
"the",
"serialized",
"message",
"using",
"Fernet"
] | 7f96aa6b837ceae9bf3d7387cd7e35f5ab032575 | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L294-L304 | train |
YosaiProject/yosai | yosai/core/mgt/mgt.py | AbstractRememberMeManager.decrypt | def decrypt(self, encrypted):
"""
Decrypts the encrypted message using Fernet
:param encrypted: the encrypted message
:returns: the decrypted, serialized identifier collection
"""
fernet = Fernet(self.decryption_cipher_key)
return fernet.decrypt(encrypted) | python | def decrypt(self, encrypted):
"""
Decrypts the encrypted message using Fernet
:param encrypted: the encrypted message
:returns: the decrypted, serialized identifier collection
"""
fernet = Fernet(self.decryption_cipher_key)
return fernet.decrypt(encrypted) | [
"def",
"decrypt",
"(",
"self",
",",
"encrypted",
")",
":",
"fernet",
"=",
"Fernet",
"(",
"self",
".",
"decryption_cipher_key",
")",
"return",
"fernet",
".",
"decrypt",
"(",
"encrypted",
")"
] | Decrypts the encrypted message using Fernet
:param encrypted: the encrypted message
:returns: the decrypted, serialized identifier collection | [
"decrypts",
"the",
"encrypted",
"message",
"using",
"Fernet"
] | 7f96aa6b837ceae9bf3d7387cd7e35f5ab032575 | https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/mgt/mgt.py#L306-L314 | train |
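The encrypt/decrypt pair above is a thin wrapper over the cryptography package's Fernet. A round-trip sketch with a freshly generated key; in the manager, encryption_cipher_key and decryption_cipher_key would hold the same symmetric key:

from cryptography.fernet import Fernet

key = Fernet.generate_key()  # urlsafe base64-encoded 32-byte key
fernet = Fernet(key)

serialized = b"serialized-identifier-bytes"  # stand-in for serialized identifiers
token = fernet.encrypt(serialized)  # what remember_encrypted_identity would store
assert fernet.decrypt(token) == serialized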