blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c0c7402354a7cb6102aa6435284b924201a99110 | 6b41558f2c07b86da06c69dbf06363a43f08b644 | /python-gui/venv/lib/python3.7/site-packages/nidaqmx/_task_modules/channels/di_channel.py | 9c664c03946a3ba63e5b5793548a9ba507b291df | [] | no_license | Sammyalhashe/Charter-cp | 5222ed249e50d3b5eeb468d90bc9dc34cb5eccd4 | 1652d7b25c986d0a4c1f08bc68252132ed3566d0 | refs/heads/master | 2023-03-14T13:00:08.591094 | 2019-10-26T04:21:37 | 2019-10-26T04:21:37 | 174,767,319 | 0 | 0 | null | 2023-02-28T10:40:37 | 2019-03-10T02:05:10 | Python | UTF-8 | Python | false | false | 29,368 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import numpy
from nidaqmx._lib import lib_importer, ctypes_byte_str, c_bool32
from nidaqmx.errors import (
check_for_error, is_string_buffer_too_small, is_array_buffer_too_small)
from nidaqmx._task_modules.channels.channel import Channel
from nidaqmx.constants import (
ActiveOrInactiveEdgeSelection, DataTransferActiveTransferMode,
InputDataTransferCondition, LogicFamily)
class DIChannel(Channel):
"""
Represents one or more digital input virtual channels and their properties.
"""
__slots__ = []
def __repr__(self):
return 'DIChannel(name={0})'.format(self._name)
@property
def di_acquire_on(self):
"""
:class:`nidaqmx.constants.ActiveOrInactiveEdgeSelection`:
Specifies on which edge of the sample clock to acquire
samples.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDIAcquireOn
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return ActiveOrInactiveEdgeSelection(val.value)
@di_acquire_on.setter
def di_acquire_on(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDIAcquireOn
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_acquire_on.deleter
def di_acquire_on(self):
cfunc = lib_importer.windll.DAQmxResetDIAcquireOn
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_data_xfer_mech(self):
"""
:class:`nidaqmx.constants.DataTransferActiveTransferMode`:
Specifies the data transfer mode for the device.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDIDataXferMech
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return DataTransferActiveTransferMode(val.value)
@di_data_xfer_mech.setter
def di_data_xfer_mech(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDIDataXferMech
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_data_xfer_mech.deleter
def di_data_xfer_mech(self):
cfunc = lib_importer.windll.DAQmxResetDIDataXferMech
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_data_xfer_req_cond(self):
"""
:class:`nidaqmx.constants.InputDataTransferCondition`: Specifies
under what condition to transfer data from the onboard
memory of the device to the buffer.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDIDataXferReqCond
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return InputDataTransferCondition(val.value)
@di_data_xfer_req_cond.setter
def di_data_xfer_req_cond(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDIDataXferReqCond
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_data_xfer_req_cond.deleter
def di_data_xfer_req_cond(self):
cfunc = lib_importer.windll.DAQmxResetDIDataXferReqCond
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_dig_fltr_enable(self):
"""
bool: Specifies whether to enable the digital filter for the
line(s) or port(s). You can enable the filter on a line-by-
line basis. You do not have to enable the filter for all
lines in a channel.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDIDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_dig_fltr_enable.setter
def di_dig_fltr_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetDIDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_dig_fltr_enable.deleter
def di_dig_fltr_enable(self):
cfunc = lib_importer.windll.DAQmxResetDIDigFltrEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_dig_fltr_enable_bus_mode(self):
"""
bool: Specifies whether to enable bus mode for digital
filtering. If you set this property to True, NI-DAQmx treats
all lines that use common filtering settings as a bus. If
any line in the bus has jitter, all lines in the bus hold
state until the entire bus stabilizes, or until 2 times the
minimum pulse width elapses. If you set this property to
False, NI-DAQmx filters all lines individually. Jitter in
one line does not affect other lines.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDIDigFltrEnableBusMode
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_dig_fltr_enable_bus_mode.setter
def di_dig_fltr_enable_bus_mode(self, val):
cfunc = lib_importer.windll.DAQmxSetDIDigFltrEnableBusMode
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_dig_fltr_enable_bus_mode.deleter
def di_dig_fltr_enable_bus_mode(self):
cfunc = lib_importer.windll.DAQmxResetDIDigFltrEnableBusMode
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_dig_fltr_min_pulse_width(self):
"""
float: Specifies in seconds the minimum pulse width the filter
recognizes as a valid high or low state transition.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetDIDigFltrMinPulseWidth
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_dig_fltr_min_pulse_width.setter
def di_dig_fltr_min_pulse_width(self, val):
cfunc = lib_importer.windll.DAQmxSetDIDigFltrMinPulseWidth
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_double]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_dig_fltr_min_pulse_width.deleter
def di_dig_fltr_min_pulse_width(self):
cfunc = lib_importer.windll.DAQmxResetDIDigFltrMinPulseWidth
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_dig_fltr_timebase_rate(self):
"""
float: Specifies in hertz the rate of the digital filter
timebase. NI-DAQmx uses this value to compute settings for
the filter.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetDIDigFltrTimebaseRate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_dig_fltr_timebase_rate.setter
def di_dig_fltr_timebase_rate(self, val):
cfunc = lib_importer.windll.DAQmxSetDIDigFltrTimebaseRate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_double]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_dig_fltr_timebase_rate.deleter
def di_dig_fltr_timebase_rate(self):
cfunc = lib_importer.windll.DAQmxResetDIDigFltrTimebaseRate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_dig_fltr_timebase_src(self):
"""
str: Specifies the terminal of the signal to use as the timebase
of the digital filter.
"""
cfunc = lib_importer.windll.DAQmxGetDIDigFltrTimebaseSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_char_p, ctypes.c_uint]
temp_size = 0
while True:
val = ctypes.create_string_buffer(temp_size)
size_or_code = cfunc(
self._handle, self._name, val, temp_size)
if is_string_buffer_too_small(size_or_code):
# Buffer size must have changed between calls; check again.
temp_size = 0
elif size_or_code > 0 and temp_size == 0:
# Buffer size obtained, use to retrieve data.
temp_size = size_or_code
else:
break
check_for_error(size_or_code)
return val.value.decode('ascii')
@di_dig_fltr_timebase_src.setter
def di_dig_fltr_timebase_src(self, val):
cfunc = lib_importer.windll.DAQmxSetDIDigFltrTimebaseSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes_byte_str]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_dig_fltr_timebase_src.deleter
def di_dig_fltr_timebase_src(self):
cfunc = lib_importer.windll.DAQmxResetDIDigFltrTimebaseSrc
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_dig_sync_enable(self):
"""
bool: Specifies whether to synchronize recognition of
transitions in the signal to the internal timebase of the
device.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDIDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_dig_sync_enable.setter
def di_dig_sync_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetDIDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_dig_sync_enable.deleter
def di_dig_sync_enable(self):
cfunc = lib_importer.windll.DAQmxResetDIDigSyncEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_invert_lines(self):
"""
bool: Specifies whether to invert the lines in the channel. If
you set this property to True, the lines are at high logic
when off and at low logic when on.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDIInvertLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_invert_lines.setter
def di_invert_lines(self, val):
cfunc = lib_importer.windll.DAQmxSetDIInvertLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_invert_lines.deleter
def di_invert_lines(self):
cfunc = lib_importer.windll.DAQmxResetDIInvertLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_logic_family(self):
"""
:class:`nidaqmx.constants.LogicFamily`: Specifies the logic
family to use for acquisition. A logic family corresponds to
voltage thresholds that are compatible with a group of
voltage standards. Refer to the device documentation for
information on the logic high and logic low voltages for
these logic families.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetDILogicFamily
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return LogicFamily(val.value)
@di_logic_family.setter
def di_logic_family(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetDILogicFamily
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_int]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_logic_family.deleter
def di_logic_family(self):
cfunc = lib_importer.windll.DAQmxResetDILogicFamily
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_mem_map_enable(self):
"""
bool: Specifies for NI-DAQmx to map hardware registers to the
memory space of the application, if possible. Normally, NI-
DAQmx maps hardware registers to memory accessible only to
the kernel. Mapping the registers to the memory space of the
application increases performance. However, if the
application accesses the memory space mapped to the
registers, it can adversely affect the operation of the
device and possibly result in a system crash.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDIMemMapEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_mem_map_enable.setter
def di_mem_map_enable(self, val):
cfunc = lib_importer.windll.DAQmxSetDIMemMapEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_mem_map_enable.deleter
def di_mem_map_enable(self):
cfunc = lib_importer.windll.DAQmxResetDIMemMapEnable
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_num_lines(self):
"""
int: Indicates the number of digital lines in the channel.
"""
val = ctypes.c_uint()
cfunc = lib_importer.windll.DAQmxGetDINumLines
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_uint)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@property
def di_tristate(self):
"""
bool: Specifies whether to tristate the lines in the channel. If
you set this property to True, NI-DAQmx tristates the lines
in the channel. If you set this property to False, NI-DAQmx
does not modify the configuration of the lines even if the
lines were previously tristated. Set this property to False
to read lines in other tasks or to read output-only lines.
"""
val = c_bool32()
cfunc = lib_importer.windll.DAQmxGetDITristate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(c_bool32)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_tristate.setter
def di_tristate(self, val):
cfunc = lib_importer.windll.DAQmxSetDITristate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str, c_bool32]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_tristate.deleter
def di_tristate(self):
cfunc = lib_importer.windll.DAQmxResetDITristate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_usb_xfer_req_count(self):
"""
int: Specifies the maximum number of simultaneous USB transfers
used to stream data. Modify this value to affect performance
under different combinations of operating system and device.
"""
val = ctypes.c_uint()
cfunc = lib_importer.windll.DAQmxGetDIUsbXferReqCount
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_uint)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_usb_xfer_req_count.setter
def di_usb_xfer_req_count(self, val):
cfunc = lib_importer.windll.DAQmxSetDIUsbXferReqCount
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_uint]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_usb_xfer_req_count.deleter
def di_usb_xfer_req_count(self):
cfunc = lib_importer.windll.DAQmxResetDIUsbXferReqCount
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
@property
def di_usb_xfer_req_size(self):
"""
int: Specifies the maximum size of a USB transfer request in
bytes. Modify this value to affect performance under
different combinations of operating system and device.
"""
val = ctypes.c_uint()
cfunc = lib_importer.windll.DAQmxGetDIUsbXferReqSize
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_uint)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
@di_usb_xfer_req_size.setter
def di_usb_xfer_req_size(self, val):
cfunc = lib_importer.windll.DAQmxSetDIUsbXferReqSize
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.c_uint]
error_code = cfunc(
self._handle, self._name, val)
check_for_error(error_code)
@di_usb_xfer_req_size.deleter
def di_usb_xfer_req_size(self):
cfunc = lib_importer.windll.DAQmxResetDIUsbXferReqSize
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code)
| [
"[email protected]"
] | |
ac7200650548a96aa75c494dea0cd0ff98b1f957 | cab7ef4e4e7cac671f141f5f798ff10182bb91a3 | /test/cli_test.py | 9750307a6679441a631a7cdfc2ac1aa11eef8935 | [] | no_license | beesperester/python-scriptable-houdini | be547299232fa47f111727201fc3b66ea4785564 | 34183bff5cb79b18d4f680341a31ea8796cd4a1d | refs/heads/master | 2022-12-08T08:20:18.014984 | 2020-08-30T19:52:57 | 2020-08-30T19:52:57 | 291,506,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | import unittest
# pydini
from pydini.cli import cli
class TestCliMethods(unittest.TestCase):
def test_cli(self):
self.assertTrue(cli("file /Users/bernhardesperester/git/python-scriptable-houdini/examples/example"))
| [
"[email protected]"
] | |
c0c3bb2377ffb17d151d44f8e2220410d3ca089f | 99ddc4733f832dcabb4dad5e190b927ce9e0e629 | /result_data/publications_links.py | 364d0b517876d8d438320ea4718829a468a12791 | [] | no_license | Casyfill/scrapingMID | 41f760a560a4cd368784f66c6771d9fec426a2c6 | 433bcca3c9e4f6effb1851983f1b39c4ca1a2eb4 | refs/heads/master | 2021-01-20T10:46:06.816792 | 2015-06-17T15:17:56 | 2015-06-17T15:17:56 | 37,602,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## Philipp Kats (c) June 2015
## scraping MID : Публикации
import requests, scraperwiki #, json, urllib, time,random
from datetime import datetime
import lxml.html
def scrapePageSMI(link, earl):
'''
scraping page from MID: answers to SMI;
'''
unique_keys = ('link', 'date')
html = requests.get(link)
if html.status_code != requests.codes.ok:
# if something went bad
print 'server goes bad'
print html.status_code
return
dom = lxml.html.fromstring(html.content)
rows = dom.cssselect("#content > div > div > font > table tr") # remove first two rows
for row in rows:
try:
date=datetime.strptime(row.cssselect("td > b > font > a ")[0].text , "%m/%d/%Y")
title=row.cssselect("td a")[1].text
l='http://mid.ru' + row.cssselect("td a")[1].get('href')
print date, title,l
# page = lxml.html.fromstring(requests.get(l).content)
# text = page.cssselect("div.doc-text")[0].text_content().replace('\n', ' ').replace('\r', '').strip()
scraperwiki.sql.save(unique_keys, {'link':l, 'date':date, 'title':title})
except Exception, e:
print str(e)
print 'ой, не тот ряд: ' #, row.cssselect("td > b > font")[0].text
# print date
# теперь выясним, не стоит ли остановиться
if date < earl:
# если слишком далеко закопались
return None
return '!'
earl = datetime.strptime('01/06/2012' , "%d/%m/%Y") #earliest date to parse
baselink = 'http://mid.ru/ns_publ.nsf/rdipl?open&Start=' #стартовая позиция
for x in xrange(0,16):
link = baselink + str(1+30*x)
print link
l = scrapePageSMI(link, earl)
if l ==None: break
| [
"[email protected]"
] | |
2025408b6f084f06b7e9dcbdda87b99ee50c67b9 | 69f0297c53559373a003d57a11182adcd33ed9da | /podcast_publisher/settings.py | 1b06fa6ad51cce6cda5b08db13263c4bbf38fb41 | [] | no_license | dstuck/podcast_publisher_site | a309afebba083c226fdcd3d117d5ef777322c89e | 5715939fbde4eca0b8f013216eee2168122024ef | refs/heads/master | 2020-03-28T09:53:23.079135 | 2018-10-30T18:07:47 | 2018-10-30T18:07:47 | 148,067,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,311 | py | """
Django settings for podcast_publisher project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
LOGIN_URL = '/sermons/login/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "2q8y!qk!lh#k9^p$wg6e_q1pdmv&qz&xc_f92dk(0a2n@3v)3+"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'oauth_handler',
'sermon_storer'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'podcast_publisher.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'podcast_publisher.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.postgresql',
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| [
"[email protected]"
] | |
fa01652f7626b82391e281ce015ba9461201d5fc | e28978d62cd1db2ef1c4e8f45a1fa58ceba81714 | /upload_cardapio/migrations/0004_auto_20161110_0014.py | a616820b15e1662364b272434fcf37bfa8a6eb6a | [] | no_license | robertocsp/bipy3 | 749af8e83172bf9afca2724a95660c7869202ce6 | 6ae18c3bdbebdbe6ffe04ffe9d521f1aa54871e0 | refs/heads/master | 2020-04-09T13:30:55.799384 | 2017-03-22T13:17:00 | 2017-03-22T13:17:00 | 62,016,005 | 0 | 0 | null | 2016-10-20T13:20:19 | 2016-06-27T01:10:24 | JavaScript | UTF-8 | Python | false | false | 573 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-11 00:22
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import utils.bigint
class Migration(migrations.Migration):
dependencies = [
('loja', '0008_bigint_20161110_0004'),
]
operations = [
migrations.AlterField(
model_name='cardapio',
name='loja',
field=utils.bigint.BigForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='loja.Loja'),
),
]
| [
"[email protected]"
] | |
b0ae5f1d300dc3728c0512ca7c21046ffc3f7b45 | 01f2390a52c36fd455f5364aada53aae19da976b | /recipe/migrations/0005_auto_20191018_2127.py | 202f61ebad6126d288009508b07d5ed20aa02365 | [] | no_license | BenDowswell/Recipe-Site | f682f3c74bc4908d15033ac63ca7dc24c92df070 | 9b0237cae4a022bfdeb75ebd116a41869361635e | refs/heads/master | 2020-09-12T06:37:14.883096 | 2019-12-15T21:32:02 | 2019-12-15T21:32:02 | 222,343,220 | 0 | 0 | null | 2019-12-15T21:32:04 | 2019-11-18T02:01:07 | Python | UTF-8 | Python | false | false | 629 | py | # Generated by Django 2.2.6 on 2019-10-18 21:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('recipe', '0004_auto_20191018_2034'),
]
operations = [
migrations.RenameField(
model_name='recipe',
old_name='picture_item',
new_name='description',
),
migrations.RenameField(
model_name='recipe',
old_name='recipe_description',
new_name='name',
),
migrations.RemoveField(
model_name='recipe',
name='recipe_name',
),
]
| [
"[email protected]"
] | |
2f726caa6f3515f9be1e179dca3ea162e3e55a7b | 0f4fe3cc2b31d868e9869090d3b5f7f9e42a93c9 | /text_syllable/__init__.py | 10e4a4fd372ae9a2e7ec3dd35df3c85403c1b01a | [] | no_license | Fakhraddin/tf2-speech-recognition-ctc | 8f196d00e5080e0b29da1b3257110c3dab7ab40f | 746b3a2144391e40aecb388b089f7f8cfe394b7c | refs/heads/master | 2022-11-09T09:23:20.050414 | 2020-06-20T11:48:56 | 2020-06-20T11:48:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # coding: utf-8
import pickle

# Load the pre-fitted Keras-style tokenizer from disk at import time.
# NOTE(review): path is relative to the process CWD, not to this package.
with open('./text_syllable/syllable_tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

# Mappings from symbol to numeric ID and vice versa:
tokens = list(tokenizer.index_word.values())
PAD = '<p>'    # padding symbol, forced to index 0
BLANK = '_'    # CTC blank symbol, forced to the last index
tokens = [PAD] + tokens
# blank token (for CTC)
tokens.append(BLANK)
# id -> symbol and symbol -> id lookup tables over the final vocabulary.
index_token = {idx: ch for idx, ch in enumerate(tokens)}
token_index = {ch: idx for idx, ch in enumerate(tokens)}
| [
"[email protected]"
] | |
f189cffb60f3fded40f02e86a1807b620bcc37db | a5c416002e1b14413871b7a9bf7056df6d46ee50 | /polls/views.py | 45d20d779a3b0fc3725dce793ee8746d1e07713b | [] | no_license | WellingtonIdeao/polls-app | 4d650cfc0cd315a320f208a045c84de58ac35940 | ed7186c92d820bac2c4f53a4089f2357f4212a13 | refs/heads/master | 2023-05-02T22:42:48.015176 | 2021-05-14T17:38:26 | 2021-05-14T17:38:26 | 357,293,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from .models import Question, Choice
from django.urls import reverse
from django.views import generic
from django.utils import timezone
# Create your views here.
class IndexView(generic.ListView):
    """List view for the poll index page (five most recent questions)."""

    template_name = 'polls/index.html'
    # Name under which the queryset is exposed to the template.
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """
        Return the last five published questions (not including those set to be
        published in the future).
        """
        return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Detail page for a single question (its voting form)."""

    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultView(generic.DetailView):
    """Results page showing vote tallies for a single question."""

    model = Question
    template_name = 'polls/results.html'

    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
def vote(request, question_id):
    """Record a vote for one of *question_id*'s choices.

    On a missing/invalid choice the detail form is re-rendered with an
    error message; on success the client is redirected to the results page.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        select_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form
        context = {
            'question': question,
            'error_message': "You didn't select a choice.",
        }
        return render(request, 'polls/detail.html', context)
    else:
        # Bug fix: incrementing in Python (votes += 1) is a read-modify-write
        # that loses votes under concurrent requests. An F() expression makes
        # the database perform the increment atomically.
        from django.db.models import F
        select_choice.votes = F('votes') + 1
        select_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"[email protected]"
] | |
e04fe4f7498c5c51cd3c377088ad64ffb269d9d6 | 239b4c68aebe3263912e3805532cc16398392129 | /code/kaggle/model.py | dfc8e9f9cafa1eaad71a12058fd53535d9522aa2 | [] | no_license | khushjammu/deeplearning_coursera_notes | 3b3ec28cd54676ebbc7284187f8b64f28e24e065 | 89821143479881fd7a9f11bbd5ace83ad9ae8404 | refs/heads/master | 2020-08-07T23:10:22.209026 | 2019-10-08T10:54:16 | 2019-10-08T10:54:16 | 213,617,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,158 | py | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
print("Importing modules")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import math
#import numpy as np
import h5py
import matplotlib.pyplot as plt
#import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print("Finished importing")
print("Defining model")
def create_placeholders(n_x, n_y):
    """Create TF1 placeholders for inputs X (n_x, m) and labels Y (n_y, m).

    The second dimension is None so any minibatch size can be fed.
    """
    # Bug fix: the original passed the bare name ``float32`` (a NameError at
    # call time); the dtype must be referenced through the tensorflow module.
    X = tf.placeholder(tf.float32, shape=[n_x, None])
    Y = tf.placeholder(tf.float32, shape=[n_y, None])
    return X, Y
def initialize_parameters():
    """Create Xavier-initialized variables for a 3-layer net (18 -> 25 -> 12 -> 6).

    Returns:
        dict mapping 'W1'..'b3' to the tf.Variable objects.
    """
    W1 = tf.get_variable("W1", [25, 18], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    # NOTE(review): biases are conventionally zero-initialized; Xavier init is
    # kept here to preserve the original behavior.
    parameters = {
        'W1': W1,
        'b1': b1,
        'W2': W2,
        'b2': b2,
        'W3': W3,
        'b3': b3
    }
    # Bug fix: the original built this dict but never returned it, so every
    # caller (e.g. model()) received None and forward propagation crashed.
    return parameters
def forward_propagation(X, parameters):
    """Build the LINEAR->RELU->LINEAR->RELU->LINEAR graph; return the logits Z3."""
    W1, b1 = parameters['W1'], parameters['b1']
    W2, b2 = parameters['W2'], parameters['b2']
    W3, b3 = parameters['W3'], parameters['b3']

    A1 = tf.nn.relu(tf.add(tf.matmul(W1, X), b1))
    A2 = tf.nn.relu(tf.add(tf.matmul(W2, A1), b2))
    # Final linear layer: softmax is applied later inside the loss.
    return tf.matmul(W3, A2) + b3
def compute_cost(Z3, Y):
    """Mean softmax cross-entropy between logits Z3 and one-hot labels Y.

    Both tensors come in as (classes, examples); TF expects
    (examples, classes), hence the transposes.
    """
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.transpose(Z3),
        labels=tf.transpose(Y),
    )
    return tf.reduce_mean(cross_entropy)
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- test set, of shape (output size = 6, number of training examples = 1080)
    X_test -- training set, of shape (input size = 12288, number of training examples = 120)
    Y_test -- test set, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep consistent results
    seed = 3                                          # to keep consistent results
    (n_x, m) = X_train.shape                          # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]                            # n_y : output size
    costs = []                                        # To keep track of the cost
    # Create Placeholders of shape (n_x, n_y)
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_x, n_y)
    ### END CODE HERE ###
    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###
    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###
    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###
    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    ### START CODE HERE ### (1 line)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    ### END CODE HERE ###
    # Initialize all the variables
    init = tf.global_variables_initializer()
    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)
        # Do the training loop
        for epoch in range(num_epochs):
            epoch_cost = 0.                       # Defines a cost related to an epoch
            # NOTE(review): if m < minibatch_size this is 0 and the division
            # below raises ZeroDivisionError -- confirm intended input sizes.
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y).
                ### START CODE HERE ### (1 line)
                _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                ### END CODE HERE ###
                epoch_cost += minibatch_cost / num_minibatches
            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)
        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")
        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
        return parameters
print("Finished defining model.")
print("Trying to import dataset")
'''
train_path = './kc_house_data.csv'
ds = tf.data.TextLineDataset(train_path).skip(1)
# Metadata describing the text columns
FIELD_DEFAULTS = [[0.], [0.], [0], [0.], [0.], [0], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0], [0], [0.], [0.]]
def _parse_line(line):
# Decode the line into its fields
fields = tf.decode_csv(line, FIELD_DEFAULTS)
parsed_line = tf.decode_csv(line, FIELD_DEFAULTS)
# First 4 fields are features, combine into single tensor
features = tf.reshape(parsed_line[1:17], shape=(18,None))
# Last field is the label
label = tf.reshape(parsed_line[0], shape=())
return features, label
ds = ds.map(_parse_line)
print(ds)
'''
#dataset = '/Users/hdadmin/Data/actions/testing.csv'
dataset = './kc_house_data.csv'
def file_len(fname):
    """Return the number of lines in *fname* (0 for an empty file).

    Bug fix: the original returned ``i + 1`` after ``enumerate``, which
    raised NameError when the file was empty because ``i`` was never bound.
    """
    with open(fname) as f:
        return sum(1 for _ in f)
def read_from_csv(filename_queue):
    """Read one CSV row from *filename_queue* and parse it.

    Returns:
        (features, label) tensors: 4 integer feature columns stacked into a
        vector, and the final column as a length-1 label tensor.
    """
    reader = tf.TextLineReader(skip_header_lines=1)
    _, csv_row = reader.read(filename_queue)
    record_defaults = [[0], [0], [0], [0], [0]]
    colHour, colQuarter, colAction, colUser, colLabel = tf.decode_csv(
        csv_row, record_defaults=record_defaults)
    # Bug fix: ``tf.TensorArray.pack`` is not callable this way (pack is an
    # instance method of TensorArray); ``tf.stack`` is the op that combines
    # scalar tensors into a vector.
    features = tf.stack([colHour, colQuarter, colAction, colUser])
    label = tf.stack([colLabel])
    return features, label
def input_pipeline(batch_size, num_epochs=None):
    """Build a shuffled-batch input pipeline over the module-level ``dataset`` file.

    batch_size: number of rows per batch.
    num_epochs: how many passes over the file (None = unlimited).
    Returns (example_batch, label_batch) tensors.
    """
    filename_queue = tf.train.string_input_producer([dataset], num_epochs=num_epochs, shuffle=True)
    example, label = read_from_csv(filename_queue)
    # Queue sizing per the TF1 shuffle_batch guidance: keep at least
    # min_after_dequeue elements buffered for good shuffling.
    min_after_dequeue = 1000
    capacity = min_after_dequeue + 3 * batch_size
    example_batch, label_batch = tf.train.shuffle_batch(
        [example, label], batch_size=batch_size, capacity=capacity,
        min_after_dequeue=min_after_dequeue)
    return example_batch, label_batch
# Drive the pipeline: one batch containing every data row (header excluded).
file_length = file_len(dataset) - 1
examples, labels = input_pipeline(file_length, 1)
with tf.Session() as sess:
    # NOTE(review): init_op (which also initializes *local* variables, needed
    # for num_epochs) is built but never run; only global variables are
    # initialized on the next line -- confirm this was intentional.
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    tf.initialize_all_variables().run()
    # start populating filename queue
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        # Pull batches until the input producer exhausts its single epoch.
        while not coord.should_stop():
            example_batch, label_batch = sess.run([examples, labels])
            print(example_batch)
    except tf.errors.OutOfRangeError:
        print('Done training, epoch reached')
    finally:
        # Always stop and join the queue-runner threads.
        coord.request_stop()
        coord.join(threads)
#parameters = model(X_train, Y_train, X_test, Y_test)
| [
"[email protected]"
] | |
85d8e9a08bf8805856e91789fb535c04c1e4863d | d264dc04f643dbe22ef123c25d68d4c58547d732 | /pyAnVIL/anvil/transformers/fhir/specimen.py | 298cddb47f9a871cffb24c44bd4478f32cbc6bca | [
"Apache-2.0"
] | permissive | dionboles-asym/client-apis | 5c3c975d12cc7cc01d95b76500bdb53880c8eae7 | f000428f6dfd9a43ee6d379d761f0143228b4b5c | refs/heads/master | 2023-06-13T05:21:46.547032 | 2021-06-03T14:55:34 | 2021-06-03T14:55:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,621 | py | """Represent fhir entity."""
from anvil.transformers.fhir import CANONICAL, join, make_identifier
# specimen_type QUESTION: https://www.hl7.org/fhir/v2/0487/index.html
# specimen_type = {
# constants.SPECIMEN.COMPOSITION.BLOOD: {
# "system": "http://terminology.hl7.org/CodeSystem/v2-0487",
# "code": "BLD",
# "display": "Whole blood",
# },
# constants.SPECIMEN.COMPOSITION.SALIVA: {
# "system": "http://terminology.hl7.org/CodeSystem/v2-0487",
# "code": "SAL",
# "display": "Saliva",
# },
# constants.SPECIMEN.COMPOSITION.TISSUE: {
# "system": "http://terminology.hl7.org/CodeSystem/v2-0487",
# "code": "TISS",
# "display": "Tissue",
# },
# }
class Specimen:
    """Builds a FHIR Specimen resource from an AnVIL specimen record."""

    class_name = "specimen"
    resource_type = "Specimen"

    @staticmethod
    def identifier(specimen):
        """Create identifier."""
        # Composite slug: workspace name + specimen id.
        return make_identifier(specimen.workspace_name, specimen.id)

    @staticmethod
    def build_entity(specimen, subject):
        """Create fhir entity.

        specimen: record providing ``workspace_name`` and ``id``.
        subject: record providing the Patient ``id`` the specimen belongs to.
        Returns the Specimen resource as a plain dict.
        """
        study_id = specimen.workspace_name
        # study_id_slug = make_identifier(study_id)
        sample_id = specimen.id
        sample_id_slug = make_identifier(sample_id)
        # NOTE(review): the four fields below are hard-coded to None, so every
        # conditional block further down is currently dead placeholder code
        # awaiting real source data -- confirm before relying on it.
        event_age_days = None
        concentration_mg_per_ml = None
        composition = None
        volume_ul = None
        subject_id_slug = make_identifier('P', subject.id)
        entity = {
            "resourceType": Specimen.resource_type,
            "id": sample_id_slug,
            "meta": {
                "profile": [
                    "http://hl7.org/fhir/StructureDefinition/Specimen"
                ]
            },
            "identifier": [
                {
                    "system": f"https://anvil.terra.bio/#workspaces/anvil-datastorage/{study_id}",
                    "value": sample_id,
                },
                {
                    "system": "urn:ncpi:unique-string",
                    "value": join(Specimen.resource_type, study_id, sample_id),
                },
            ],
            "subject": {
                "reference": f"Patient/{subject_id_slug}"
            },
        }
        # event_age_days: QUESTION extension ?
        if event_age_days:
            entity.setdefault("extension", []).append(
                {
                    "url": f"{CANONICAL}/StructureDefinition/age-at-event",
                    "valueAge": {
                        "value": int(event_age_days),
                        "unit": "d",
                        "system": "http://unitsofmeasure.org",
                        "code": "days",
                    },
                }
            )
        # concentration_mg_per_ml: QUESTION extension ?
        if concentration_mg_per_ml:
            entity.setdefault("extension", []).append(
                {
                    "url": f"{CANONICAL}/StructureDefinition/concentration",
                    "valueQuantity": {
                        "value": float(concentration_mg_per_ml),
                        "unit": "mg/mL",
                    },
                }
            )
        # composition: QUESTION extension ?
        if composition:
            entity["type"] = {
                "coding": "TODO",  # [specimen_type[composition]],
                "text": composition,
            }
        # volume_ul: QUESTION extension ?
        if volume_ul:
            entity.setdefault("collection", {})["quantity"] = {
                "unit": "uL",
                "value": float(volume_ul),
            }
        return entity
| [
"[email protected]"
] | |
a40cb0bc5ac853e7a833dfa294f2bc552fd53cac | cc7dc8289acefc86af0c34cc91b2e2b3236105a4 | /Lab1 3/Pyramid/my_venv/hello_jax/setup.py | 1deca7888a25aac1914c7e9d19170efd856204fe | [] | no_license | patrykcejlowski/ORW | db32a2c9deb89571ee1bf4dd830c4662edf2c469 | 502476b5b98c88d96f7faf6745de1714992222f9 | refs/heads/master | 2021-01-10T05:25:22.500268 | 2016-01-26T22:29:29 | 2016-01-26T22:29:29 | 50,334,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | import os
from setuptools import setup, find_packages
# Resolve paths relative to this setup.py so sdist builds work from any CWD.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
    README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
    CHANGES = f.read()

# Runtime dependencies for the Pyramid application.
requires = [
    'pyramid',
    'pyramid_chameleon',
    'pyramid_debugtoolbar',
    'waitress',
    ]

setup(name='hello_jax',
      version='0.0',
      description='hello_jax',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
        "Programming Language :: Python",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        ],
      author='',
      author_email='',
      url='',
      keywords='web pyramid pylons',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=requires,
      tests_require=requires,
      test_suite="hello_jax",
      # Entry point used by ``pserve`` / PasteDeploy to create the WSGI app.
      entry_points="""\
      [paste.app_factory]
      main = hello_jax:main
      """,
      )
| [
"[email protected]"
] | |
86bd70e1a02f04d78b2b80ea47b7f1e989034403 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/binaryTree2_20200617161517.py | f3a4b120336aedbc1fa778594b3da68fe9568879 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # Create a node and assign a value to the node
# A tree node contains data then pointer to left child and pointer to right child
class Node:
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, data):
        """Store *data*; children start out absent."""
        self.data = data
        self.left = self.right = None
def inorder(root, newArr):
    """Append the values of *root*'s subtree to *newArr* in-order; return it.

    Bug fix: the original printed the partial accumulator at every level of
    the recursion (debug noise left in); the print has been removed.
    """
    if root:
        inorder(root.left, newArr)      # left subtree first
        newArr.append(root.data)        # then the node itself
        inorder(root.right, newArr)     # right subtree last
    return newArr
def morris_traversal(root):
    """Yield node values in-order without recursion or an explicit stack.

    Morris traversal: temporarily threads each node's in-order predecessor's
    right pointer back to the node, then removes the thread on the second
    visit, so the tree is restored when iteration completes. O(1) extra space.

    Bug fix: the original body was left unfinished (the ``else`` branch was
    empty, and the left-is-None branch never advanced ``current``, looping
    forever); this completes the standard algorithm.
    """
    current = root
    while current is not None:
        if current.left is None:
            # No left subtree: visit and move right.
            yield current.data
            current = current.right
        else:
            # Find the in-order predecessor (rightmost node of left subtree).
            pred = current.left
            while pred.right is not None and pred.right is not current:
                pred = pred.right
            if pred.right is None:
                # First visit: thread predecessor back to current, go left.
                pred.right = current
                current = current.left
            else:
                # Second visit: remove the thread, visit, go right.
                pred.right = None
                yield current.data
                current = current.right
# Demo: build a small tree and print its in-order traversal.
#        1
#       / \
#      2   3
#     / \
#    7   4
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.right = Node(4)
root.left.left = Node(7)
print(inorder(root,[]))
| [
"[email protected]"
] | |
e2dfe0688b587d1f971e90638686bd4c2a492dc5 | 98e81c74a6317587defef53f1a3fa23a747eda60 | /scripts/context.py | 84a2d216a0820ead0ca88b3f1d6b5e3d2b3da5c6 | [] | no_license | djiamnot/touchandflurry | 029aa81748a79b08f8b171974cbfae1fd4cf915d | 0d3ab55de1e20ec290815952cfb928eb9b8b22e9 | refs/heads/master | 2021-01-01T18:18:22.305909 | 2015-04-12T04:41:46 | 2015-04-12T04:41:46 | 30,054,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | #!/usr/bin/env python
from bge import logic
cont = logic.getCurrentController()
scene = logic.getCurrentScene()
def updateContext():
    """Refresh the module-level BGE controller/owner/scene globals.

    NOTE(review): ``obj`` is not initialized at module level, so it only
    exists as a global after the first call -- confirm callers rely on that.
    """
    global cont, obj, scene
    cont = logic.getCurrentController()
    obj = cont.owner
    scene = logic.getCurrentScene()
def isSensorPositive():
    """Return True if any sensor on the current BGE controller is firing."""
    from bge import logic
    controller = logic.getCurrentController()
    return any(sensor.positive for sensor in controller.sensors)
| [
"[email protected]"
] | |
006c90cf7df9ec40ff716c95f1414cc597574d4a | e9689e5ba11f19acec215fcdd89a931e0a8a81d4 | /erp/productlist/migrations/0031_auto_20170912_0013.py | 4c5197ce958c73b9c14fcb62b5cdcad112426e2e | [] | no_license | PythonAlan/foshan | 75d670954b3f18d239dcbddd3ed47243778aaeb9 | 256cfddd1ec47d723dd10c1a0aff0cb82bfcef2c | refs/heads/master | 2021-08-27T22:30:46.799845 | 2017-12-10T15:46:12 | 2017-12-10T15:46:12 | 113,762,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-11 16:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: replace ShippingTime.onway_days with a future_time date."""

    dependencies = [
        ('productlist', '0030_auto_20170911_2301'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='shippingtime',
            name='onway_days',
        ),
        migrations.AddField(
            model_name='shippingtime',
            name='future_time',
            # verbose_name is Chinese for "listing/shelf time".
            field=models.DateField(blank=True, null=True, verbose_name='上架时间'),
        ),
    ]
| [
"[email protected]"
] | |
8a52c13095581ba0eee722b2e42eeb95fa47e209 | 41f7085fffd12bb53222fdba00d033a43b9d7081 | /products/admin.py | 0f57b3b447ba98103af25b0208b0a22595fdd971 | [] | no_license | arifgafizov/online_store | b852e1bd32149268bbed9159f1037561a3d7e9a0 | 25c32f0ae65469e904509772d414a79a743ae31b | refs/heads/master | 2023-08-01T06:28:47.302377 | 2021-09-18T18:30:28 | 2021-09-18T18:30:28 | 345,300,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from django.contrib import admin
from .models import Product
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Admin config for Product: show key columns in the changelist."""
    list_display = ['id', 'title', 'price', 'is_deleted']
| [
"[email protected]"
] | |
fe2be4bf5bc9911ba5caca98dd735e0bff98f479 | 642526009a434c2a6e04fe0293279a151b216d0a | /dkube/sdk/internal/dkube_client/models/job_model_parameters_priority.py | 7316475303e09acb141fd218235d1971be7f1c38 | [] | no_license | mak-454/dkube-sdk | d4b8e7f7b1d8c0b0f64b10940ae42ab9d62f4654 | d2ba78a0abbda589efc0dbd957d9a8f6fd227464 | refs/heads/master | 2022-12-26T03:17:55.627379 | 2020-05-09T17:29:08 | 2020-05-09T17:29:08 | 262,622,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,278 | py | # coding: utf-8
"""
Dkube api server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class JobModelParametersPriority(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'donot_queue': 'str'
    }

    attribute_map = {
        'donot_queue': 'donot_queue'
    }

    def __init__(self, donot_queue=None):  # noqa: E501
        """JobModelParametersPriority - a model defined in Swagger"""  # noqa: E501
        self._donot_queue = None
        self.discriminator = None
        if donot_queue is not None:
            self.donot_queue = donot_queue

    @property
    def donot_queue(self):
        """Gets the donot_queue of this JobModelParametersPriority.  # noqa: E501


        :return: The donot_queue of this JobModelParametersPriority.  # noqa: E501
        :rtype: str
        """
        return self._donot_queue

    @donot_queue.setter
    def donot_queue(self, donot_queue):
        """Sets the donot_queue of this JobModelParametersPriority.


        :param donot_queue: The donot_queue of this JobModelParametersPriority.  # noqa: E501
        :type: str
        """

        self._donot_queue = donot_queue

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(JobModelParametersPriority, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, JobModelParametersPriority):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
27c6ca0d1487a5959863346286be54c89d36b592 | d6705f1df05264162fb4aeb7b5c9f9abcccde81d | /proxy/database/database.py | 7917a7925aac3abcfe5f3a2bdba77b91cb239db2 | [] | no_license | edilsonlonC/proxy-files | dd8dd66c27f3c28b4d341229e41dfeffbef9ca81 | 7e6eed96c3fc482d0c4acaf028805e5ac034734a | refs/heads/master | 2023-01-03T13:23:06.950785 | 2020-10-23T16:16:41 | 2020-10-23T16:16:41 | 296,515,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import pymongo
import os
import pprint
host = os.getenv("HOST")
port = os.getenv("DB_PORT")
db_name = os.getenv("DB_NAME")
print(host, port)
def database():
    """Open a MongoDB connection and return the configured database handle.

    Uses the module-level ``host``/``port``/``db_name`` read from the
    environment. NOTE(review): int(port) raises TypeError if DB_PORT is
    unset -- confirm the deployment always provides it.
    """
    client = pymongo.MongoClient(host, int(port))
    db = client[db_name]
    return db
| [
"[email protected]"
] | |
30c03984f4b854f1828500ebe4191db575ad42f4 | 1cd001212ffb898fbfc477f3e95532ed5d082a9c | /Models/Keras-Regressor/Backend/db.py | 74d3bac97c1d880be06fd6688a0fb41f8e063fe2 | [] | no_license | MyrionSC/P10 | 2a58b83e76a11ccd1184f44df1885e6928e229b6 | c35b543b7598378d194d5fad1e4d56def55e1929 | refs/heads/master | 2023-01-29T07:16:03.442838 | 2019-06-17T19:47:08 | 2019-06-17T19:47:08 | 168,322,410 | 0 | 0 | null | 2023-01-07T03:13:21 | 2019-01-30T10:14:44 | Python | UTF-8 | Python | false | false | 4,322 | py | import psycopg2
from Utils.LocalSettings import *
from psycopg2.extras import RealDictCursor
import pandas as pd
chosen_model = 'no_time'
def dijkstra_qry(model=chosen_model):
    # Build the *quoted* edge-SQL string passed as the first argument of
    # pgr_dijkstra (hence the surrounding single quotes inside the literal).
    # The baseline model stores cost in a different unit, hence the / 1000;
    # both variants add +1 to keep every edge cost strictly positive.
    if model == 'baseline':
        return '\'SELECT rou.segmentkey as id, startpoint as source, endpoint as target, segmentgeom as the_geom, model.cost / 1000 + 1 as cost FROM maps.routing3 rou JOIN models.{0} model ON model.segmentkey = rou.segmentkey AND model.direction = rou.direction\''.format(model)
    else:
        return '\'SELECT rou.segmentkey as id, startpoint as source, endpoint as target, segmentgeom as the_geom, model.cost + 1 as cost FROM maps.routing3 rou JOIN models.{0} model ON model.segmentkey = rou.segmentkey AND model.direction = rou.direction\''.format(model)
def routing_qry(origin, dest, model=chosen_model):
    # Full routing query: runs pgr_dijkstra over the model's edge SQL and
    # wraps the resulting path as a GeoJSON FeatureCollection (one Feature
    # per traversed segment, with per-segment and cumulative cost/length).
    return """
        SELECT row_to_json(fc)::text as path
        FROM(
            SELECT
                'FeaturesCollection' as "type",
                array_to_json(array_agg(f)) as "features"
            FROM (
                SELECT
                    'Feature' as "type",
                    ST_AsGeoJSON(segmentgeom, 6) :: json as "geometry",
                    json_build_object(
                        'cost', cost,
                        'agg_cost', agg_cost,
                        'length', length,
                        'agg_length', agg_length,
                        'segmentkey', segmentkey,
                        'direction', direction,
                        'startpoint', startpoint,
                        'endpoint', endpoint
                    ) :: json as "properties"
                FROM (
                    SELECT
                        osm.segmentkey,
                        segmentgeom,
                        pgr.cost,
                        pgr.agg_cost,
                        length,
                        sum(length) OVER (ORDER BY pgr.path_seq) as agg_length,
                        CASE WHEN pgr.node = osm.startpoint
                            THEN 'FORWARD'::functionality.direction_driving
                            ELSE 'BACKWARD'::functionality.direction_driving
                        END AS direction,
                        osm.startpoint,
                        osm.endpoint
                    FROM pgr_dijkstra({2}::text, {0}::bigint, {1}::bigint) pgr
                    JOIN maps.routing osm
                    ON pgr.edge = osm.segmentkey
                ) as q
            ) as f
        ) as fc
    """.format(origin, dest, dijkstra_qry(model))
def query(qry, db, cursor=None):
    """Run *qry* against the PostgreSQL database described by *db* and return all rows.

    db: dict with keys 'name', 'user', 'port', 'host', 'password'.
    cursor: optional psycopg2 cursor factory (e.g. RealDictCursor).

    Bug fix: the original leaked the connection and cursor whenever
    execute/fetchall raised; both are now closed in ``finally`` blocks.
    """
    conn = psycopg2.connect("dbname='{0}' user='{1}' port='{2}' host='{3}' password='{4}'".format(db['name'], db['user'], db['port'], db['host'], db['password']))
    try:
        if cursor is not None:
            cur = conn.cursor(cursor_factory=cursor)
        else:
            cur = conn.cursor()
        try:
            cur.execute(qry)
            rows = cur.fetchall()
        finally:
            cur.close()
    finally:
        conn.close()
    return rows
def get_route(origin, dest):
    # Route between two routing-graph vertices using the chosen cost model;
    # returns the GeoJSON text column of the single result row.
    return query(routing_qry(origin, dest, chosen_model), local_db)[0][0]
def get_baseline(origin, dest):
    # Same as get_route but forced to the 'baseline' cost model.
    return query(routing_qry(origin, dest, 'baseline'), local_db)[0][0]
def get_embedding(key):
    # Fetch the embedding row for a segment; drop the leading segmentkey
    # column and return the remaining vector rendered as a Python-list string.
    # NOTE(review): key is interpolated into SQL unparameterized -- safe only
    # for trusted integer keys; confirm callers never pass user input.
    qry = """
        SELECT *
        FROM embeddings.line
        WHERE segmentkey = {0}
    """.format(key)
    return str(list(query(qry, local_db)[0])[1:])
def get_weather_station(segmentkey: int) -> str:
    """Return the weather-station name mapped to *segmentkey*."""
    qry = """
        SELECT wsname
        FROM weather.segment_weatherstation_map
        WHERE segmentkey = {0}
    """.format(segmentkey)
    return query(qry, local_db)[0][0]
def get_baseline_and_actual(trip_id):
    # Per-segment baseline prediction vs. measured energy (kWh) for one trip,
    # with running cumulative sums ordered by trip_segmentno; returned as a
    # pandas DataFrame of dict rows (RealDictCursor).
    qry = """
        SELECT
            vit.id,
            preds.ev_wh / 1000 as baseline,
            sum(preds.ev_wh / 1000) OVER (ORDER BY vit.trip_segmentno) as agg_baseline,
            CASE WHEN vit.ev_kwh IS NOT NULL
                THEN vit.ev_kwh
                ELSE 0.0
            END as actual,
            sum(vit.ev_kwh) OVER (ORDER BY vit.trip_segmentno) as agg_actual
        FROM mapmatched_data.viterbi_match_osm_dk_20140101 vit
        JOIN experiments.rmp10_baseline_segment_predictions preds
        ON vit.segmentkey = preds.segmentkey
        WHERE vit.trip_id = {0}
        ORDER BY vit.trip_segmentno
    """.format(trip_id)
    return pd.DataFrame(query(qry, main_db, cursor=RealDictCursor))
| [
"[email protected]"
] | |
fcd2e851ea6d8eeb9e832b74271abedd72940a01 | 4a56b82190cab7f1a17a4d26a34411664aded52c | /models/engine/file_storage.py | d0ce43d08bb433cef70cac9b8a2170c424a2ca51 | [] | no_license | ellio-hub/AirBnB_clone_v2 | e2e4e4dfd9bd4851da73fbdece606a1c34e9a477 | e3e81657a7065ba7c0b86d24fe2f5d0ca1268743 | refs/heads/main | 2023-06-05T05:01:45.838792 | 2021-06-29T12:51:57 | 2021-06-29T12:51:57 | 380,213,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | #!/usr/bin/python3
"""
This module defines a clas
"""
import json
from models.amenity import Amenity
from models.base_model import BaseModel
from models.city import City
from models.place import Place
from models.review import Review
from models.state import State
from models.user import User
class FileStorage:
    """This class manages storage of hbnb models in JSON format"""
    # Path of the JSON file used for persistence.
    __file_path = 'file.json'
    # In-memory store: "<ClassName>.<id>" -> model instance.
    __objects = {}

    def all(self, cls=None):
        """Return __objects, optionally filtered by class (object or name)."""
        if cls is not None:
            new_dict = {}
            for key, value in self.__objects.items():
                if cls == value.__class__ or cls == value.__class__.__name__:
                    new_dict[key] = value
            return new_dict
        return self.__objects

    def new(self, obj):
        """Adds new object to storage dictionary"""
        # Key format: "<ClassName>.<id>".
        self.all().update({obj.to_dict()['__class__'] + '.' + obj.id: obj})

    def save(self):
        """Saves storage dictionary to file"""
        with open(FileStorage.__file_path, 'w') as f:
            temp = {}
            temp.update(FileStorage.__objects)
            # Serialize each model via its to_dict() representation.
            for key, val in temp.items():
                temp[key] = val.to_dict()
            json.dump(temp, f)

    def reload(self):
        """Loads storage dictionary from file"""
        # Map of class names to classes for reconstructing instances.
        classes = {
                    'BaseModel': BaseModel, 'User': User, 'Place': Place,
                    'State': State, 'City': City, 'Amenity': Amenity,
                    'Review': Review
                  }
        try:
            temp = {}
            with open(FileStorage.__file_path, 'r') as f:
                temp = json.load(f)
                for key, val in temp.items():
                    self.all()[key] = classes[val['__class__']](**val)
        except FileNotFoundError:
            # No file yet: start with an empty store.
            pass

    def delete(self, obj=None):
        """Method that deletes obj from __objects"""
        if obj is not None:
            key = obj.__class__.__name__ + '.' + obj.id
            if key in self.__objects:
                del self.__objects[key]

    def close(self):
        """Deserialize the JSON file back into __objects (calls reload)."""
        self.reload()
| [
"[email protected]"
] | |
6f5d6cda358dffbfad45bdcc5ef078c2959773d5 | 13fa9da327fc5b947e1cd3a70ca1602d64edb0a6 | /products/migrations/0002_offer.py | 79398b5de48ba36df96b807b33c155a181464c7a | [] | no_license | tcb1978/PythonShop | 091f5029ea8724fe93202b53ce8783fb4cc47066 | dfefd1ee10b73d12d736e75c15fba9224d3a177a | refs/heads/main | 2023-02-27T05:26:21.536266 | 2021-01-27T00:23:41 | 2021-01-27T00:23:41 | 333,252,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # Generated by Django 2.1 on 2021-01-26 22:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration creating the Offer model."""

    dependencies = [
        ('products', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Offer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=7)),
                ('description', models.CharField(max_length=20)),
                # NOTE(review): verbose_name expects a label string; 0.2 here
                # was likely meant to be ``default=0.2``. Left unchanged
                # because applied migration history must not be rewritten --
                # fix in the model plus a new migration instead.
                ('discount', models.FloatField(verbose_name=0.2)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
abd5c3783b0a47c2050a38e68c7662ced936c64f | ddcd717ca21f99edc1af5b1f4b6a010bf8cd3089 | /hopper_speech/src/battery_status_monitor.py | 56f6ef2e4a29b2c4d6399cf05b6d36a9b013123e | [
"MIT"
] | permissive | CreedyNZ/Hopper_ROS | d73b5d38758a8a8c96430799c3f528d04bc1d558 | 1e6354109f034a7d1d41a5b39ddcb632cfee64b2 | refs/heads/master | 2020-07-18T09:25:25.732740 | 2020-04-06T23:25:16 | 2020-04-06T23:25:16 | 206,221,833 | 0 | 0 | MIT | 2020-04-06T18:11:07 | 2019-09-04T03:20:00 | Python | UTF-8 | Python | false | false | 3,548 | py | #!/usr/bin/env python
from __future__ import division
import rospy
from std_msgs.msg import String
from hopper_msgs.msg import ServoTelemetry, HexapodTelemetry
from sensor_msgs.msg import BatteryState
def mean(numbers):
    """Return the arithmetic mean of *numbers* (0.0 for an empty sequence)."""
    return sum(numbers) / (len(numbers) or 1)
SERVO_COUNT = 18
MAX_VOLTAGE = 12.5
MIN_VOLTAGE = 10.5
class BatteryStatusMonitor(object):
    """ROS node that aggregates servo voltages into battery status messages
    and plays voice/LED warnings as the pack drains."""

    # Minimum spacing between repeated critical-voltage warnings.
    critical_voltage_warning_period = rospy.Duration.from_sec(15)

    def __init__(self):
        super(BatteryStatusMonitor, self).__init__()
        rospy.init_node("hopper_battery_monitor", anonymous=True)
        # Start above any real voltage so the first sample always lowers it.
        self.lowest_recorded_voltage = 24.0
        # servo id -> last reported voltage; cleared once SERVO_COUNT collected.
        self.voltages = {}
        self.first_check = True
        self.speech_publisher = rospy.Publisher('hopper_play_sound', String, queue_size=5)
        self.battery_publisher = rospy.Publisher("hopper/battery_status", BatteryState, queue_size=5)
        self.face_color_publisher = rospy.Publisher("hopper/face/mode", String, queue_size=3)
        self.telemetry_sub = rospy.Subscriber("hopper_telemetry", HexapodTelemetry, self.on_new_telemetry, queue_size=1)
        # Delay the first possible critical warning by one warning period.
        self.last_critical_voltage_warning = rospy.Time.now() + self.critical_voltage_warning_period
        # Blocks here until shutdown; construction == running the node.
        rospy.spin()

    def on_new_telemetry(self, message):
        """Collect per-servo voltages; once all servos reported, publish a
        BatteryState and emit threshold warnings."""
        for servo in message.servos:
            self.voltages[servo.id] = servo.voltage
        if len(self.voltages) == SERVO_COUNT:
            voltages = self.voltages.values()
            self.voltages.clear()
            # Use the highest servo reading as the pack voltage estimate.
            voltage = max(voltages)
            battery_state = BatteryState()
            battery_state.header.stamp = rospy.Time.now()
            battery_state.voltage = voltage
            # Unknown fields are NaN per sensor_msgs/BatteryState convention.
            battery_state.current = float("nan")
            battery_state.charge = float("nan")
            battery_state.capacity = float("nan")
            battery_state.design_capacity = float("nan")
            # NOTE(review): this yields 0..100, but sensor_msgs/BatteryState
            # documents `percentage` on a 0..1 range -- confirm consumers.
            battery_state.percentage = 100 - (MAX_VOLTAGE - voltage) / (MAX_VOLTAGE - MIN_VOLTAGE) * 100
            battery_state.power_supply_status = BatteryState.POWER_SUPPLY_STATUS_DISCHARGING
            battery_state.power_supply_health = BatteryState.POWER_SUPPLY_HEALTH_UNKNOWN
            battery_state.power_supply_technology = BatteryState.POWER_SUPPLY_TECHNOLOGY_LIPO
            battery_state.present = True
            battery_state.cell_voltage = [float("nan")] * 3
            # NOTE(review): "batter" is a typo, but it is a runtime string.
            battery_state.location = "Primary batter bay"
            battery_state.serial_number = "N/A"
            self.battery_publisher.publish(battery_state)
            # skip the first check so that you don't get a warning if battery is already bellow some value
            if self.first_check:
                self.first_check = False
                self.lowest_recorded_voltage = voltage
                return
            # Critical level: repeat warning at most once per warning period.
            if voltage < 10.2:
                if self.last_critical_voltage_warning + self.critical_voltage_warning_period < rospy.Time.now():
                    self.speech_publisher.publish("battery_critical")
                    self.face_color_publisher.publish("flash:red")
                    self.last_critical_voltage_warning = rospy.Time.now()
            # One-shot warnings when first dropping below 11 V / 12 V.
            elif voltage < 11 and self.lowest_recorded_voltage >= 11:
                self.speech_publisher.publish("battery_below_11")
            elif voltage < 12 and self.lowest_recorded_voltage >= 12:
                self.speech_publisher.publish("battery_below_12")
            if voltage < self.lowest_recorded_voltage:
                self.lowest_recorded_voltage = voltage
if __name__ == '__main__':
    # Constructing the monitor registers the node and blocks in rospy.spin().
    BatteryStatusMonitor()
| [
"[email protected]"
] | |
02a8f2e75063833247b447af0b17a2c764c757b3 | b8a794656624700970c50cb0e408f2ae045887d6 | /trening.py | 75057d6eeab348814536ae28e1fde9d18fb3b543 | [] | no_license | spirytus100/scripts | 4ec53dd1848d7fd9471299d7faaf1ad33d0c1c52 | 7c702b4cb20325d8dc37c8ea09e24649d3d726a9 | refs/heads/main | 2023-08-28T09:11:09.847176 | 2021-11-07T12:46:42 | 2021-11-07T12:46:42 | 425,500,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | import sqlite3
import datetime
from sys import argv
# Module-level DB handle; PARSE_DECLTYPES/PARSE_COLNAMES make sqlite3 convert
# declared column types (e.g. `timestamp`) back into Python datetime objects.
connection = sqlite3.connect("trening.db", detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
def create_table(connection):
    """Create the ``training`` log table if it does not exist yet.

    Columns: exercise name, weight used, reps for three sets, and the
    start/end timestamps of the exercise (declared as ``timestamp`` so the
    module-level ``detect_types`` converts them back to datetime).
    """
    connection.execute("""CREATE TABLE IF NOT EXISTS training (
    exercise TEXT NOT NULL,
    weight REAL NOT NULL,
    series1 INTEGER NOT NULL,
    series2 INTEGER NOT NULL,
    series3 INTEGER NOT NULL,
    start_time timestamp,
    end_time timestamp);""")
    # Since Python 3.6, sqlite3 no longer implicitly commits before DDL, so
    # commit explicitly to guarantee the table survives an early exit.
    connection.commit()
# Master list of known exercises (Polish names, used as DB keys).
all_exercises = ("Przysiady ze sztangą na barkach", "Martwy ciąg", "Wykroki", "Wznosy pięt", "Brzuszki na ławce skośnej", "Deska", "Podciąganie nachwytem",
"Wyciskanie sztangi w leżeniu", "Wiosłowanie", "Wyciskanie sztangi nad głowę", "Wznosy przedramion", "Wznosy tułowia w oparciu tylnym", "Ściskacz",
"Rozpiętki", "Wyciskanie sztangi zza karku", "Wznosy przedramion z hantlami", "Pompki zwykłe")
# The FBW (full-body workout) subset actually driven by this script.
fbw = ("Przysiady ze sztangą na barkach", "Martwy ciąg", "Podciąganie nachwytem", "Wznosy przedramion z hantlami", "Wyciskanie sztangi zza karku", "Rozpiętki",
"Pompki zwykłe", "Brzuszki na ławce skośnej")
def print_exercises(bodypart):
    """Interactive training session over the exercises in *bodypart*.

    For each exercise: show the most recent logged set, prompt for weight
    and three set rep counts, and insert a row into ``training``. Entering
    "q" as the weight skips the exercise; malformed numbers abort saving
    that exercise. Uses the module-level ``connection``.
    """
    start = datetime.datetime.now()
    for exercise in bodypart:
        error = False
        # Latest logged row for this exercise (parameterized query).
        # NOTE(review): for a never-logged exercise this yields
        # (None, None, ...) and prints "Ostatnio: None ..." -- confirm OK.
        cursor = connection.execute("SELECT max(rowid), weight, series1, series2, series3 FROM training WHERE exercise = ?", (exercise,)).fetchone()
        print(exercise)
        print("\t" + "Ostatnio:", cursor[1], "kg", cursor[2], cursor[3], cursor[4])
        weight = input("\t" + "Ciężar: ")
        if weight == "q":
            # "q" skips this exercise entirely.
            continue
        else:
            try:
                weight = float(weight)
            except ValueError:
                print("Błędny format danych. Ćwiczenie nie zostało zapisane.")
                error = True
        if not error:
            # Time the three sets; timestamps bracket the whole exercise.
            starttime = datetime.datetime.now()
            series1 = input("\t" + "Seria 1: ")
            series2 = input("\t" + "Seria 2: ")
            series3 = input("\t" + "Seria 3: ")
            endtime = datetime.datetime.now()
            series = []
            for el in (series1, series2, series3):
                try:
                    el = int(el)
                except ValueError:
                    print("Błędny format danych. Ćwiczenie nie zostało zapisane.")
                    error = True
                    break
                else:
                    series.append(el)
            if not error:
                values = (exercise, weight, series[0], series[1], series[2], starttime, endtime)
                connection.execute("INSERT INTO training VALUES(?, ?, ?, ?, ?, ?, ?)", values)
                connection.commit()
    # NOTE(review): this prompt runs once after the loop; answering "n"
    # simply skips the summary -- there is no loop back into training.
    quest = input("Czy chcesz zakończyć trening (y/n)? ")
    if quest == "y":
        end = datetime.datetime.now()
        duration = end - start
        print("Trening trwał", duration)
        # Count distinct training days by collapsing end_time to dates.
        cursor = connection.execute("SELECT end_time FROM training")
        trainings = []
        for row in cursor:
            trainings.append(datetime.date(row[0].year, row[0].month, row[0].day))
        print("Gratulacje! Ukończyłeś właśnie ", len(set(trainings)), "trening!")
# Script entry: ensure the table exists, then run the interactive session.
create_table(connection)
print_exercises(fbw)
| [
"[email protected]"
] | |
bb82e32c7f67226f4394524e08ad8b3a40d034b6 | 8573070bdc8ddcabbf9bfd2dc307a6f0e37efb32 | /youtube_to_mp3.py | 778888f3b53af0c54c9132302eb301e977d49a67 | [] | no_license | Zavitit/download-from-youtube | e2ba5073eabfa5a42e6b7654fd849f234ded37f7 | cbcb6c8410333cb7a5ab33f6eb57876382755b62 | refs/heads/main | 2023-01-20T03:08:25.501011 | 2020-11-21T16:03:16 | 2020-11-21T16:03:16 | 314,844,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,715 | py | from __future__ import unicode_literals
import os
from concurrent.futures import ThreadPoolExecutor, wait
from typing import List, Tuple
from pytube import YouTube
from youtube_search import YoutubeSearch
# requires:
# youtube_search - https://github.com/joetats/youtube_search
# ffmpeg - https://ffmpeg.org/
# pytube - https://github.com/nficano/pytube
def get_youtube_urls(video_name: str, max_results: int = 10) -> List[str]:
    """Search YouTube for *video_name* and return up to *max_results* URLs.

    Raises AssertionError when the search returns no results.
    """
    search_hits = YoutubeSearch(video_name, max_results=max_results).to_dict()
    assert len(search_hits) > 0
    urls = []
    for hit in search_hits:
        urls.append("youtube.com" + hit['url_suffix'])
    return urls
def download_youtube_song(url: str, path: str, song_name: str = '') -> None:
    """Download the audio-only stream of *url* into directory *path*.

    A blank *song_name* makes this a no-op; failures are reported on
    stdout instead of being raised.
    """
    if song_name == '':
        return
    try:
        video = YouTube(url)
        audio_stream = video.streams.get_audio_only()
        audio_stream.download(output_path=path, filename=video.title)
        print(f'{song_name} downloaded successfully')
    except Exception as e:
        print(f'failed to download: {song_name} from playlist {path.split("/")[-1]}')
        print(e)
def handle_songs_file(filename: str) -> str:
    """Download every song named in the playlist file *filename*.

    Reads one song name per line, creates ./Downloaded/<playlist-name>/ and
    fans downloads out over a thread pool. Returns a status string.
    """
    with open(filename, 'r') as f:
        songs_names = f.readlines()
    songs_names = [name.strip() for name in songs_names]
    # Drop blank lines.
    songs_names = [name for name in songs_names if len(name) > 0]
    # Strip directory and the ".txt" extension to name the output folder.
    filename = filename[filename.rfind('/') + 1:-4]
    path = os.getcwd() + '/Downloaded/' + filename
    if not os.path.isdir(path):
        try:
            os.mkdir(path)
        except Exception as e:
            print(f"exception raised: {str(e)}")
    workers = []
    with ThreadPoolExecutor() as executor:
        for name in songs_names:
            # download it to mp3 from youtube
            try:
                url = get_youtube_urls(name, 1)[0]
                workers.append(executor.submit(download_youtube_song, url, path, name))
            except Exception as e:
                print(f"failed to download {name}")
    print(wait(workers))
    # NOTE(review): this f-string has no placeholder -- it was probably
    # meant to interpolate {filename}; confirm and fix the template.
    return f"Finished Handling File (unknown)"
def main():
    """Queue every .txt playlist under ./Lists for concurrent handling."""
    lists_dir = os.getcwd() + '/Lists'
    playlist_paths = []
    for entry in os.listdir(lists_dir):
        if entry.endswith('.txt'):
            playlist_paths.append(lists_dir + '/' + entry)
    futures = []
    with ThreadPoolExecutor() as executor:
        for playlist_path in playlist_paths:
            futures.append(executor.submit(handle_songs_file, playlist_path))
            print(f"submited {playlist_path}")
    print(wait(futures))
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
0909f463fdd4d1dada60f93d51a54ea4268747dd | c2705d1a1b0d9ee2c456f55408d4f71d9309eaff | /Aula 2 - Python + Django/Python/test.py | d3945915b9414dd0705efadc7eb57c490e9fbf4b | [] | no_license | DinoSaulo/PTA---CITi | b8bd13efa0d95bdae53df1399870eb69671c790e | caffcbef31256de3430ce21b9373f4c16e7674f9 | refs/heads/master | 2022-10-22T00:56:56.340971 | 2018-11-27T10:05:57 | 2018-11-27T10:05:57 | 155,753,775 | 0 | 1 | null | 2022-10-01T13:19:46 | 2018-11-01T17:48:05 | Python | UTF-8 | Python | false | false | 233 | py | #Dicionario
# Demo: nested dictionary + membership test (class exercise).
contexto = {
    'register': {
        'nome': 'Marco',
        'idade': 21,
        'qualidade': 'Bonito',
    }
}

# Idiomatic membership test: `key not in dict` instead of `not (key in dict)`.
# This branch does not run here because 'register' is present.
if 'register' not in contexto:
    print('ta aqui')
print('to aqui')
print(contexto['register'])
| [
"[email protected]"
] | |
236d6ec62f07daa740b25140002a2ac47a6cc0a0 | 4bf5321f59a646b9c99163d94c37759e3a977af2 | /pOCCI/pOCCI_parse.py | bcf2b9a8b9460a6d06084412cee38a577f3284eb | [
"MIT"
] | permissive | CESNET/pOCCI | 00935160d9b9ae7fdd5954e2b3659eb305324d4e | 5354e20a5702dcc08d8a87a9690b4204a8388765 | refs/heads/master | 2023-08-24T19:45:30.754853 | 2017-03-27T14:52:58 | 2017-03-27T15:10:23 | 38,253,794 | 6 | 6 | null | 2016-09-08T10:13:39 | 2015-06-29T15:04:00 | Python | UTF-8 | Python | false | false | 3,304 | py | #!/usr/bin/python
import getopt
import os
import re
import sys
import version
import occi
import render
# Defaults for the CLI options (overridden by getopt parsing in main()).
inputmime = 'text/plain'
outputmime = 'text/plain'
messagetype = 'categories'
# OCCI message types this tool knows how to parse/render.
messagetypes = ['categories', 'entities', 'resource']
def usage(name=__file__):
    # Print CLI help text (Python 2 `print` statement; this module is py2).
    print '%s [OPTIONS]\n\
\n\
OPTIONS:\n\
  -h, --help\n\
  -i, --input-mime MIME-TYPE .... input mime-type [text/plain]\n\
  -o, --output-mime MIME-TYPE ... output mime-type [text/plain]\n\
  -t, --type OCCI-TYPE .......... OCCI message type [categories]\n\
  -V, --version ................. print version information\n\
\n\
MIME-TYPE: text/plain, text/occi, text/uri-list\n\
OCCI-TYPE: %s\n\
' % (os.path.basename(name), ', '.join(messagetypes))
def read_input(strip=False):
    """Generator over stdin lines; strips trailing CR/LF when *strip* is set."""
    for raw_line in sys.stdin:
        yield raw_line.rstrip('\n\r') if strip else raw_line
def main(argv=sys.argv[1:]):
    """CLI driver: parse an OCCI message from stdin in one mime-type and
    re-render it to stdout in another (Python 2 code)."""
    global inputmime, outputmime, messagetype

    occi_parser = None
    occi_renderer = None

    try:
        # NOTE(review): getopt long option names must NOT include the
        # leading "--" ('--help', '--input-mime=', '--output-mime=' here are
        # broken; 'type=' and 'version' are correct). As written, --help /
        # --input-mime / --output-mime raise GetoptError. Confirm and fix.
        opts, args = getopt.getopt(argv, 'hi:o:t:V', ['--help', '--input-mime=', '--output-mime=', 'type=', 'version'])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ['-h', '--help']:
            usage()
            sys.exit()
        elif opt in ['-i', '--input-mime']:
            inputmime = arg
        elif opt in ['-o', '--output-mime']:
            outputmime = arg
        elif opt in ['-t', '--type']:
            messagetype = arg
        elif opt in ["-V", "--version"]:
            print version.__version__
            sys.exit()

    if messagetype not in messagetypes:
        print >> sys.stderr, 'OCCI message type expected: %s' % ', '.join(messagetypes)
        sys.exit(2)

    # Renderers double as parsers; one per direction.
    occi_parser = render.create_renderer(inputmime)
    if not occi_parser:
        print >> sys.stderr, 'OCCI parser can\'t be initialized (wrong mime-type "%s"?)' % inputmime
        sys.exit(2)
    occi_renderer = render.create_renderer(outputmime)
    if not occi_renderer:
        print >> sys.stderr, 'OCCI renderer can\'t be initialized (wrong mime-type "%s"?)' % outputmime
        sys.exit(2)

    # text/occi carries the message in HTTP-style headers; everything else
    # is read as a plain body with line endings stripped.
    if re.match(r'text/occi(;.*)?$', inputmime):
        body = None
        headers = list(read_input(strip=False))
    else:
        body = list(read_input(strip=True))
        headers = None

    try:
        if messagetype in ['categories']:
            categories = occi_parser.parse_categories(body, headers)
            [body, headers] = occi_renderer.render_categories(categories)
        elif messagetype in ['entities']:
            urls = occi_parser.parse_locations(body, headers)
            [body, headers] = occi_renderer.render_locations(urls)
        elif messagetype in ['resource']:
            [categories, links, attributes] = occi_parser.parse_resource(body, headers)
            [body, headers] = occi_renderer.render_resource(categories, links, attributes)
    except occi.ParseError as perr:
        print >> sys.stderr, str(perr)
        sys.exit(1)
    except occi.RenderError as rerr:
        print >> sys.stderr, str(rerr)
        sys.exit(1)

    if body:
        sys.stdout.write(body)
    if headers:
        sys.stdout.write('\n'.join(headers) + '\n')
# Script entry point: parse argv (excluding the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
| [
"[email protected]"
] | |
55376f7f22d73064faf467bc0d0b8ebf66d86e28 | aff5b675f60a8cfffc92ca285c6393eab3956b8e | /scan_values/src/velocityjackalv2.py | 0425b630c4cbb5b491d205f81ad21753cdd8bf0c | [] | no_license | marianbrunet/jackaltest2 | e913e8ff69767669ede2c23c81ab4913c494a450 | 4548c4fa11c11f5aa8cbbda235e018e05a0911dd | refs/heads/master | 2022-07-12T12:41:26.675391 | 2019-03-27T17:14:17 | 2019-03-27T17:14:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py | #! /usr/bin/env python
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
import numpy as np
# Shared Twist message reused by veloc(); only linear.x / angular.z are driven.
vel_msg = Twist()
# NOTE(review): shadowed by the local `angular_speed = 0.3` inside veloc(),
# so this 0.017 value is never actually used.
angular_speed = 0.017
dist = 0.0
def veloc():
    """Steer the robot toward a target using the 'distance' and 'angle'
    ROS params: turn until |angle| <= 12, drive forward while distance > 1 m,
    stop otherwise. Publishes Twist messages on cmd_vel at 10 Hz."""
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
    rospy.init_node('velocity', anonymous=True)
    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        # Local turn rate (rad/s, presumably); shadows the module global.
        angular_speed = 0.3
        # Reset every field of the shared message before deciding the command.
        vel_msg.linear.x = 0
        vel_msg.linear.y = 0
        vel_msg.linear.z = 0
        vel_msg.angular.x = 0
        vel_msg.angular.y = 0
        vel_msg.angular.z = 0
        dist = rospy.get_param('distance')
        if(dist > 1.0):
            # NOTE(review): 'angle' is fetched twice per cycle (here and in
            # the else branch) -- it can change between reads; units are
            # presumably degrees (threshold +/-12) -- confirm with publisher.
            if ((int(rospy.get_param('angle')) < -12)):
                # Target is to the right: turn clockwise.
                vel_msg.angular.z = -angular_speed
                pub.publish(vel_msg)
                rate.sleep()
            else:
                if ((int(rospy.get_param('angle')) > 12)):
                    # Target is to the left: turn counter-clockwise.
                    vel_msg.angular.z = angular_speed
                    pub.publish(vel_msg)
                    rate.sleep()
                else:
                    # Roughly aligned: drive straight ahead.
                    vel_msg.linear.x = 0.3
                    vel_msg.angular.z = 0
                    pub.publish(vel_msg)
                    rate.sleep()
        else:
            # Close enough: stop.
            vel_msg.linear.x = 0
            vel_msg.angular.z = 0
            pub.publish(vel_msg)
            rate.sleep()
if __name__ == '__main__':
    try:
        veloc()
    except rospy.ROSInterruptException:
        pass

# NOTE(review): these module-level lines run even on import, and after
# veloc() has already called rospy.init_node -- a second init_node raises
# rospy.exceptions.ROSException. Likely leftover scaffolding; review.
rospy.init_node('velocity')
#sub = rospy.Subscriber(rospy.get_param('scan_subscriber_topic_name'), LaserScan, callback)
rospy.spin()
| [
"[email protected]"
] | |
eb20fab054dd74e416756cfd66c75d8607d1ca97 | 99fcce231428992e00101aaa6f8dddce197b77e9 | /test_cities2.py | af0387b052fd5763e05a662cdfea1811c93a017a | [] | no_license | harshuop/Python-Crash-Course | bba995f52c17afb4030935917a62b76772b357c5 | 8606889297b5d76630fe241cdb507fb9dcdf01a3 | refs/heads/master | 2023-02-15T06:12:44.196297 | 2021-01-10T19:07:45 | 2021-01-10T19:07:45 | 297,055,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import unittest
from city_functions2 import CityCountry
class TestingFile(unittest.TestCase):
    """Unit tests for the city_functions2.CityCountry helper."""

    def test_city_country(self):
        """CityCountry should title-case city/country and append population."""
        output = CityCountry('santiago', 'chile', 5000000)
        self.assertEqual(output, 'Santiago Chile 5000000')
if __name__ == '__main__':
    # Guard so importing this module doesn't immediately run (and exit via)
    # unittest.main(); `python test_cities2.py` still executes the tests.
    unittest.main()
| [
"[email protected]"
] | |
be4cae57c6d7553aad6194966e21bce7945d3b4a | 6e932aa6ec9424ae0238c559112fdd0214c52be6 | /ffawp/ch03/7_pandas_excel_column_by_index.py | 46091c546d93d0be59c779e56143d2e0a2105ffe | [] | no_license | LingChenBill/python_first_introduce | d1c780dcd3653ef4cda39cc4a0c631a99071f088 | 32ff4a16fe10505fcb49e4762fc573f5f1c62167 | refs/heads/master | 2020-07-29T13:03:15.447728 | 2020-06-09T13:39:07 | 2020-06-09T13:39:07 | 209,813,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | # Date:2020/5/25
# Author:Lingchen
# Mark: 利用pandas来筛选特定的列
# python3 7_pandas_excel_column_by_index.py data/sales_2013.xlsx data/output/7_output_pandas.xlsx
import pandas as pd
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
data_frame = pd.read_excel(input_file, sheet_name='january_2013', index_col=None)
# 如果使用iloc函数来选择列,那么就需要在列索引值前面加上一个冒号和一个逗号,
# 表示你想为这些特定的列保留所有的行,否则,iloc函数也会使用这些索引值去筛选行。
data_frame_column_by_index = data_frame.iloc[:, [1, 4]]
# data_frame_column_by_index = data_frame.iloc[[1, 4]]
writer = pd.ExcelWriter(output_file)
data_frame_column_by_index.to_excel(writer, sheet_name='jan_13_output', index=False)
writer.save()
| [
"[email protected]"
] | |
4efb7b1a1df0c96c04f9fc0c9fa1efe156108503 | fb02ed09ae0a33ed3319f43497bc95fb2159ca96 | /everestsalsa/wsgi.py | e8013dbc4e7cbfc50915e4f002dd57d2afe5eb81 | [] | no_license | nabinst/Salsa | d9c1bcc30e4c38c522170b72e64428d9586f9079 | 2f98086551decb2b227cb4eebb70ef78f99a4b04 | refs/heads/master | 2022-03-05T14:20:23.692993 | 2022-03-01T02:00:38 | 2022-03-01T02:00:38 | 64,690,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for everestsalsa project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select the settings module before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'everestsalsa.settings')

# Module-level WSGI entry point used by servers (gunicorn/uWSGI/etc.).
application = get_wsgi_application()
| [
"[email protected]"
] | |
6a247076930cb842958c949ef8482b55f7dcb4c0 | 61c4298f78233e5b4d351e1e4d3b9161938dc5c3 | /wasurvey/settings.py | 3fca0dc9f2e994bb5c4e0fdcd40088c3202da3c0 | [] | no_license | thenocturnalanimal/wasurvey | 735b894b957ab3b2b9b8ac84e12711cfa9e2930e | 99710890877f4b62640ecf0b9dc29cb58e2ebcfc | refs/heads/master | 2022-06-10T17:10:23.852222 | 2020-05-04T19:40:25 | 2020-05-04T19:40:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,325 | py | """
Django settings for wasurvey project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# User-uploaded media lives under <project>/media, served at /media/.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'm_ki^d732yu%-&17(wvsgex3%lbh$8+_j*n%@tgb(+wjgy2eh$'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'round1',
    'round2',
    'crispy_forms'
]

# Template pack used by django-crispy-forms form rendering.
CRISPY_TEMPLATE_PACK = 'bootstrap4'

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'wasurvey.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'wasurvey.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

# NOTE(review): redundant -- `import os` above already provides os.path.
import os.path
# NOTE(review): STATIC_ROOT is empty; `collectstatic` needs a real path for
# deployment. STATICFILES_DIRS uses a relative 'static' (cwd-dependent) --
# consider os.path.join(BASE_DIR, 'static').
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = ( os.path.join('static'), )
| [
"[email protected]"
] | |
ae6a32413cd11a7b2c1641750e2e0ab5eaf0098f | 4519b4b24f3907da1dde513f72d432fd9b4391f4 | /crds/jwst/specs/niriss_amplifier.spec | 6ccec0f5c173fcc470d0acdec4afa258d7e69b7b | [
"BSD-2-Clause"
] | permissive | spacetelescope/crds | 0bd712b7c7c6864c274987e7ba94a051e19d1e48 | 08da10721c0e979877dc9579b4092c79f4ceee27 | refs/heads/master | 2023-07-23T17:07:33.889579 | 2023-06-29T20:04:56 | 2023-06-29T20:04:56 | 52,045,957 | 9 | 29 | NOASSERTION | 2023-09-14T17:42:28 | 2016-02-18T23:15:38 | Python | UTF-8 | Python | false | false | 512 | spec | {
'derived_from' : 'cloning tool 0.03b (2012-09-11)',
'file_ext' : '.fits',
'filekind' : 'AMPLIFIER',
'filetype' : 'DETECTOR PARAMETERS',
'instrument' : 'NIRISS',
'mapping' : 'REFERENCE',
'name' : 'jwst_niriss_amplifier_0000.rmap',
'observatory' : 'JWST',
'parkey' : (('META.INSTRUMENT.DETECTOR', 'META.INSTRUMENT.FILTER'),),
'sha1sum' : '0335598536a0da3617e0521214191ba241ddc530',
'suffix' : 'amplifier',
'text_descr' : 'Detector Amplifier Readout Parameters',
}
| [
"[email protected]@stsci.edu"
] | [email protected]@stsci.edu |
3d9b2d060a77f242efcf80aa17c7928b76186008 | 518bf342bc4138982af3e2724e75f1d9ca3ba56c | /solutions/2245. Maximum Trailing Zeros in a Cornered Path/2245.py | 9c763ab4b9a81598a2bd4acdb95e98b4aed48aa7 | [
"MIT"
] | permissive | walkccc/LeetCode | dae85af7cc689882a84ee5011f0a13a19ad97f18 | a27be41c174565d365cbfe785f0633f634a01b2a | refs/heads/main | 2023-08-28T01:32:43.384999 | 2023-08-20T19:00:45 | 2023-08-20T19:00:45 | 172,231,974 | 692 | 302 | MIT | 2023-08-13T14:48:42 | 2019-02-23T15:46:23 | C++ | UTF-8 | Python | false | false | 2,034 | py | class Solution:
def maxTrailingZeros(self, grid: List[List[int]]) -> int:
m = len(grid)
n = len(grid[0])
# leftPrefix2[i][j] := # of 2 in grid[i][0..j]
# leftPrefix5[i][j] := # of 5 in grid[i][0..j]
# topPrefix2[i][j] := # of 2 in grid[0..i][j]
# topPrefix5[i][j] := # of 5 in grid[0..i][j]
leftPrefix2 = [[0] * n for _ in range(m)]
leftPrefix5 = [[0] * n for _ in range(m)]
topPrefix2 = [[0] * n for _ in range(m)]
topPrefix5 = [[0] * n for _ in range(m)]
def getCount(num: int, factor: int) -> int:
count = 0
while num % factor == 0:
num //= factor
count += 1
return count
for i in range(m):
for j in range(n):
leftPrefix2[i][j] = getCount(grid[i][j], 2)
leftPrefix5[i][j] = getCount(grid[i][j], 5)
if j:
leftPrefix2[i][j] += leftPrefix2[i][j - 1]
leftPrefix5[i][j] += leftPrefix5[i][j - 1]
for j in range(n):
for i in range(m):
topPrefix2[i][j] = getCount(grid[i][j], 2)
topPrefix5[i][j] = getCount(grid[i][j], 5)
if i:
topPrefix2[i][j] += topPrefix2[i - 1][j]
topPrefix5[i][j] += topPrefix5[i - 1][j]
ans = 0
for i in range(m):
for j in range(n):
curr2 = getCount(grid[i][j], 2)
curr5 = getCount(grid[i][j], 5)
l2 = leftPrefix2[i][j]
l5 = leftPrefix5[i][j]
r2 = leftPrefix2[i][n - 1] - (0 if j == 0 else leftPrefix2[i][j - 1])
r5 = leftPrefix5[i][n - 1] - (0 if j == 0 else leftPrefix5[i][j - 1])
t2 = topPrefix2[i][j]
t5 = topPrefix5[i][j]
d2 = topPrefix2[m - 1][j] - (0 if i == 0 else topPrefix2[i - 1][j])
d5 = topPrefix5[m - 1][j] - (0 if i == 0 else topPrefix5[i - 1][j])
ans = max(ans,
min(l2 + t2 - curr2, l5 + t5 - curr5),
min(r2 + t2 - curr2, r5 + t5 - curr5),
min(l2 + d2 - curr2, l5 + d5 - curr5),
min(r2 + d2 - curr2, r5 + d5 - curr5))
return ans
| [
"[email protected]"
] | |
de522fbcbbe893b765777b70e77fe2fa58804235 | b7ddff46be5bdf550480116d539c2508290502e9 | /htrader.py | e2e269e4550dda62f7504ea3402b1230245c8e77 | [] | no_license | wnwjq462/HTrader | b047b07a10374ad7a2258483814002c5c2868422 | 76093642389efa88cf86f0fe2ce130bbbf4e54e3 | refs/heads/main | 2023-05-04T01:53:40.536070 | 2021-05-28T09:33:37 | 2021-05-28T09:33:37 | 314,489,621 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,147 | py | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5 import uic
from Kiwoom import *
import webbrowser
import sqlite3
import pandas as pd
from datetime import datetime
# Load the Qt form class from the Designer .ui file built ahead of time.
form_class = uic.loadUiType("htrader.ui")[0]

# Main window class: connects to the Kiwoom API and implements most features.
class MyWindow(QMainWindow, form_class):
def __init__(self):
super().__init__()
self.setupUi(self)
self.trade_stocks_done = False
self.kiwoom = Kiwoom()
self.kiwoom.comm_connect()
# Database 연결
self.con = sqlite3.connect("HBase.db")
self.cursor = self.con.cursor()
#보유종목현황 / 선정 종목 버튼 리스트
self.btn_list1 = []
self.btn1_num = 0
self.btn_list2 = []
self.btn2_num = 0
#현재 시간을 알려주기 위한 Timer
self.timer = QTimer(self)
self.timer.start(1000)
self.timer.timeout.connect(self.timeout)
#실시간 잔고 및 보유종목 현황을 보여주기 위한 Timer
self.timer2 = QTimer(self)
self.timer.start(5000)
self.timer.timeout.connect(self.timeout2)
#종목코드 입력
self.lineEdit.textChanged.connect(self.code_change)
#계좌번호 출력
accounts_num = int(self.kiwoom.get_login_info("ACCOUNT_CNT"))
accounts = self.kiwoom.get_login_info("ACCNO")
accounts_list = accounts.split(';')[0:accounts_num]
self.comboBox.addItems(accounts_list)
#주문버튼 / 잔고 조회 버튼
self.pushButton.clicked.connect(self.send_order)
self.pushButton_2.clicked.connect(self.check_balance)
#선정 종목 정보 출력
self.load_buy_sell_list()
#self.kiwoom._set_real_reg("6000", "8121773611", "8019", "0") //실시간 정보 확인
    # Manual-order handler: places the order and records it in the DB.
    def send_order(self):
        """Place a manual buy/sell order from the widget values and log it
        into buy_inform / sell_inform.

        NOTE(review): SQL statements below are built by string concatenation
        (a name containing a quote breaks the statement; injection-prone) --
        should use parameterized queries. Also, on a sell with no matching
        buy_inform row, buy_date/buy_time/buy_reason stay unbound and the
        row loop raises NameError. Review both.
        """
        order_type_lookup = {'신규매수': 1, '신규매도': 2, '매수취소': 3, '매도취소' : 4}
        hoga_lookup = {'지정가' : "00", '시장가' : "03"}
        # Read the order parameters from the UI widgets.
        account = self.comboBox.currentText()
        order_type = self.comboBox_2.currentText()
        code = self.lineEdit.text()
        name = self.kiwoom.get_master_code_name(code)
        hoga = self.comboBox_3.currentText()
        num = self.spinBox.value()
        price = self.spinBox_2.value()
        current_time = QTime.currentTime().toString()
        now = datetime.now()
        current_date = str(now.year)+'-'+str(now.month)+'-'+str(now.day)
        # Record the buy in the DB.
        if order_type == '신규매수' :
            sql = "INSERT INTO buy_inform VALUES("
            sql = sql + "'" +current_date + "'"+ "," + "'"+ current_time + "'"+"," + "'"+name + "'"+"," + "'"+code + "'"+"," + "'수동주문'" + ")"
            self.cursor.execute(sql)
            self.con.commit()
        elif order_type == '신규매도' :
            sql = "INSERT INTO sell_inform VALUES("
            # Pull the matching buy data from buy_inform (latest row for this
            # name) so the sell record carries the original buy context.
            df = pd.read_sql("SELECT * FROM buy_inform",self.con,index_col = None)
            df_num = len(df)
            for i in range(df_num-1,-1,-1) :
                if df.loc[i,"종목명"] == name :
                    buy_date = df.loc[i,"매수날짜"]
                    buy_time = df.loc[i,"매수시각"]
                    buy_reason = df.loc[i,"매수근거"]
                    break
            # Pull valuation fields from the current holdings snapshot.
            item_count = len(self.kiwoom.opw00018_output['multi'])
            for j in range(item_count):
                row = self.kiwoom.opw00018_output['multi'][j]
                if row[0] == name :
                    sql = sql + "'" + buy_date + "','" + buy_time + "','" + current_date+"','" + current_time + "','" + row[0] + "','" + row[1] + "','" + row[2] + "','" + row[3] + "','" + row[4] + "','" + row[5] +"','" + row[6] +"','" + row[7] + "','" + buy_reason + "'," + "'수동주문'" + ")"
                    # Deleting the buy row is intentionally left disabled.
                    #delete_sql = "DELETE FROM buy_inform WHERE 종목명 = "
                    #delete_sql = delete_sql + "'" + name + "'"
                    self.cursor.execute(sql)
                    #self.cursor.execute(delete_sql)
                    self.con.commit()
                    break
        # Finally submit the order to the Kiwoom API.
        self.kiwoom.send_order("send_order_req","0101",account,order_type_lookup[order_type],code, num, price, hoga_lookup[hoga],"")
def code_change(self):
code = self.lineEdit.text()
name = self.kiwoom.get_master_code_name(code)
self.lineEdit_2.setText(name)
def timeout(self):
market_start_time = QTime(9,0,0)
current_time = QTime.currentTime()
if current_time > market_start_time and self.trade_stocks_done is False :
self.trade_stocks()
self.trade_stocks_done = True
text_time = current_time.toString("hh:mm:ss")
time_msg = "현재시간: " + text_time
state = self.kiwoom.get_connect_state()
if state == 1:
state_msg = "서버 연결 중"
else :
state_msg = "서버 미 연결 중"
self.statusbar.showMessage(state_msg + " | " + time_msg)
#종목번호를 누르면 네이버,다음 증권 정보를 불러옴
def link_btn(self):
naver_url = "https://finance.naver.com/item/fchart.nhn?code="
daum_url = "https://finance.daum.net/quotes/"
sender = self.sender()
code = sender.text()
daum_url = daum_url + "A"+ code + "#chart"
webbrowser.open_new(daum_url)
code = re.findall('\d+', code)[0]
naver_url = naver_url + code
webbrowser.open_new(naver_url)
    # Shows the currently held stocks and the account balance.
    def check_balance(self):
        """Fetch account balance (opw00001) and per-holding valuation
        (opw00018) from Kiwoom and render them into the two tables."""
        self.kiwoom.reset_opw00018_output()
        account_number = self.kiwoom.get_login_info("ACCNO")
        account_number = account_number.split(';')[0]

        # opw00018 - requests weight, purchase price, valuation, etc. per
        # holding; loop while the server reports more paged data.
        self.kiwoom.set_input_value("계좌번호", account_number)
        self.kiwoom.comm_rq_data("opw00018_req", "opw00018", 0, "2000")
        while self.kiwoom.remained_data:
            time.sleep(0.2)
            self.kiwoom.set_input_value("계좌번호", account_number)
            self.kiwoom.comm_rq_data("opw00018_req", "opw00018", 2, "2000")

        # opw00001 - deposit available two days out (D+2).
        self.kiwoom.set_input_value("계좌번호", account_number)
        self.kiwoom.comm_rq_data("opw00001_req","opw00001",0,"2000")

        # Balance row: deposit first, then the five summary fields.
        item = QTableWidgetItem(self.kiwoom.d2_deposit)
        item.setTextAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
        self.tableWidget.setItem(0,0,item)
        for i in range(1,6):
            item = QTableWidgetItem(self.kiwoom.opw00018_output['single'][i-1])
            item.setTextAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
            self.tableWidget.setItem(0,i,item)
        self.tableWidget.resizeRowsToContents()

        # Per-holding rows; column 1 becomes a chart-link button.
        item_count = len(self.kiwoom.opw00018_output['multi'])
        self.tableWidget_2.setRowCount(item_count)
        for j in range(item_count):
            row = self.kiwoom.opw00018_output['multi'][j]
            for i in range(len(row)):
                if i == 1 :
                    # NOTE(review): btn_list1 only ever grows -- repeated
                    # refreshes keep appending buttons; consider clearing.
                    self.btn_list1.append(QPushButton(self.tableWidget_2))
                    self.btn_list1[self.btn1_num].setText(row[i])
                    self.btn_list1[self.btn1_num].clicked.connect(self.link_btn)
                    self.tableWidget_2.setCellWidget(j,i,self.btn_list1[self.btn1_num])
                    self.btn1_num += 1
                else :
                    item = QTableWidgetItem(row[i])
                    item.setTextAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
                    self.tableWidget_2.setItem(j,i,item)
        self.tableWidget_2.resizeRowsToContents()
def timeout2(self):
if self.checkBox.isChecked():
self.check_balance()
    # Loads the buy list and sell list files and shows them in one table.
    def load_buy_sell_list(self):
        """Read buy_list.txt and sell_list.txt (semicolon-separated rows)
        and render buys above sells in tableWidget_3; the code column
        (index 1) becomes a chart-link button."""
        f = open("buy_list.txt",'rt', encoding='UTF8')
        buy_list = f.readlines()
        f.close()
        f = open("sell_list.txt", 'rt', encoding='UTF8')
        sell_list = f.readlines()
        f.close()

        row_count = len(buy_list) + len(sell_list)
        self.tableWidget_3.setRowCount(row_count)

        # buy list
        for j in range(len(buy_list)):
            row_data = buy_list[j]
            split_row_data = row_data.split(';')
            for i in range(len(split_row_data)):
                if i==1 :
                    # NOTE(review): button parent is tableWidget_2 although
                    # the cell widget goes into tableWidget_3 -- review.
                    self.btn_list2.append(QPushButton(self.tableWidget_2))
                    self.btn_list2[self.btn2_num].setText(split_row_data[i].rstrip())
                    self.btn_list2[self.btn2_num].clicked.connect(self.link_btn);
                    self.tableWidget_3.setCellWidget(j, i, self.btn_list2[self.btn2_num])
                    self.btn2_num += 1
                else :
                    item = QTableWidgetItem(split_row_data[i].rstrip())
                    item.setTextAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
                    self.tableWidget_3.setItem(j,i,item)

        # sell list (rows continue after the buy rows)
        for j in range(len(sell_list)):
            row_data = sell_list[j]
            split_row_data = row_data.split(';')
            for i in range(len(split_row_data)):
                if i==1 :
                    self.btn_list2.append(QPushButton(self.tableWidget_2))
                    self.btn_list2[self.btn2_num].setText(split_row_data[i].rstrip())
                    self.btn_list2[self.btn2_num].clicked.connect(self.link_btn);
                    self.tableWidget_3.setCellWidget(len(buy_list)+j, i, self.btn_list2[self.btn2_num])
                    self.btn2_num += 1
                else :
                    item = QTableWidgetItem(split_row_data[i].rstrip())
                    item.setTextAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
                    self.tableWidget_3.setItem(len(buy_list) + j, i, item)
        self.tableWidget_3.resizeRowsToContents()
#현재 작성되어 있는 매수해야하는 목록과 매도해야 하는 목록을 바탕으로 매수,매도 진행
    def trade_stocks(self):
        """Place orders for every pending record in buy_list.txt / sell_list.txt.

        For each buy record still marked '매수전' (pre-buy): log it to the
        buy_inform DB table and send a buy order through the Kiwoom API.
        For each sell record still marked '매도전' (pre-sell): join the original
        purchase info from buy_inform with the current holdings report, log a
        sell_inform row, and send a sell order.  Finally both files are
        rewritten with the processed records marked '주문완료' (order complete).

        WARNING(review): SQL statements are built by string concatenation from
        file/DB/broker data — vulnerable to SQL injection and breaks on values
        containing quotes.  Prefer parameterized queries (cursor.execute with
        '?' placeholders).
        """
        # Order-type lookup: '지정가' = limit order ("00"), '시장가' = market order ("03").
        hoga_lookup = {'지정가' : "00", '시장가': "03"}
        f = open("buy_list.txt",'rt', encoding='UTF8')
        buy_list = f.readlines()
        f.close()
        f = open("sell_list.txt",'rt', encoding='UTF8')
        sell_list = f.readlines()
        f.close()
        # Account selected in the UI combo box.
        account = self.comboBox.currentText()
        #Current Time and Date Check
        current_time = QTime.currentTime().toString()
        now = datetime.now()
        current_date = str(now.year) + '-' + str(now.month) + '-' + str(now.day)
        #buy list
        for row_data in buy_list:
            # Record layout (';'-separated): [1]=code, [2]=name, [3]=order type,
            # [4]=quantity, [5]=price, [6]=status, [7]=reason.
            split_row_data = row_data.split(';')
            hoga = split_row_data[3]
            code = split_row_data[1]
            name = split_row_data[2]
            num = split_row_data[4]
            price = split_row_data[5]
            buy_reason = split_row_data[7]
            if split_row_data[6].rstrip() == '매수전':
                sql = "INSERT INTO buy_inform VALUES("
                sql = sql + "'" + current_date + "'" + "," + "'" + current_time + "'" + "," + "'" + name + "'" + "," + "'" + code + "','" + buy_reason+ "')"
                self.cursor.execute(sql)
                self.con.commit()
                # 1 = buy order.
                self.kiwoom.send_order("send_order_req","0101",account,1,code,num,price,hoga_lookup[hoga],"")
        #sell list
        for row_data in sell_list:
            split_row_data = row_data.split(';')
            hoga = split_row_data[3]
            code = split_row_data[1]
            name = split_row_data[2]
            num = split_row_data[4]
            price = split_row_data[5]
            sell_reason = split_row_data[7]
            if split_row_data[6].rstrip() == '매도전':
                sql = "INSERT INTO sell_inform VALUES("
                # Fetch the most recent matching purchase record from buy_inform
                # (scans newest-to-oldest for this stock name).
                df = pd.read_sql("SELECT * FROM buy_inform", self.con, index_col=None)
                df_num = len(df)
                for i in range(df_num - 1, -1, -1):
                    if df.loc[i, "종목명"] == name:
                        buy_date = df.loc[i, "매수날짜"]
                        buy_time = df.loc[i, "매수시각"]
                        buy_reason = df.loc[i, "매수근거"]
                        break
                # Pull position data for this stock from the holdings report (opw00018).
                item_count = len(self.kiwoom.opw00018_output['multi'])
                for j in range(item_count):
                    row = self.kiwoom.opw00018_output['multi'][j]
                    if row[0] == name:
                        sql = sql + "'" + buy_date + "','" + buy_time + "','" + current_date + "','" + current_time + "','" + \
                              row[0] + "','" + row[1] + "','" + row[2] + "','" + row[3] + "','" + row[4] + "','" + row[
                            5] + "','" + row[6] + "','" + row[7] + "','" + buy_reason + "','" + sell_reason + "')"
                        # delete_sql = "DELETE FROM buy_inform WHERE 종목명 = "
                        # delete_sql = delete_sql + "'" + name + "'"
                        self.cursor.execute(sql)
                        # self.cursor.execute(delete_sql)
                        self.con.commit()
                        break
                # 2 = sell order.
                self.kiwoom.send_order("send_order_req", "0101", account, 2, code, num, price, hoga_lookup[hoga], "")
        #buy / sell list replace
        for i, row_data in enumerate(buy_list):
            buy_list[i] = buy_list[i].replace("매수전","주문완료")
        for i, row_data in enumerate(sell_list):
            sell_list[i] = sell_list[i].replace("매도전","주문완료")
        #file update
        f = open("buy_list.txt",'wt',encoding='UTF8')
        for row_data in buy_list :
            f.write(row_data)
        f.close()
        f = open("sell_list.txt",'wt',encoding='UTF8')
        for row_data in sell_list :
            f.write(row_data)
        f.close()
if __name__ == "__main__":
    # Launch the Qt application, show the main trading window and block until
    # the event loop exits; close the DB connection on the way out.
    app = QApplication(sys.argv)
    myWindow = MyWindow()
    myWindow.show()
    app.exec_()
    myWindow.con.close()
| [
"[email protected]"
] | |
2afd2348c69da55b22391852b71fcdcdb92ff5d2 | d1d3d14453c46d569a5933d776741d4627aabd18 | /newbookstore/books/migrations/0002_auto_20181115_0356.py | 3b17e1afbc7f7b38ef1d9f9a5cb55f519f9d27f3 | [] | no_license | lyzhao0924/rookie | aeb081eb12e4342dc30e47df751223631a1153f3 | 4d6a05410a5d6cff6315a1d74dfaf9af9eb11d8a | refs/heads/master | 2020-04-05T14:50:46.850640 | 2018-11-19T12:47:52 | 2018-11-19T12:47:52 | 156,943,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-15 03:56
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: repoint Books.image at a
    FileSystemStorage rooted at a hard-coded absolute path (taken from the
    machine the migration was generated on) with uploads under 'books/'."""

    dependencies = [
        ('books', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='books',
            name='image',
            field=models.ImageField(storage=django.core.files.storage.FileSystemStorage(location='/home/cz/cz/bookstore/static'), upload_to='books', verbose_name='商品图片'),
        ),
    ]
| [
"[email protected]"
] | |
bdc2e232268a35467bac43014ff0c2018975b6c5 | f4e1dcf694b3f0fc6005de0c543c1ae2dbedfe0b | /m14/ClassesObjectsExercise3.py | ad7c759aa1208500b387329287ca3ab14ee6d748 | [] | no_license | madhulika9293/cspp1-practice | 237f700dd5aa3e359fce25586dc4d852f8176741 | b66f3c47afa9020f9e38507f3ce2fce70e0dd30e | refs/heads/master | 2020-03-24T23:03:12.806306 | 2018-08-23T12:05:07 | 2018-08-23T12:05:07 | 143,115,986 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | class Weird(object):
def __init__(self, x, y):
self.y = y
self.x = x
def getX(self):
return x
def getY(self):
return y
class Wild(object):
def __init__(self, x, y):
self.y = y
self.x = x
def getX(self):
return self.x
def getY(self):
return self.y
X = 7
Y = 8
w1 = Weird(X, Y)
# print(w1.getY())
w2 = Wild(X, Y)
# print(w2.getX())
w3 = Wild(17, 18)
# print(w3.getX())
w4 = Wild(X, 18)
# print(w4.getY())
X = w4.getX() + w3.getX() + w2.getX()
print(X)
print(w4.getX())
Y = w4.getY() + w3.getY()
Y = Y + w2.getY()
print(Y)
| [
"[email protected]"
] | |
c1b386c649e599b7f3262762f787efd48cc491df | feff14c6b38485e0ac77e04701b020acc2ca91b4 | /App_Login/migrations/0001_initial.py | 69e691e07dc49d21cc6de529745c612481cf4e3f | [] | no_license | afiksourav/MyBlogDjango | 0531555566fdb3d319fc44a648de8e6a42188a52 | 5eec20033e9de012e1ab0ef0ee5808357ced562a | refs/heads/master | 2022-11-13T05:04:21.975383 | 2020-07-13T21:24:53 | 2020-07-13T21:24:53 | 279,412,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # Generated by Django 3.0.6 on 2020-06-19 21:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for App_Login: creates UserProfile with
    a profile picture field and a one-to-one link to the project's user model
    (deleting the user cascades to the profile)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_pic', models.ImageField(upload_to='profile_pics')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
6cba1dc85d9dbadc3e1d85216be9187b154f073e | bf9a4dc9200e5afe6a1abbf0f70d441dd13c97f7 | /examples/imagenet_in_memory.py | a26bc7ea2d4b4b7eb7f5fcdf865da3330f90324f | [
"MIT"
] | permissive | ryan-beisner/ais-etl | c077b9aaa7f07c193e77d7e11f32945ae7425ae3 | ea57d8221f9ca678878cb900ba8aefc6d6b798a1 | refs/heads/master | 2023-01-30T04:44:31.202243 | 2020-12-09T20:44:20 | 2020-12-09T20:48:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from client.ais_tar2tf import AisDataset
from client.ais_tar2tf.ops import Decode, Convert, Resize
# Training hyper-parameters.
EPOCHS = 5
BATCH_SIZE = 20
# ADJUST AisDataset PARAMETERS BELOW
BUCKET_NAME = "tar-bucket"
PROXY_URL = "http://localhost:8080"
# Create AisDataset.
# Values will be extracted from tar-records according to Resize(Convert(Decode("jpg"), tf.float32), (224, 224)) operation,
# meaning that bytes under "jpg" in tar-record will be decoded as an image, converted to tf.float32 type and then Resized to (224, 224)
# Labels will be extracted from tar-records according to Select("cls") operation, meaning that bytes under "cls" will be treated as label.
conversions = [Decode("jpg"), Convert("jpg", tf.float32), Resize("jpg", (224, 224))]
selections = ["jpg", "cls"]
ais = AisDataset(BUCKET_NAME, PROXY_URL, conversions, selections)
# prepare your bucket first with tars (for instance gsutil ls gs://lpr-gtc2020)
# NOTE(review): shard ranges "train-{0..5}" and "train-{5..10}" both include
# shard 5, so train and test data overlap — verify whether that is intended.
train_dataset = ais.load("train-{0..5}.tar", remote_exec=False,
                         num_workers=4).prefetch(EPOCHS * BATCH_SIZE).shuffle(buffer_size=1024).batch(BATCH_SIZE)
test_dataset = ais.load("train-{5..10}.tar", remote_exec=False, num_workers=4).prefetch(BATCH_SIZE).batch(BATCH_SIZE)
# TRAINING PART BELOW
# Simple MLP head over the flattened 224x224x3 image; 10 raw output units.
inputs = keras.Input(shape=(224, 224, 3), name="images")
x = layers.Flatten()(inputs)
x = layers.Dense(64, activation="relu", name="dense_1")(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# NOTE(review): MSE loss over 10 logits with integer class labels is unusual
# for classification — confirm this is deliberate (vs. a cross-entropy loss).
model.compile(optimizer=keras.optimizers.Adam(1e-4), loss=keras.losses.mean_squared_error, metrics=["acc"])
model.summary()
model.fit(train_dataset, epochs=EPOCHS)
result = model.evaluate(test_dataset)
print(dict(zip(model.metrics_names, result)))
| [
"[email protected]"
] | |
153c74e65dbe07eeb98cd70036cd1011e073448a | 49bce9657b65f2be61d6ab73917ad91b325895a1 | /Urgent Care + Pharmacies/scrapeUrgentCare.py | 5c904c05cae2c0748f5279793d17e66a352ce8b2 | [] | no_license | katerabinowitz/DC-Health | 346f838a5ef006723a6dd10183e5d6ceb98d01dc | 655daa9fe4f6618f2b1ce106c621c8923891f186 | refs/heads/master | 2021-01-19T07:02:45.732075 | 2017-05-04T00:11:23 | 2017-05-04T00:11:23 | 65,040,265 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | from bs4 import BeautifulSoup
import urllib
import pandas as pd
# Fetch the DC urgent-care listing page.
# NOTE(review): urllib.urlopen is the Python 2 API; under Python 3 this is
# urllib.request.urlopen — confirm which interpreter this script targets.
r = urllib.urlopen('https://www.urgentcarelocations.com/dc/washington-dc-urgent-care').read()
# BeautifulSoup without an explicit parser argument falls back to a
# platform-dependent default (and emits a warning on modern bs4).
soup = BeautifulSoup(r)
section = soup.find('div', {"class": "generic-block"})
uc=[]
# First <ul> after the listing block holds one <li> per clinic.
ucList= section.find_next('ul')
for li in ucList.findAll('li'):
    liT=li.getText()
    uc.append(liT)
# Persist the scraped clinic strings as a one-column CSV.
clinicLoc = pd.DataFrame({'uc':uc})
clinicLoc.to_csv('clinicLoc.csv')
| [
"[email protected]"
] | |
a3db7d66f12a60aa667ed776e6d8767506ad9942 | 4a9c4259285aa2f4f99f414771655baa120eec18 | /2 Computer Vision/Lesson 6.16 Mean Average Precision/map.py | 8f4c9ed3303a9555bcfc209f1786ce25ec4aaef2 | [] | no_license | jack239/Udacity_Car_ND_2 | 587d106a8d787e6c803ad73bc380b58ff55f6299 | 227aee67ee6d7cfd514baca4cb02f8be736b7868 | refs/heads/main | 2023-08-27T22:03:32.635773 | 2021-10-20T09:59:15 | 2021-10-20T09:59:15 | 399,463,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | import copy
import json
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
from utils import calculate_iou, check_results
def have_isec(box, boxes, min_iou = 0.5):
    """Return True when `box` overlaps any box in `boxes` with IoU strictly above `min_iou`."""
    return any(calculate_iou(box, candidate) > min_iou for candidate in boxes)
def get_pr_curve(predictions, boxes):
    """Build a precision/recall curve over score-sorted predictions.

    Each output row is [precision, recall, updated], where `updated` is 1 when
    the prediction counted as a true positive (class 1 and IoU overlap with
    some ground-truth box), else 0.  Precision uses the running prediction
    count; recall divides by the total number of ground-truth boxes.

    NOTE(review): ground-truth boxes are never consumed once matched, so one
    GT box can credit several predictions; standard mAP matches each GT at
    most once — verify against the exercise's expected results.
    """
    tp = 0
    pr_curve = []
    for pred in predictions:
        # pred = (class, box, score); only class-1 detections with sufficient
        # overlap count as true positives.
        if have_isec(pred[1], boxes) and pred[0] == 1:
            tp += 1
            updated = 1
        else:
            updated = 0
        pr_curve.append([
            tp / (len(pr_curve) + 1.),
            tp / len(boxes),
            updated
        ])
    return np.array(pr_curve)
def get_smoothed(pr_curve):
    """Smooth the precision column of a PR curve.

    Every segment that starts at a non-updated row (third column == 0) and
    runs to the next such row (or the end) has its precision replaced by the
    segment maximum.  Returns a copy of the first two columns
    [precision, recall]; rows before the first drop keep their precision.
    """
    smoothed = pr_curve[:, :2].copy()
    drops = [i for i, row in enumerate(pr_curve) if row[2] == 0]
    bounds = drops + [len(pr_curve)]
    for start, end in zip(bounds, bounds[1:]):
        smoothed[start:end, 0] = pr_curve[start:end, 0].max()
    return smoothed
def get_mAP(smoothed):
    """Integrate a smoothed [precision, recall] curve into a single mAP value.

    Area slices are accumulated when two consecutive points share the same
    recall (a precision plateau), plus one final slice up to the last point —
    mirroring the exercise's reference implementation.
    """
    mAP = 0
    floor = 0
    for i in range(smoothed.shape[0] - 1):
        if smoothed[i, 1] == smoothed[i + 1, 1]:
            mAP += (smoothed[i, 1] - floor) * smoothed[i, 0]
        floor = smoothed[i, 1]
    mAP += (smoothed[-1, 1] - floor) * smoothed[-1, 0]
    return mAP
if __name__ == '__main__':
    # load data
    with open('data/predictions.json', 'r') as f:
        preds = json.load(f)[0]
    with open('data/ground_truths.json', 'r') as f:
        gts = json.load(f)[0]
    # Pair up (class, box, score) per prediction and sort by descending score,
    # as required before computing a PR curve.
    predictions = list(zip(preds["classes"], preds["boxes"], preds["scores"]))
    predictions.sort(key = lambda x: -x[-1])
    # TODO IMPLEMENT THIS SCRIPT
    pr_curve = get_pr_curve(predictions, gts["boxes"])
    smoothed = get_smoothed(pr_curve)
    # Plot raw vs smoothed curves (column 1 = recall, column 0 = precision).
    plt.plot(pr_curve[:, 1], pr_curve[:, 0], linewidth=4)
    plt.plot(smoothed[:, 1], smoothed[:, 0], linewidth=4)
    plt.xlabel('recall', fontsize=18)
    plt.ylabel('precision', fontsize=18)
    plt.show()
    mAP = get_mAP(smoothed)
    check_results(mAP)
"[email protected]"
] | |
803e3fcb9518c166f1d01bf6522f2ec837d65a28 | 1723a49e6260df85363be4a5d55d91a88d559e23 | /book_app/urls.py | 02be9930dc0b94af033c382f721e25c8306a340d | [] | no_license | morganjohnson101/favorite_book_project | 48d8108f678ae0816a6785632ac6d321fcce7b46 | e7a2596c9012fd93724d5eb4d8025863a0520590 | refs/heads/main | 2023-06-24T11:05:13.024607 | 2021-07-27T01:44:42 | 2021-07-27T01:44:42 | 389,783,245 | 0 | 0 | null | 2021-07-27T01:44:43 | 2021-07-26T22:23:22 | Python | UTF-8 | Python | false | false | 543 | py | from django.urls import path
from . import views
# URL routes for the book app.
urlpatterns = [
    path("", views.index),
    # Authentication.
    path("register", views.register),
    path("login", views.login),
    # Book CRUD.
    path("books", views.show_all),
    path("books/create", views.create_book),
    path("books/<int:book_id>", views.show_one),
    path("books/<int:book_id>/update", views.update),
    path("books/<int:book_id>/delete", views.delete),
    # Favorite toggles.
    path("favorite/<int:book_id>", views.favorite),
    path("unfavorite/<int:book_id>", views.unfavorite),
    path("logout", views.logout)
]
"[email protected]"
] | |
55d220b76ba94ddf696701f73a8dab1d67e4b10e | 60664b143123a8109008e66a4e343776f66fa5bc | /mendel/deployer/remote_jar.py | 4f30789ba832e17d92767e6106c993aa0be58a7b | [
"MIT"
] | permissive | pseegers/mendel | e6396f323274eb25b2a893693dd57bc9a66e4b45 | c6c600b8ebfa7419b2618f905375365ad69b5d2b | refs/heads/master | 2022-03-05T03:53:16.888074 | 2019-10-15T12:44:24 | 2019-10-15T12:44:24 | 103,977,221 | 0 | 0 | null | 2017-09-18T18:38:03 | 2017-09-18T18:38:03 | null | UTF-8 | Python | false | false | 4,087 | py | """
Deploy a jar housed remotely (Nexus)
"""
import datetime
import requests
from mendel.config.service_config import ServiceConfig
from mendel.util.colors import blue
from mendel.util.colors import green
from .base import Deployer
from .mixins.nexus import NexusMixin
from .mixins.rollback import SymlinkRollbackMixin
class RemoteJarDeployer(Deployer, NexusMixin, SymlinkRollbackMixin):
    """Deploys a service jar hosted remotely in Nexus.

    The artifact is built and pushed once (``upload``), then fetched with
    ``wget`` on each target host (``install``), placed in a timestamped
    release directory and activated by flipping a symlink.
    """

    def __init__(self, service_name: str = None, config: ServiceConfig = None):
        super().__init__(service_name, config)
        # Caches the "artifact exists in Nexus" answer so we HEAD-check once.
        self._already_deployed = False

    def install(self, connection):
        """
        [advanced]\t Install jar on the remote host
        :param connection: Connection
        :return: Nothing
        """
        nexus_url = self._generate_nexus_url(connection)
        self._create_if_missing(connection, path=self._rpath('releases'))
        release_dir = self._new_release_dir(connection)
        self._create_if_missing(connection, path=self._rpath('releases', release_dir))
        current_release = self._rpath('releases', release_dir)
        connection.sudo(f'wget {nexus_url} --directory-prefix={current_release}', hide=True)
        # rename versioned jar to normal service jar
        connection.sudo(f'mv {current_release}/*.jar {current_release}/{self.config.jar_name}.jar')
        connection.sudo(f'chown {self.config.user}:{self.config.group} {current_release}/{self.config.jar_name}.jar')
        # Activate the new release by repointing the service symlink.
        self._change_symlink_to(connection=connection, release_path=self._rpath('releases', release_dir))
        print(green(self.INSTALL_SUCCESS_MESSAGE % self.config.service_name))

    def upload(self, connection):
        """
        [advanced]\t Deploy jar to nexus if not already present
        :param connection: Connection
        :return:
        """
        if not self.already_deployed(connection):
            if self.config.project_type == "java":
                print(blue('Pushing jar to nexus server'))
                connection.local('mvn deploy')
                self._already_deployed = True
            else:
                raise Exception(f"Unsupported project type: {self.config.project_type}")

    def already_built(self, connection):
        # For a remote jar, "built" is equivalent to "published to Nexus".
        return self.already_deployed(connection)

    def already_deployed(self, connection):
        """
        Check if jar has already been deployed to the central repository (nexus)
        Check first time, and caches it on an instance variable thereafter.
        :param connection: Connection
        :return: bool whether jar is already deployed
        """
        if self._already_deployed:
            return True
        else:
            nexus_url = self._generate_nexus_url(connection)
            resp = requests.head(url=nexus_url, timeout=3)
            if resp.status_code == 200:
                print(green('Already found artifact in nexus. Skipping build and upload phases...'))
                self._already_deployed = True
                return True
            else:
                # BUGFIX: was `print(print(resp.content))`, which printed the
                # body and then printed print()'s None return value.
                print(resp.content)
                print(blue(f'Artifact not found in nexus. Url checked: {nexus_url}'))
                return False

    def rollback(self, connection):
        """
        [core]\t\tchoose a version to rollback to from all available releases
        """
        return self.symlink_rollback(connection)

    def _new_release_dir(self, connection):
        """
        Generate a new release dir for the remote hosts, this needs to be the same across hosts
        Note this is overridden from the base class - this variant includes project version
        in addition to the commit hash, user and timestamp on the base class.
        :param connection: Connection
        :return: str release dir
        """
        release_dir_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
        commit_hash = self._get_commit_hash(connection)
        release_dir = f'{release_dir_timestamp}-{self.config.deployment_user}-{commit_hash}-{self.project_version}'
        print(blue(f"Release directory set to {release_dir}"))
        return release_dir
| [
"[email protected]"
] | |
1a4f91e25643cf5f7f898e8b1de1399dd21f61a6 | d2dff59e6f42ad60e84edd40e4fd940bf1e0127f | /trypython/basic/func_/func01.py | 58b9a552ef0b6fcb0f88667f73b1bfcdad0416b0 | [
"MIT"
] | permissive | devlights/try-python | fb4b49b70872c782da6cffc6ed9625f577fc9530 | 8f0e9997012c7ef5a4f71c15c98ea2832424136d | refs/heads/master | 2022-11-20T16:21:11.001413 | 2022-11-09T02:41:49 | 2022-11-09T02:41:49 | 78,821,417 | 6 | 1 | MIT | 2022-02-15T07:31:48 | 2017-01-13T06:19:46 | Python | UTF-8 | Python | false | false | 2,237 | py | # coding: utf-8
"""
関数についてのサンプルです。
"""
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
    """Demo of Python function features: default args, *args, **kwargs,
    nested functions and lambdas.  Each section below exercises one feature
    via the pr() helper."""
    def exec(self):
        # ------------------------------------------------------------
        # Optional (default) arguments
        # ------------------------------------------------------------
        self.func_with_default_val()
        # ------------------------------------------------------------
        # Packing positional arguments into a tuple with *
        # ------------------------------------------------------------
        self.func_with_args_tuples(1, 2, 'hello', 'world')
        # ------------------------------------------------------------
        # Packing keyword arguments into a dict with **
        # ------------------------------------------------------------
        self.func_with_kwargs_dict(name='hello', name2='world')
        # ------------------------------------------------------------
        # Everything combined (default + *args + **kwargs)
        # ------------------------------------------------------------
        self.func_allin('helloworld', 1, 2, 'こんにちわ世界', value1=100, value2='string value')
        # ------------------------------------------------------------
        # Inner (nested) functions
        # ------------------------------------------------------------
        def inner_func(x, y):
            return x + y
        pr('inner-func', inner_func(10, 20))
        pr('inner-func', inner_func(50, 50))
        # ------------------------------------------------------------
        # Lambda (anonymous function)
        # ------------------------------------------------------------
        lambda01 = lambda x, y: x + y
        pr('lambda', lambda01(10, 20))
        pr('lambda', lambda01(50, 50))
    def func_with_default_val(self, message=''):
        """Demonstrates a parameter with a default value."""
        pr('func_with_default_val', message)
    def func_with_args_tuples(self, *args):
        """Demonstrates *args: extra positional arguments arrive as a tuple."""
        pr('func_with_args_tuples', args)
    def func_with_kwargs_dict(self, **kwargs):
        """Demonstrates **kwargs: keyword arguments arrive as a dict."""
        pr('func_with_kwargs_dict', kwargs)
    def func_allin(self, message='', *args, **kwargs):
        """Demonstrates default + *args + **kwargs in one signature."""
        pr('func_allin', (message, args, kwargs,))
def go():
    """Instantiate the Sample demo and run it."""
    Sample().exec()
| [
"[email protected]"
] | |
2afa179259c8c63875807be60b33ef0d0407efed | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_service_now/tests/test_fn_snow_helper_update_datatable.py | 2645906bac55fabfa0c971c1d99bc22a1fb17114 | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 2,152 | py | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from sn_test_helper import *
from copy import deepcopy
import sys
PACKAGE_NAME = "fn_service_now"
FUNCTION_NAME = "fn_snow_helper_update_datatable"
# Read the default configuration-data section from the package
config_data = get_mock_config_data()
# Use custom resilient_mock
resilient_mock = SNResilientMock
def call_fn_snow_helper_update_datatable_function(circuits, function_params, timeout=10):
# Fire a message to the function
evt = SubmitTestFunction("fn_snow_helper_update_datatable", function_params)
circuits.manager.fire(evt)
event = circuits.watcher.wait("fn_snow_helper_update_datatable_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestFnSnowHelperUpdateDatatable:
""" Tests for the fn_snow_helper_update_datatable function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
inputs1 = {
"incident_id": 1001,
"task_id": 2002,
"sn_resilient_status": "A"
}
output1 = {
'inputs': deepcopy(inputs1),
'row_id': 1,
'res_id': 'RES-1001-2002',
'success': True
}
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
@pytest.mark.parametrize("inputs, expected_results", [(inputs1, output1)])
def test_success(self, circuits_app, inputs, expected_results):
""" Test calling with sample values for the parameters """
results = call_fn_snow_helper_update_datatable_function(circuits_app, inputs)
for key in expected_results:
assert(expected_results[key] == results[key]) | [
"[email protected]"
] | |
d178aa4a4bfb63b5d642528bbb59985076333526 | baefeca76d1f9bb196006fa9992b6b519471399c | /solution/services/Developers.py | 9dadfb5061427f623bdc8ebafbd3ed48a800abb2 | [] | no_license | Vitormdias/desafioTegra | 466019e20c02ae792aa8fcc9b784a0dd59b348a1 | c7f9abf16739aa32b49e3cc4dadd2f0364c7ea15 | refs/heads/master | 2020-06-10T13:58:30.558879 | 2016-12-10T19:51:42 | 2016-12-10T19:51:42 | 75,953,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from models.Developer import Developer
developers = []
def addDev(name , function):
developer = Developer(name , function)
developers.append(developer)
print (developer.presentation())
| [
"[email protected]"
] | |
f5ff61e50412d0abb1f135f65c73fb95e5e78da9 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/settings/config.py | 8a6642812356f374e997896fadbb8454d2fedcfb | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,559 | py | # 2017.02.03 21:48:57 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/settings/config.py
from constants import HAS_DEV_RESOURCES, ARENA_GUI_TYPE
_COMMON_RELEASE_PACKAGES = ('gui.Scaleform.daapi.view.common',)
_COMMON_DEBUG_PACKAGES = ('gui.development.ui.GUIEditor',)
_LOBBY_RELEASE_PACKAGES = ('gui.Scaleform.daapi.view.lobby', 'gui.Scaleform.daapi.view.lobby.barracks', 'gui.Scaleform.daapi.view.lobby.boosters', 'gui.Scaleform.daapi.view.lobby.clans', 'gui.Scaleform.daapi.view.lobby.crewOperations', 'gui.Scaleform.daapi.view.lobby.customization', 'gui.Scaleform.daapi.view.lobby.cyberSport', 'gui.Scaleform.daapi.view.lobby.exchange', 'gui.Scaleform.daapi.view.lobby.fortifications', 'gui.Scaleform.daapi.view.lobby.hangar', 'gui.Scaleform.daapi.view.lobby.header', 'gui.Scaleform.daapi.view.lobby.inputChecker', 'gui.Scaleform.daapi.view.lobby.messengerBar', 'gui.Scaleform.daapi.view.lobby.prb_windows', 'gui.Scaleform.daapi.view.lobby.profile', 'gui.Scaleform.daapi.view.lobby.server_events', 'gui.Scaleform.daapi.view.lobby.store', 'gui.Scaleform.daapi.view.lobby.techtree', 'gui.Scaleform.daapi.view.lobby.trainings', 'gui.Scaleform.daapi.view.lobby.vehiclePreview', 'gui.Scaleform.daapi.view.lobby.vehicle_compare', 'gui.Scaleform.daapi.view.lobby.wgnc', 'gui.Scaleform.daapi.view.login', 'messenger.gui.Scaleform.view.lobby')
_LOBBY_DEBUG_PACKAGES = ('gui.development.ui.messenger.view.lobby',)
_BATTLE_RELEASE_PACKAGES = ('gui.Scaleform.daapi.view.battle.shared', 'messenger.gui.Scaleform.view.battle')
_BATTLE_DEBUG_PACKAGES = ('gui.development.ui.battle',)
LOBBY_PACKAGES = _LOBBY_RELEASE_PACKAGES
BATTLE_PACKAGES = _BATTLE_RELEASE_PACKAGES
COMMON_PACKAGES = _COMMON_RELEASE_PACKAGES
BATTLE_PACKAGES_BY_ARENA_TYPE = {ARENA_GUI_TYPE.FALLOUT_CLASSIC: ('gui.Scaleform.daapi.view.battle.fallout',),
ARENA_GUI_TYPE.FALLOUT_MULTITEAM: ('gui.Scaleform.daapi.view.battle.fallout',),
ARENA_GUI_TYPE.TUTORIAL: ('gui.Scaleform.daapi.view.battle.tutorial',),
ARENA_GUI_TYPE.EVENT_BATTLES: ('gui.Scaleform.daapi.view.battle.event',)}
BATTLE_PACKAGES_BY_DEFAULT = ('gui.Scaleform.daapi.view.battle.classic',)
if HAS_DEV_RESOURCES:
LOBBY_PACKAGES += _LOBBY_DEBUG_PACKAGES
BATTLE_PACKAGES += _BATTLE_DEBUG_PACKAGES
COMMON_PACKAGES += _COMMON_DEBUG_PACKAGES
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\settings\config.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:48:57 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
bc77fd9603573442425b481cbc4eb758b7875f70 | f9db5af8998a1ce6becc191d5a133d4ba30b115b | /function/ex2.py | 340974f81985c12ac7063254ef369370ad80a64a | [] | no_license | nembangallen/Practice-Python | 0cd886264f74a2e234aea9d27ccb635370896ffd | e63ea5d269aed4eff81dd724a2819a4a7d1aecc7 | refs/heads/master | 2021-05-20T22:18:30.178237 | 2020-05-06T04:02:49 | 2020-05-06T04:02:49 | 252,435,719 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # WAP which make a new list contain square of each list's element
def doubling(li):
newList = []
for i in range(0,len(li)):
result = li[i]*li[i]
newList.append(result)
return newList
print(doubling([1,2,3,4])) | [
"[email protected]"
] | |
ccd6cee56a87d05ba6eae7b4e5626c70dbec7285 | 1968e96138a6a7849730170acb83ba570785cce6 | /field_class.py | c4e203550165688f45ac3211c1e780493c877122 | [] | no_license | margonjo/PYQT-starters | 8255a33cd14e45da5d6ee250996248ffcf74a1e5 | 2d7d9a197539551da9ac240b3577f9b51ddb06d3 | refs/heads/master | 2020-04-30T21:30:48.247676 | 2015-08-25T13:19:56 | 2015-08-25T13:19:56 | 41,358,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,627 | py | from potato_class import *
from wheat_class import *
from cow_class import *
from sheep_class import *
import random
class Field:
    """Simulate a field that can contain animals and crops"""
    def __init__(self,max_animals,max_crops):
        """Create an empty field with capacity limits for animals and crops."""
        self._crops = []
        self._animals = []
        self._max_animals = max_animals
        self._max_crops = max_crops
    def plant_crop(self,crop):
        """Add a crop if there is capacity; return True on success, False when full."""
        if len(self._crops) < self._max_crops:
            self._crops.append(crop)
            return True
        else:
            return False
    def add_animal(self,animal):
        """Add an animal if there is capacity; return True on success, False when full."""
        if len(self._animals) < self._max_animals:
            self._animals.append(animal)
            return True
        else:
            return False
    def harvest_crop(self, position):
        """Remove and return the crop at the given list index (raises IndexError if invalid)."""
        return self._crops.pop(position)
    def remove_animal(self,position):
        """Remove and return the animal at the given list index (raises IndexError if invalid)."""
        return self._animals.pop(position)
    def report_contents(self):
        """Return {'crop': [...], 'animals': [...]} of each occupant's report() string."""
        crop_report =[]
        animal_report = []
        for crop in self._crops:
            crop_report.append(crop.report())
        for animal in self._animals:
            animal_report.append(animal.report())
        return {"crop": crop_report, "animals" :animal_report}
    def report_needs(self):
        """Aggregate resource needs: food is summed over animals; light and
        water are the maximum required by any single occupant."""
        food = 0
        light = 0
        water = 0
        for crop in self._crops:
            needs = crop.needs()
            if needs["light need"] > light:
                light = needs["light need"]
            if needs["water need"] > water:
                water = needs["water need"]
        for animal in self._animals:
            needs = animal.needs()
            food+= needs ["food need"]
            if needs["water need"] > water:
                water = needs ["water need"]
        return {"food":food, "light":light,"water":water}
    def grow(self,light,food,water):
        """Advance one day: grow every crop with the given light/water, then
        feed the animals.

        Food distribution: any surplus above the animals' combined requirement
        becomes `additional_food`, handed out one unit at a time (in list
        order) on top of each animal's base need.  When food is short, animals
        are fed in list order until it runs out; later animals get nothing
        (their grow() is not called at all).
        """
        if len(self._crops)>0:
            for crop in self._crops:
                crop.grow(light,water)
        if len(self._animals)> 0:
            food_required = 0
            for animal in self._animals:
                needs = animal.needs()
                food_required += needs["food need"]
            if food > food_required:
                additional_food = food - food_required
                food = food_required
            else:
                additional_food = 0
            for animal in self._animals:
                needs = animal.needs()
                if food >= needs["food need"]:
                    food-=needs["food need"]
                    feed = needs["food need"]
                    if additional_food>0:
                        additional_food -= 1
                        feed += 1
                    animal.grow(feed,water)
def auto_grow(field, days):
    """Grow *field* once per simulated day with random light (1-10),
    water (1-10) and food (1-100)."""
    for _ in range(days):
        sunlight = random.randint(1, 10)
        rainfall = random.randint(1, 10)
        feed = random.randint(1, 100)
        field.grow(sunlight, feed, rainfall)
def manual_grow(field):
    """Prompt the user for light, water and food values, then grow *field* once.

    The three near-identical validation loops of the original are collapsed
    into the _read_value helper; prompts and error messages are unchanged.
    """
    light = _read_value("light", 1, 10)
    water = _read_value("water", 1, 10)
    food = _read_value("food", 1, 100)
    field.grow(light, food, water)

def _read_value(name, lower, upper):
    """Keep prompting until the user enters an integer in [lower, upper]; return it."""
    error = "Value entered is not valid - please enter a value between {0} and {1}".format(lower, upper)
    while True:
        try:
            value = int(input("Please enter a {0} value ({1}-{2}): ".format(name, lower, upper)))
        except ValueError:
            # Non-numeric input: same message as an out-of-range value.
            print(error)
            continue
        if lower <= value <= upper:
            return value
        print(error)
def display_crops(crop_list):
    """Print a 1-based numbered listing of each crop's report() string."""
    print()
    print("The following crops are in this field: ")
    for position, crop in enumerate(crop_list, start=1):
        print("{0:>2}. {1}".format(position, crop.report()))
def display_animals(animal_list):
    """Print a 1-based numbered listing of each animal's report() string."""
    print()
    print("The following animals are in this field: ")
    for position, animal in enumerate(animal_list, start=1):
        print("{0:>2}. {1}".format(position, animal.report()))
def select_crop(length_list):
    """Prompt until the user picks a crop number in 1..length_list.

    Returns the corresponding 0-based list index.  Non-numeric input is now
    handled (the original crashed with ValueError), matching
    get_menu_choice's behaviour.
    """
    valid = False
    while not valid:
        try:
            selected = int(input("please select a crop: "))
        except ValueError:
            print("Please select a valid option")
            continue
        if selected in range(1, length_list + 1):
            valid = True
        else:
            print("Please select a valid option")
    return selected - 1
def select_animal(length_list):
    """Prompt until the user picks an animal number in 1..length_list.

    Returns the corresponding 0-based list index.  Non-numeric input is now
    handled (the original crashed with ValueError), matching
    get_menu_choice's behaviour.
    """
    valid = False
    while not valid:
        try:
            selected = int(input("please select an animal: "))
        except ValueError:
            print("Please select a valid option")
            continue
        if selected in range(1, length_list + 1):
            valid = True
        else:
            print("Please select a valid option")
    return selected - 1
def harvest_crop_from_field(field):
    """List the field's crops, let the user pick one, and return the harvested crop."""
    display_crops(field._crops)
    picked = select_crop(len(field._crops))
    return field.harvest_crop(picked)
def remove_animal_from_field(field):
    """List the field's animals, let the user pick one, and return the removed animal."""
    display_animals(field._animals)
    picked = select_animal(len(field._animals))
    return field.remove_animal(picked)
def display_crop_menu():
    """Print the add-crop sub-menu (option 0 returns to the main menu)."""
    lines = (
        "",
        "Which crop type would you like to add?",
        "",
        "1. Potato",
        "2. Wheat",
        "",
        "0. I don't want to add a crop = return me to the main menu",
        "",
        "Please select an option from the above menu",
    )
    for line in lines:
        print(line)
def display_animal_menu():
    """Print the add-animal sub-menu (option 0 returns to the main menu)."""
    lines = (
        "",
        "Which animal type would you like to add?",
        "",
        "1. Cow",
        "2. Sheep",
        "",
        "0. I don't want to add an animal = return me to the main menu",
        "",
        "Please select an option from the above menu",
    )
    for line in lines:
        print(line)
def display_main_menu():
    """Print the main menu.

    BUGFIX: the exit option is printed as 0, matching manage_field's handler
    and the other sub-menus.  The original advertised "8. Exit", which
    get_menu_choice(0, 7) rejected, making the printed exit option unusable.
    """
    print()
    print("1. Plant a new crop")
    print("2. Harvest a crop")
    print()
    print("3. Add an animal")
    print("4. Remove animal")
    print()
    print("5.Grow field manually over 1 day")
    print("6. Grow field automatically over 30 days")
    print()
    print("7. Report field status")
    print()
    print("0. Exit test program")
    print()
    print("Please select an option from the above menu")
def get_menu_choice(lower, upper):
    """Prompt until the user types an integer in [lower, upper]; return it."""
    while True:
        try:
            choice = int(input("Option selected: "))
        except ValueError:
            # BUGFIX: duplicated word in the original message ("a a valid").
            print("Please enter a valid option")
            continue
        if lower <= choice <= upper:
            return choice
        print("please enter a valid option")
def plant_crop_in_field(field):
    """Show the crop menu and plant the chosen crop type in *field*.

    Choice 0 plants nothing.  The two duplicated if/elif branches are merged
    via a lookup table; BUGFIX: the potato branch's "Field if full" message
    typo is corrected to "Field is full".
    """
    display_crop_menu()
    choice = get_menu_choice(0, 2)
    # Menu number -> (constructor, display name).
    crop_types = {1: (Potato, "potato"), 2: (Wheat, "wheat")}
    if choice in crop_types:
        crop_cls, label = crop_types[choice]
        if field.plant_crop(crop_cls()):
            print()
            print("Crop planted")
            print()
        else:
            print()
            print("Field is full - {0} not planted".format(label))
            print()
def add_animal_to_field(field):
    """Ask which animal to add and put it in ``field``, reporting the outcome.

    Choice 0 (or any unknown choice) returns without adding anything.
    """
    display_animal_menu()
    selection = get_menu_choice(0, 2)
    animal_types = {1: (Cow, "cow"), 2: (Sheep, "sheep")}
    if selection in animal_types:
        factory, label = animal_types[selection]
        if field.add_animal(factory()):
            print()
            print("Animal added")
        else:
            print()
            print("Field is full - {0} not added".format(label))
            print()
def manage_field(field):
    """Interactive loop: show the main menu and dispatch the chosen action.

    Runs until the user picks an exit option.  Fixes:
    - option 8 is now accepted and exits, since the original menu
      advertised "8. Exit" while the code only exited on 0 (0 still
      exits, for compatibility);
    - local ``exit`` renamed to avoid shadowing the builtin;
    - "managment" typo in the farewell message corrected.
    """
    print("This is the field management program")
    print()
    finished = False
    while not finished:
        display_main_menu()
        option = get_menu_choice(0, 8)
        print()
        if option == 1:
            plant_crop_in_field(field)
        elif option == 2:
            removed_crop = harvest_crop_from_field(field)
            print("you have removed the crop {0}".format(removed_crop))
        elif option == 3:
            add_animal_to_field(field)
        elif option == 4:
            removed_animal = remove_animal_from_field(field)
            print("You removed the animal: {0}".format(removed_animal))
        elif option == 5:
            manual_grow(field)
        elif option == 6:
            auto_grow(field, 30)
        elif option == 7:
            print(field.report_contents())
        elif option in (0, 8):
            finished = True
            print()
            print("Thank you for using the field management program")
def main():
    """Entry point: build a demo field and run the management loop."""
    demo_field = Field(5, 2)
    manage_field(demo_field)
# Run the interactive manager only when executed as a script.
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
77814a795ea3813e81d93c90531970828da38a14 | 3ef8ee2104e5c549178c66dfbf49a9bc3f85d8af | /krogon/k8s/k8s_kubectl.py | 63372e8667bd259d2a462dd0e1fd65eb741a7ac3 | [
"MIT"
] | permissive | enamrik/krogon | a84fa7da9b54213cb83a433e4deee23400826896 | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | refs/heads/master | 2023-05-27T09:56:14.781023 | 2020-03-18T05:34:08 | 2020-03-19T10:50:24 | 167,399,046 | 1 | 0 | MIT | 2023-05-22T22:30:57 | 2019-01-24T16:26:12 | Python | UTF-8 | Python | false | false | 452 | py | from krogon.k8s.template_context import TemplateContext
class K8sKubectl:
    """Wraps a raw kubectl command so it can be applied to a template context."""

    def __init__(self, command: str):
        self.command = command

    def map_context(self, context: TemplateContext):
        """Run the stored command against the context's cluster, if one is set.

        When the context has no 'cluster_name' state the command is skipped.
        The context is returned either way.
        """
        target_cluster = context.get_state('cluster_name')
        if target_cluster is None:
            return context
        context.kubectl.cmd(self.command, target_cluster)
        return context
def kubectl(command: str) -> K8sKubectl:
    """Convenience factory: build a K8sKubectl step for ``command``."""
    return K8sKubectl(command=command)
| [
"[email protected]"
] | |
8252f1e7da2718c437d1f0ec4a2477c72156d66a | c1ec27123bec4da6678c133010bf041246bc0860 | /user/migrations/0002_auto_20200509_1722.py | 968043f4ab7a8869b088c819e6b3eb7cd5ef7f10 | [] | no_license | nisargptl/eTARKARIPASAL | 7118689390cdfd6191e88a7ddb35061a8696914b | 785631db9244b7d715983c2fcb2c61dc35f13424 | refs/heads/master | 2023-08-25T08:41:03.870188 | 2021-10-07T13:08:35 | 2021-10-07T13:08:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | # Generated by Django 3.0.5 on 2020-05-09 11:37
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames Job.userlink to Job.user."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        # Pure rename: data is preserved, only the field name changes.
        migrations.RenameField(
            model_name='job',
            old_name='userlink',
            new_name='user',
        ),
    ]
| [
"[email protected]"
] | |
b6026b9683f2dae7ce827f723a2abf7fde6259fc | d12a2c3351e8573ac8ace81603e1d94959526913 | /pynlp/wrapper.py | 3c837f9581718279f421e8382fd4d19bc3726966 | [
"MIT"
] | permissive | Agnon1573/pynlp | fbddc1b818058c0f57acf5f593cbc5d0a752bdb7 | eec3cd2aba69804866a67040bb717671ef12fda1 | refs/heads/master | 2021-04-06T07:07:50.915053 | 2018-02-20T20:05:47 | 2018-02-20T20:05:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,986 | py | from . import protobuf
class Document:
    """Read-only wrapper around an annotated protobuf document
    (Stanford CoreNLP-style fields: sentence, mentions, corefChain, quote).
    """
    def __init__(self, proto_doc):
        self._doc = proto_doc
    def __str__(self):
        return self._doc.text
    def __len__(self):
        # Token count, taken from the end index of the very last token.
        return self._doc.sentence[-1].token[-1].tokenEndIndex
    def __repr__(self):
        return '<{}: [sentences: {}, tokens: {}]>'.format(
            __class__.__name__,
            len(self._doc.sentence),
            self._doc.sentence[-1].token[-1].tokenEndIndex
        )
    def __iter__(self):
        # Lazily yields Sentence wrappers in document order.
        return (Sentence(self._doc, proto_sentence) for proto_sentence in self._doc.sentence)
    def __getitem__(self, item):
        return Sentence(self._doc, self._doc.sentence[item])
    def __eq__(self, other):
        return self._doc == other._doc
    def __bytes__(self):
        return self.to_bytes()
    @property
    def sentences(self):
        # Generator of Sentence wrappers (same as iterating the document).
        return (Sentence(self._doc, proto_sentence) for proto_sentence in self._doc.sentence)
    @property
    def tokens(self):
        # Generator over all tokens of all sentences, in order.
        return (Token(self._doc, proto_sentence, proto_token)
                for proto_sentence in self._doc.sentence
                for proto_token in proto_sentence.token)
    def to_bytes(self):
        """Serialize the underlying protobuf document."""
        return protobuf.to_bytes(self._doc)
    @classmethod
    def from_bytes(cls, bytes_):
        """Alternate constructor: deserialize a protobuf document."""
        return cls(protobuf.from_bytes(bytes_))
    @property
    def text(self):
        return self._doc.text
    @property
    def entities(self):
        return [NamedEntity(self._doc, proto_mention) for proto_mention in self._doc.mentions]
    @property
    def coref_chains(self):
        return [CorefChain(self._doc, proto_coref) for proto_coref in self._doc.corefChain]
    def coref_chain(self, chain_id):
        """Return the CorefChain with the given id; raise IndexError if absent."""
        for proto_chain in self._doc.corefChain:
            if proto_chain.chainID == chain_id:
                return CorefChain(self._doc, proto_chain)
        # NOTE(review): message typo — "exits" should read "exists".
        raise IndexError('No CorefChain with id={} exits.'.format(chain_id))
    @property
    def quotes(self):
        return [Quote(self._doc, proto_quote) for proto_quote in self._doc.quote]
class Sentence:
    """Wrapper around one protobuf sentence; str() reconstructs the text
    from each token's original text plus its trailing whitespace.
    """
    def __init__(self, proto_doc, proto_sentence):
        self._doc = proto_doc
        self._sentence = proto_sentence
    def __str__(self):
        return ''.join([(t.originalText + t.after) for t in self._sentence.token])
    def __len__(self):
        # Number of tokens in this sentence.
        return len(self._sentence.token)
    def __repr__(self):
        return '<{} : [index: {}, tokens: {}]>'.format(
            __class__.__name__,
            self._sentence.sentenceIndex,
            self._sentence.token[-1].tokenEndIndex
        )
    def __eq__(self, other):
        return self._sentence == other._sentence and self._doc == other._doc
    def __iter__(self):
        return (Token(self._doc, self._sentence, proto_token) for proto_token in self._sentence.token)
    def __getitem__(self, item):
        return Token(self._doc, self._sentence, self._sentence.token[item])
    @property
    def index(self):
        # Zero-based position of this sentence within the document.
        return self._sentence.sentenceIndex
    @property
    def tokens(self):
        return [Token(self._doc, self._sentence, proto_token) for proto_token in self._sentence.token]
    @property
    def entities(self):
        return [NamedEntity(self._doc, proto_mention) for proto_mention in self._sentence.mentions]
    @property
    def sentiment(self):
        return self._sentence.sentiment
    @property
    def coref_mentions(self):
        # todo: implement coref mention (mentionsForCoref)
        raise NotImplementedError('Method under development.')
    @property
    def relations(self):
        # todo: implement relations
        raise NotImplementedError('Method under development.')
    @property # parseTree, annotatedParseTree, binarizedParseTree
    def parse_tree(self):
        # todo: implement parse tree (ParseTree class?)
        raise NotImplementedError('Method under development.')
    # todo: implement dependencies & e & epp
class Token:
    """Wrapper around one protobuf token, keeping references to its
    sentence and document so related wrappers can be constructed.
    """
    def __init__(self, proto_doc, proto_sentence, proto_token):
        self._doc = proto_doc
        self._sentence = proto_sentence
        self._token = proto_token
    def __str__(self):
        return self._token.originalText
    def __repr__(self):
        return '<{}: [sentence: {}, index: {}]>'.format(
            __class__.__name__,
            self._sentence.sentenceIndex,
            self._token.beginIndex
        )
    def __eq__(self, other):
        return self._token == other._token and \
            self._sentence == other._sentence and \
            self._doc == other._doc
    def __hash__(self): # this is not foolproof!
        # Hashes only on text and character span; two tokens with the same
        # span in different documents would collide (hence the caveat above).
        return hash((self._token.originalText,
                     self._token.beginChar,
                     self._token.endChar))
    @property
    def word(self):
        return self._token.word
    @property
    def ws(self):
        # Whitespace following the token in the original text.
        return self._token.after
    @property
    def word_ws(self):
        # Word plus its trailing whitespace.
        return self._token.word + self._token.after
    @property
    def pos(self):
        return self._token.pos
    @property
    def ner(self):
        return self._token.ner
    @property
    def lemma(self):
        return self._token.lemma
    @property
    def sentence(self):
        # The enclosing sentence, wrapped.
        return Sentence(self._doc, self._sentence)
class Root(Token):
    """Synthetic ROOT token (e.g. for dependency trees): every textual
    property returns the literal 'ROOT'.

    NOTE(review): ``_token`` is set to None here, so the inherited ``ws``
    property would raise AttributeError if ever called — confirm it is
    never used on Root instances.
    """
    def __init__(self, proto_doc, proto_sentence):
        super().__init__(proto_doc, proto_sentence, None)
    def __eq__(self, other):
        return self._sentence == other._sentence and self._doc == other._doc
    def __hash__(self):
        return hash('ROOT')
    def __str__(self):
        return 'ROOT'
    def __repr__(self):
        return '<Token: [sentence: {}, index: ROOT]>'.format(
            self._sentence.sentenceIndex
        )
    @property
    def word(self):
        return 'ROOT'
    @property
    def word_ws(self):
        return 'ROOT'
    @property
    def pos(self):
        return 'ROOT'
    @property
    def ner(self):
        return 'ROOT'
    @property
    def lemma(self):
        return 'ROOT'
class NamedEntity:
    """Wrapper around one entity mention; eagerly collects the proto tokens
    covered by the mention's [start, end) token span.
    """
    def __init__(self, proto_doc, proto_mention):
        self._doc = proto_doc
        self._mention = proto_mention
        self._sentence = self._doc.sentence[self._mention.sentenceIndex]
        self._tokens = [token for token in self._sentence.token
                        if token.tokenBeginIndex >= self._mention.tokenStartInSentenceInclusive
                        and token.tokenEndIndex <= self._mention.tokenEndInSentenceExclusive]
    def __str__(self):
        # Space-joined surface form of the mention's tokens.
        return ' '.join([token.originalText for token in self._tokens])
    def __repr__(self):
        return '<{}: [type: {}, sentence: {}]>'.format(
            __class__.__name__,
            self._mention.entityType,
            self._sentence.sentenceIndex
        )
    def __getitem__(self, item):
        return Token(self._doc, self._sentence, self._tokens[item])
    @property
    def type(self):
        return self._mention.entityType
    @property
    def ner(self):
        return self._mention.ner
    @property
    def normalized_ner(self):
        return self._mention.normalizedNER
class CorefChain:
    """Wrapper around one coreference chain (one referent plus its mentions).

    NOTE(review): ``_index`` is never read within this class — appears unused.
    """
    def __init__(self, proto_doc, proto_coref):
        self._doc = proto_doc
        self._coref = proto_coref
        self._index = 0
    def __repr__(self):
        return '<{}: [chain_id: {}, length: {}]>'.format(
            __class__.__name__,
            self._coref.chainID,
            len(self._coref.mention)
        )
    def __str__(self):
        # Renders each sentence containing a mention, with every mention
        # wrapped in "(...)-[id=N]" and the representative mention doubly
        # parenthesized "((...))-[id=N]".
        referent = self._coref.mention[self._coref.representative]
        references = {}
        # Group the chain's mentions by the sentence they occur in.
        for reference in self._coref.mention:
            references.setdefault(reference.sentenceIndex, []).append(reference)
        string = ''
        for sentence_index in sorted(references):
            words = []
            whitespace = []
            for token in self._doc.sentence[sentence_index].token:
                words.append(token.originalText)
                whitespace.append(token.after)
            # Tag mentions left-to-right; edits are applied in place on `words`.
            for ref in sorted(references[sentence_index], key=lambda r: r.beginIndex):
                left_tag = '('
                right_tag = ')-[id={}]'.format(ref.mentionID)
                if ref.mentionID == referent.mentionID:
                    left_tag = '(' + left_tag
                    right_tag = ')' + right_tag
                words[ref.beginIndex] = left_tag + words[ref.beginIndex]
                words[ref.endIndex - 1] += right_tag
            for index, word in enumerate(words):
                string += word + whitespace[index]
            string += '\n'
        return string
    def __iter__(self):
        return (Coreference(self._doc, self._coref, mention) for mention in self._coref.mention)
    def __getitem__(self, item):
        # Lookup is by mention id (coref_id), not positional index.
        if not isinstance(item, int):
            raise KeyError('Index by coref_id for coreference.')
        for proto_mention in self._coref.mention:
            if proto_mention.mentionID == item:
                return Coreference(self._doc, self._coref, proto_mention)
        # NOTE(review): message typo — "exits" should read "exists".
        raise KeyError('No coreference with id={} exits.'.format(item))
    @property
    def chain_id(self):
        return self._coref.chainID
    @property
    def referent(self):
        # The representative mention of the chain, wrapped.
        proto_coref_mention = self._coref.mention[self._coref.representative]
        return Coreference(self._doc, self._coref, proto_coref_mention)
class Coreference:
    """Wrapper around one mention of a coreference chain; eagerly collects
    the proto tokens in the mention's [beginIndex, endIndex) span.
    """
    def __init__(self, proto_doc, proto_coref_chain, proto_coref_mention):
        self._doc = proto_doc
        self._coref_chain = proto_coref_chain
        self._coref_mention = proto_coref_mention
        sentence_index = proto_coref_mention.sentenceIndex
        token_span = range(self._coref_mention.beginIndex, self._coref_mention.endIndex)
        self._tokens = [proto_doc.sentence[sentence_index].token[token_index] for token_index in token_span]
    def __repr__(self):
        ref_id = self._coref_chain.mention[self._coref_chain.representative].mentionID
        return '<{}: [coref_id: {}, chain_id: {}, referent: {}]>'.format(
            self.__class__.__name__,
            self._coref_mention.mentionID,
            self._coref_chain.chainID,
            ref_id)
    def __str__(self):
        return ' '.join([token.originalText for token in self._tokens])
    def __getitem__(self, item):
        return Token(self._doc, self._doc.sentence[self._coref_mention.sentenceIndex],
                     self._tokens[item])
    def chain(self):
        """Return the chain this mention belongs to, wrapped."""
        return CorefChain(self._doc, self._coref_chain)
    @property
    def is_referent(self):
        # True when this mention is the chain's representative mention.
        referent_id = self._coref_chain.mention[self._coref_chain.representative].mentionID
        return self._coref_mention.mentionID == referent_id
    @property
    def coref_id(self):
        return self._coref_mention.mentionID
    @property
    def type(self):
        return self._coref_mention.mentionType
    @property
    def number(self):
        return self._coref_mention.number
    @property
    def gender(self):
        return self._coref_mention.gender
    @property
    def animacy(self):
        return self._coref_mention.animacy
    @property
    def head(self):
        return self._coref_mention.head
class Quote:
    """Wrapper around one quotation; indexable by sentence offset within
    the quote's [sentenceBegin, sentenceEnd] range.
    """
    def __init__(self, proto_doc, proto_quote):
        self._doc = proto_doc
        self._quote = proto_quote
    def __repr__(self):
        return '<{}: {}>'.format(__class__.__name__, self._quote.text)
    def __str__(self):
        return self._quote.text
    def __getitem__(self, item):
        # Offset 0 is the quote's first sentence; the upper bound is
        # inclusive (sentenceEnd - sentenceBegin).
        if 0 <= item <= self._quote.sentenceEnd - self._quote.sentenceBegin:
            return Sentence(self._doc,
                            self._doc.sentence[
                                self._quote.sentenceBegin + item
                            ])
        else:
            raise IndexError('Quote contains {} sentences.'.format(
                self._quote.sentenceEnd - self._quote.sentenceBegin + 1
            ))
    @property
    def text(self):
        # Strips the first and last character (presumably the quotation
        # marks surrounding the quote).
        return self._quote.text[1:-1]
| [
"[email protected]"
] | |
10b6ecfda3a11b6ad25590d0715b1afe2cacdbc5 | eb07468e74dd9b0961dd3792b5891d3c1497397c | /train_contrastive_moco.py | 3413d76ba59069136b962c6d5125d9a1471ac074 | [] | no_license | dewenzeng/CL-TCI | 32696aee5ae173c2b9d8ac146f8541198e7a3f73 | 2649745b6c4402ba97ba903ec3d2d602668cefce | refs/heads/main | 2023-07-25T01:56:49.903085 | 2021-09-05T20:36:57 | 2021-09-05T20:36:57 | 403,154,551 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,030 | py | import os
from datetime import datetime
from utils import *
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from network.unet2d import UNet2D_contrastive
from network.deeplabv3plus import deeplabv3_resnet50_contrast
from dataset.bch import BCH
from myconfig import get_config
from lr_scheduler import LR_Scheduler
from torch.utils.tensorboard import SummaryWriter
from PytorchExperimentLogger import PytorchExperimentLogger
from network.moco import MoCo
def main():
    """Train a MoCo contrastive model on the BCH dataset.

    Builds the output/log directories, the model (UNet2D or DeepLabV3+
    backbone), the data loader, optimizer and LR scheduler, then runs the
    training loop, logging to TensorBoard and saving the latest weights
    each epoch.

    Fixes:
    - ``args.save is ''`` compared identity with a string literal
      (fragile, raises SyntaxWarning); now uses equality;
    - unknown ``args.model_name`` now raises a clear ValueError instead
      of leaving ``model`` unbound.
    """
    # initialize config
    args = get_config()
    if args.save == '':
        # Default experiment suffix: timestamp of the run.
        args.save = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    save_path = os.path.join(args.results_dir, args.experiment_name + args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    logger = PytorchExperimentLogger(save_path, "elog", ShowTerminal=True)
    model_result_dir = os.path.join(save_path, 'model')
    if not os.path.exists(model_result_dir):
        os.makedirs(model_result_dir)
    args.model_result_dir = model_result_dir
    logger.print(f"saving to {save_path}")
    writer = SummaryWriter('runs/' + args.experiment_name + args.save)
    # setup cuda
    args.device = torch.device(args.device if torch.cuda.is_available() else "cpu")
    logger.print(f"the model will run on device {args.multiple_device_id}")
    # create model
    logger.print("creating model ...")
    if args.model_name == 'unet':
        model = MoCo(UNet2D_contrastive, dim=args.classes, K=3072, m=0.999, T=args.temp)
    elif args.model_name == 'deeplab':
        model = MoCo(deeplabv3_resnet50_contrast, dim=args.classes, K=3072, m=0.999, T=args.temp, weight_func=args.weight_func)
    else:
        raise ValueError(f"unknown model_name: {args.model_name!r}")
    model.to(args.device)
    model = torch.nn.DataParallel(model, device_ids=args.multiple_device_id)
    num_parameters = sum([l.nelement() for l in model.module.parameters()])
    logger.print(f"number of parameters: {num_parameters}")
    train_dataset = BCH(keys=None, purpose='train', args=args)
    logger.print('training data dir ' + train_dataset.data_dir)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_works, drop_last=True)
    # define loss function (criterion) and optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs, len(train_loader), warmup_epochs=10)
    for epoch in range(args.epochs):
        train_loss = train(train_loader, model, epoch, optimizer, scheduler, logger, args)
        logger.print('\n Epoch: {0}\t'
                     'Training Loss {train_loss:.4f} \t'
                     .format(epoch + 1, train_loss=train_loss))
        writer.add_scalar('training_loss', train_loss, epoch)
        # save model (overwritten every epoch; only the latest is kept)
        save_dict = {"net": model.module.state_dict()}
        torch.save(save_dict, os.path.join(args.model_result_dir, "latest.pth"))
def train(data_loader, model, epoch, optimizer, scheduler, logger, args):
    """Run one training epoch; return the running-average loss.

    Each batch yields two augmented views (img1, img2) plus a label tensor
    that is forwarded to the MoCo model as a pseudo label.
    """
    model.train()
    losses = AverageMeter()
    for batch_idx, tup in enumerate(data_loader):
        # Per-iteration LR update (scheduler mutates optimizer's param groups).
        scheduler(optimizer, batch_idx, epoch)
        img1, img2, label = tup
        # Move the two views and the pseudo label to the training device.
        image1_var = Variable(img1.float(), requires_grad=False).to(args.device)
        image2_var = Variable(img2.float(), requires_grad=False).to(args.device)
        label = Variable(label.long()).to(args.device)
        loss = model(im_q=image1_var, im_k=image2_var, pseudo_label=label, vanilla=args.use_vanilla)
        # .mean() reduces the per-GPU losses returned under DataParallel.
        loss = loss.mean()
        bsz = img1.shape[0]
        losses.update(loss.item(), bsz)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        logger.print(f"epoch:{epoch}, batch:{batch_idx}/{len(data_loader)}, lr:{optimizer.param_groups[0]['lr']:.6f}, loss:{losses.avg:.4f}")
    return losses.avg
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
3171978f93b3168bab215f986e9f986ff29097f8 | 457fe4851c4bba44d8667bb6dfb8fd07dfd71f8c | /rideshare.py | 2dd782b60a82ec8e5f4b0955c7bb358c0c7afa0a | [] | no_license | j0d0nn/rideshare | 0063eb28bffbb03b04b8c40220d7fab00c54cd12 | 2daec1f54560e22e5716165239165467e77f4a18 | refs/heads/master | 2020-12-24T13:45:08.328410 | 2013-09-16T00:38:46 | 2013-09-16T00:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | import webapp2
from apps.home import HomePage
from apps.needaride import NeedARidePage
from apps.givearide import GiveARidePage
from apps.orgselection import OrgSelectionPage
from apps.removedriver import RemoveDriverPage
from apps.finddriver import FindDriverAjax
from apps.savedriver import SaveDriverAjax
from apps.admin.orgmaintenance import AdminOrgMaintenancePage
# webapp2 WSGI routing table.  `(\w+)` captures the organisation slug and is
# passed to each handler; the bare '/' falls back to organisation selection.
application = webapp2.WSGIApplication([
    # pages
    (r'/(\w+)/needaride', NeedARidePage),
    (r'/(\w+)/givearide', GiveARidePage),
    (r'/(\w+)/removedriver', RemoveDriverPage),
    (r'/orgselection', OrgSelectionPage),
    (r'/(\w+)/', HomePage),
    (r'/(\w+)', HomePage),
    (r'/', OrgSelectionPage),
    # ajax servlets
    (r'/(\w+)/finddriver', FindDriverAjax),
    (r'/(\w+)/savedriver', SaveDriverAjax),
    # admin stuff
    (r'/admin/orgs', AdminOrgMaintenancePage),
    ], debug=True) | [
"[email protected]"
] | |
6d4b2e4a0bcd107e940a784d4f8a9dc27e8799b9 | 8ae0bd8fc5ceceacd839b941c6327c12f31f46b5 | /rhea/system/_reset.py | 011efb899f21b83303633ffec293f92054cce856 | [
"MIT"
] | permissive | gbin/rhea | 9406ef72985b80ea44ad2a3a699b2842f8d45157 | 7bf8a9b5446fe6777bd15535bb29eb62196287a5 | refs/heads/master | 2020-05-25T11:28:12.059208 | 2015-12-11T19:45:14 | 2015-12-11T19:45:14 | 47,847,156 | 0 | 0 | null | 2015-12-11T19:38:06 | 2015-12-11T19:38:06 | null | UTF-8 | Python | false | false | 777 | py |
import myhdl
from myhdl import delay
class Reset(myhdl.ResetSignal):
    """myhdl.ResetSignal subclass with a testbench ``pulse`` generator.

    The constructor's third argument was originally named ``async``, which
    became a reserved keyword in Python 3.7 and made this module
    unparseable on modern interpreters; it is renamed to ``isasync``
    (matching MyHDL's own rename).  Positional callers are unaffected.
    """

    def __init__(self, val, active, isasync):
        myhdl.ResetSignal.__init__(self, val, active, isasync)

    def pulse(self, delays=10):
        """Generator driving a reset pulse.

        ``delays`` is either an int (assert reset, wait that long, then
        deassert) or a tuple of 1-3 ints (wait/toggle per entry, finishing
        deasserted; with 3 entries the pulse starts deasserted).
        """
        if isinstance(delays, int):
            self.next = self.active
            yield delay(delays)
            self.next = not self.active
        elif isinstance(delays, tuple):
            assert len(delays) in (1, 2, 3), "Incorrect number of delays"
            self.next = not self.active if len(delays) == 3 else self.active
            for dd in delays:
                yield delay(dd)
                # Toggle relative to the current signal value after each wait.
                self.next = not self.val
            self.next = not self.active
        else:
            raise ValueError("{} type not supported".format(type(delays)))
| [
"[email protected]"
] | |
14d4fc889928f8aa49f5e23e4588fb20c4de6a12 | a70f382e2719c6a6755f5b7f9b8beed59bc07be7 | /models/bert_deletion.py | c310f5783af7077e9d6c31b1e112976f66abccd8 | [] | no_license | horsedongmin/UMS-ResSel | 9a5b109223743125187a52d177e8ce4a1254a65d | 635e37f5340faf5a37f3b1510a9402be18348c66 | refs/heads/master | 2022-12-12T17:30:20.610294 | 2020-09-15T01:02:00 | 2020-09-15T01:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | import torch
import torch.nn as nn
import pickle
class BertDeletion(nn.Module):
    """Auxiliary "deletion" head on top of a pretrained BERT-style encoder:
    for each dialog, scores candidate deletion positions with a linear
    classifier and returns the mean auxiliary loss over the batch.
    """
    def __init__(self, hparams, pretrained_model):
        super(BertDeletion, self).__init__()
        self.hparams = hparams
        self._model = pretrained_model
        # Dropout + 1-unit linear head producing one logit per position.
        self._classification = nn.Sequential(
            nn.Dropout(p=1 - self.hparams.dropout_keep_prob),
            nn.Linear(self.hparams.bert_hidden_dim, 1)
        )
        if self.hparams.auxiliary_loss_type == "softmax":
            self._criterion = nn.CrossEntropyLoss()
        elif self.hparams.auxiliary_loss_type == "sigmoid":
            self._criterion = nn.BCEWithLogitsLoss()
        else:
            raise NotImplementedError
    def forward(self, batch, batch_ressel_label):
        """Compute the deletion loss for a batch.

        Examples with label == -1 or response-selection label == 0 are
        skipped; returns a zero tensor when nothing contributes.
        """
        outputs = self._model(
            batch["anno_sent"],
            token_type_ids=batch["segment_ids"],
            attention_mask=batch["attention_mask"]
        )
        bert_outputs = outputs[0]
        if self.hparams.pca_visualization:
            # NOTE(review): hardcoded absolute path; the handle is opened in
            # append mode on every forward pass and never closed (leak), and
            # the print below looks like leftover debug output.
            pca_handle = open("/data/taesunwhang/response_selection/visualization/%s/del_token_representation.pkl"
                              % self.hparams.task_name, "ab")
            print(pca_handle)
        del_losses = []
        for batch_idx, del_pos in enumerate(batch["del_pos"]):
            if batch["label"][batch_idx] == -1:
                continue
            if batch_ressel_label[batch_idx] == 0:
                continue
            # Indices of the candidate deletion positions for this dialog.
            del_pos_nonzero = del_pos.nonzero().view(-1)
            dialog_del_out = bert_outputs[batch_idx, del_pos_nonzero, :] # num_utterances, 768
            del_logits = self._classification(dialog_del_out) # num_utterances, 1
            del_logits = del_logits.squeeze(-1) # num_utterances
            target_id = batch["label"][batch_idx]
            if self.hparams.pca_visualization:
                pickle.dump([dialog_del_out.to("cpu").tolist(), target_id.to("cpu").tolist()], pca_handle)
            if self.hparams.auxiliary_loss_type == "softmax":
                # Single-example cross entropy over the candidate positions.
                del_loss = self._criterion(del_logits.unsqueeze(0), target_id.unsqueeze(0))
            elif self.hparams.auxiliary_loss_type == "sigmoid":
                # One-hot target over the candidate positions.
                del_label = torch.eye(del_pos_nonzero.size(0))[target_id].to(torch.cuda.current_device())
                del_loss = self._criterion(del_logits, del_label)
            else:
                raise NotImplementedError
            del_losses.append(del_loss)
        if len(del_losses) == 0:
            deletion_loss = torch.tensor(0).float().to(torch.cuda.current_device())
        else:
            deletion_loss = torch.mean(torch.stack(del_losses, dim=0), dim=-1)
        return deletion_loss | [
"[email protected]"
] | |
17da5ea3d2bb9e4127e42dc12e6b4dabb388ec23 | 45a49476f7ddadf411ad8b71fa43299ddbbd87ec | /day5-morning/01-lm.py | 4e759c1a33106d938b146cb2a7a6642605895166 | [] | no_license | NEVEC2149/qbb2019-answers | 5bf8fec10437f4f398fb3137409dccd80f7a659f | e52126efb90b5b3f400d3aa1856d2aa24d99e27e | refs/heads/master | 2020-07-11T08:28:48.554356 | 2020-05-10T20:54:56 | 2020-05-10T20:54:56 | 204,489,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | #!/usr/bin/env python3
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import scipy
#FBtr0302347
# Load an FPKM table indexed by transcript name.
# argv[1] = CSV path, argv[2] = transcript id of the gene of interest.
df = pd.read_csv( sys.argv[1], index_col = "t_name" )
col_names = df.columns.values.tolist()
goi = pd.DataFrame( df.loc[sys.argv[2]].iloc[1:] )
goi.columns = ["FPKM"]
goi["FPKM"] = pd.to_numeric(goi["FPKM"])
# Sample names look like "<sex>_<stage>"; split them into two columns.
goi["sex"], goi["stage"] = goi.index.str.split("_", 1).str
print(goi)
# Ordinary least squares: does FPKM differ by sex?
model = sm.formula.ols(formula = "FPKM ~ sex", data = goi)
ols_results = model.fit()
print(ols_results.summary()) | [
"[email protected]"
] | |
88649eb358b60e371e1a0612153225a457aab025 | ee8756f9beff74584b4a6df9c2c31b4acab1c3bf | /_GTW/_OMP/_PAP/Person_has_VAT_IDN.py | 785a801b87be85fe1cba2ba10bbff630fec07c80 | [
"BSD-3-Clause"
] | permissive | JPilarr/tapyr | 5d13da8138f677c744ab357fe04e4e67b1e21d55 | 4235fba6dce169fe747cce4d17d88dcf4a3f9f1d | refs/heads/master | 2022-11-13T09:59:01.541995 | 2020-06-29T12:35:55 | 2020-06-30T09:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# #*** <License> ************************************************************#
# This module is part of the package GTW.OMP.PAP.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# GTW.OMP.PAP.Person_has_VAT_IDN
#
# Purpose
# Link a person to a VAT identification number
#
# Revision Dates
# 24-Feb-2016 (CT) Creation
# 24-Feb-2016 (CT) Inject attribute `vat_idn` into `Person`
# 27-May-2016 (CT) Add missing import for `PAP.Person`
# ««revision-date»»···
#--
from _MOM.import_MOM import *
from _GTW._OMP._PAP.Attr_Type import *
from _GTW import GTW
from _GTW._OMP._PAP import PAP
from _TFL.I18N import _
from _GTW._OMP._PAP.VAT_IDN import A_VAT_IDN
import _GTW._OMP._PAP.Person
import _GTW._OMP._PAP.Subject_has_VAT_IDN
from _TFL.Decorator import eval_function_body
# MOM essence class: inherits the person/VAT-number link attributes from
# PAP.Subject_has_VAT_IDN and narrows the left role to PAP.Person.
_Ancestor_Essence = PAP.Subject_has_VAT_IDN
class Person_has_VAT_IDN (_Ancestor_Essence) :
    """Link a person to a VAT identification number"""
    class _Attributes (_Ancestor_Essence._Attributes) :
        _Ancestor = _Ancestor_Essence._Attributes
        ### Primary attributes
        class left (_Ancestor.left) :
            """Person that has a VAT identification number"""
            role_type = PAP.Person
        # end class left
        class vin (_Ancestor.vin) :
            """VAT identification number of the Person"""
        # end class vin
    # end class _Attributes
# end class Person_has_VAT_IDN
# @eval_function_body executes the function immediately at import time, so
# importing this module adds a queryable `vat_idn` attribute to PAP.Person.
@eval_function_body
def _inject_vat_idn () :
    # NOTE(review): docstring below says "Company" although the attribute is
    # attached to Person — looks copied from the company module; left
    # unchanged because MOM may surface class docstrings at runtime.
    class vat_idn (A_VAT_IDN) :
        """VAT identification number of Company."""
        kind = Attr.Query
        query = Q.vat_idn_link.vin
    # end class vat_idn
    PAP.Person.add_attribute (vat_idn, override = True)
# end def _inject_vat_idn
# Inverted guard is this framework's convention: export the module into the
# GTW.OMP.PAP package namespace whenever it is imported (not run directly).
if __name__ != "__main__" :
    GTW.OMP.PAP._Export ("*")
### __END__ GTW.OMP.PAP.Person_has_VAT_IDN
| [
"[email protected]"
] | |
1a91d1d502e4e80141f2117aaeaf409ddefed976 | 8c582c6899780bfb739d7d8ad4f35b41357b8f8f | /env/bin/pip3 | cf5a028cf40c305be14859a3ce7da794f6451fd7 | [] | no_license | upriverbasil/BLAST | 086078363716d106299fefb5a2373caf49534b71 | 4208803478c8821160653a4a38f3f892f9fecba1 | refs/heads/main | 2023-01-07T18:27:37.130484 | 2020-10-28T17:27:34 | 2020-10-28T17:27:34 | 307,136,586 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | #!/home/saad/Desktop/BLAST/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
# Auto-generated console-script shim: strip the installer's script suffix
# from argv[0], then hand control to pip's main() and exit with its status.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | ||
a9ec19d40ea9ccc343b5b229f1628c72a763202c | 1650587cce73b06dcc35837140c67734866f8c32 | /tests/system/varnishbeat.py | 8ad46df90584f17405a77b0f5a816de46526df21 | [
"BSD-2-Clause"
] | permissive | phenomenes/varnishbeat | 70b0c8ad3ab0c886491d0bf63b760657d1577463 | c881804d317842bdb44a6de0c106725dd4748c9f | refs/heads/master | 2021-01-17T01:00:31.399713 | 2017-01-31T15:41:49 | 2017-01-31T15:41:49 | 57,590,542 | 10 | 3 | null | 2016-12-05T12:10:14 | 2016-05-01T08:22:30 | Go | UTF-8 | Python | false | false | 334 | py | import sys
sys.path.append('../../vendor/github.com/elastic/beats/libbeat/tests/system')
from beat.beat import TestCase
class BaseTest(TestCase):
    """Base class for varnishbeat system tests.

    Points libbeat's system-test framework at the beat's name, build
    output directory, and compiled test binary.
    """

    @classmethod
    def setUpClass(cls):
        # First parameter of a classmethod is the class itself; it was
        # previously (misleadingly) named `self`.
        cls.beat_name = "varnishbeat"
        cls.build_path = "../../build/system-tests/"
        cls.beat_path = "../../varnishbeat.test"
| [
"[email protected]"
] | |
392588f75f659c4faaea4d66e33cf5e011382a1a | 7c3b77af326723f353f5626f113111ba42ce45bd | /test_flask/sessions/my_global.py | 4d0838ae94cf7a6a91ee149009328007ed79aebd | [] | no_license | ianhzhang/learning | 143ed501d90ef20ef99a934bae066d8063b4f794 | 42d9c4da6da1b34844ee94379576feb6337d3271 | refs/heads/master | 2023-01-08T02:58:21.209957 | 2021-03-11T01:44:56 | 2021-03-11T01:44:56 | 144,384,263 | 1 | 0 | null | 2023-01-07T13:24:15 | 2018-08-11T12:41:38 | Go | UTF-8 | Python | false | false | 420 | py | from flask import Flask, session, jsonify, request
from flask_cors import CORS
# Flask app with CORS enabled; `cnt` is a process-local request counter
# shared by all handlers (not persisted, resets on restart).
app = Flask(__name__)
CORS(app)
cnt = 0
@app.route('/', methods=["GET"])
def index():
    """Increment the module-level counter and return it as JSON."""
    global cnt
    cnt += 1
    return jsonify({"rslt": cnt})
@app.route('/drop', methods=["GET"])
def drop():
    # NOTE(review): pops the session key 'cnt', but the '/' handler uses the
    # module-level global `cnt`, so this does NOT reset that counter —
    # looks like a leftover from a session-based version; confirm intent.
    session.pop('cnt', None)
    return 'Dropped'
# Development server entry point (debug mode; not for production use).
if __name__ == '__main__':
    app.run(debug=True, port=5000)
| [
"[email protected]"
] | |
7e31ac3da4e303ae64897cf072b6f527f6c5b105 | 7990331178e62ec1a982fb84e7a06c024a30f28f | /Regex and Parsing/Detect Floating Point Number.py | 27d8a0c4619ecfa11090065d39544b180746d4be | [] | no_license | phoenixx1/HackerRank-Python | bc6e17eb87f968d9d3f25d30e6dc8bbbf9fe1088 | f7ae39e5ecc7aa8cf2b53928b567be37af1329c9 | refs/heads/master | 2021-01-06T00:38:37.308567 | 2020-05-22T16:28:53 | 2020-05-22T16:28:53 | 241,179,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | #author @Nishant
import re
# Read n, then classify each of n input lines as a float literal or not.
n = int(input())
for i in range(0,n):
    k = input()
    # Pattern: optional sign, optional integer part, mandatory '.', >=1 digits.
    print (bool(re.match('^[+-]?\d*?\.{1}\d+$',k))) | [
"[email protected]"
] | |
7fbeea65a915ec3d7fbab247809ecbc40dc0a9b8 | 4d6884ee460208b055fd957500280892a5e8d434 | /openspace/apps/profiles/admin.py | 337208164c23c8413f303d562fdb6f1fd7e22019 | [] | no_license | Turbulence-org/openspace-wilderness | 5ee3c9d2cb84006c8df3feb5e7f60b5e0d2e34cd | ff55b066ef05d75d0333b44e84a5e8977206330b | refs/heads/master | 2020-12-24T10:15:36.729594 | 2014-12-03T05:24:36 | 2014-12-03T05:24:36 | 40,108,884 | 1 | 0 | null | 2015-08-03T06:35:21 | 2015-08-03T06:35:21 | null | UTF-8 | Python | false | false | 1,017 | py | from django.contrib import admin
from apps.profiles.models import *
class CommentInline(admin.TabularInline):
    """Inline editor for comments, shown inside the Post admin page."""
    model = Comment
    extra = 1  # number of empty comment forms displayed
class PostInline(admin.TabularInline):
    """Inline editor for posts, shown inside the Profile admin page."""
    model = Post
    extra = 0  # no empty post forms displayed by default
class ProfileAdmin(admin.ModelAdmin):
    """Admin for profiles: grouped fieldsets, inline posts, and
    list/search configuration."""
    # Blog Info and Network sections start collapsed.
    fieldsets = [
        ('Personal Information', {'fields': ['fname', 'lname', 'age', 'gender', 'location', 'tags', 'interest', 'position', 'species', 'img_number', 'energy', 'visible']}),
        ('Blog Info', {'fields': ['blog_id', 'blog_url', 'last_login'], 'classes': ['collapse']}),
        ('Network', {'fields': ['friends'], 'classes': ['collapse']})
    ]
    inlines = [PostInline]
    list_display = ('id', 'species', 'fname', 'lname', 'location', 'visible', 'last_login')
    search_fields = ['id', 'fname', 'lname', 'species']
class PostAdmin(admin.ModelAdmin):
    """Admin for posts with their comments editable inline."""
    inlines = [CommentInline]
    list_display = ('post_profile', 'date_published', 'post_content')
# Register the customized admins with the default admin site.
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Post, PostAdmin)
| [
"[email protected]"
] | |
725c2d313653b6a9d63ddc9fb74ac922331984cf | 2942c012289af83c956538ff8304874ea1696104 | /Lesson_4/task_1.py | 9fc0327946ee0515189ea96927d5ac32b7a3eca1 | [] | no_license | AndreyKrivochenko/Python_basic | a6e152acf3f737c96b9f505530adcc9bed48ecd9 | e119a085eb74e972f63e6310f82c8f758d001f05 | refs/heads/master | 2023-01-20T14:13:44.594405 | 2020-11-19T09:54:02 | 2020-11-19T09:54:02 | 308,070,295 | 0 | 0 | null | 2020-11-19T09:54:03 | 2020-10-28T16:04:00 | Python | UTF-8 | Python | false | false | 228 | py | from sys import argv
def calc_salary(*args):
    """Return pay: args[0] * args[1], plus args[2] as a bonus if present.

    Arguments may be ints or numeric strings; extra arguments beyond the
    third are ignored.
    """
    hours, rate = int(args[0]), int(args[1])
    total = hours * rate
    if len(args) > 2:
        total += int(args[2])
    return total
# Expect hours and rate (and an optional bonus) on the command line;
# silently does nothing when fewer than two arguments are given.
param = argv[1:]
if len(param) > 1:
    print(calc_salary(*param))
| [
"[email protected]"
] | |
e9101a3d6dec48121d70106cd22a02580dab9818 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/329/usersdata/278/91305/submittedfiles/dec2bin.py | 595fc5831851ef370d1fdd817a1809e93e8e5f6c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # -*- coding: utf-8 -*-
# NOTE(review): filename says dec2bin, but the code neither converts to
# binary nor matches its own prompts; looks like an unfinished exercise.
p = int(input("Digite um inteiro: "))
q = int(input("Digite um inteiro maior que o anterior: "))
i=10
x=0
# Reduce p to 0 while growing i to 10**(digits(p)+1).
while (p>0):
    p=p//10
    i=i*10
# NOTE(review): p is 0 after the loop above, so `p==q` can only hold once q
# reaches 0; but q = q % i leaves q unchanged whenever q < i, so for any
# 0 < q < i this loop never terminates (infinite loop).
while (q>0):
    q=q%i
    if p==q:
        print("S")
        break
    x+=1
if x>0:
    print("N")
| [
"[email protected]"
] | |
f78cf3aa3a31fe7c829aafdb3531253cf24b4678 | 61036367482076a08a40421d3ba7bd6927b7fc51 | /src/neural_lm_eval.py | bca76bc48204f40cfa571417b2e62a19f12fe048 | [] | no_license | antoine-hochart/language_models | 900c176341709779be858e917f3db032e2d20919 | ba0252f9780802ac7854e5fa43e5e1e1329f0cbd | refs/heads/main | 2023-02-07T12:08:47.608048 | 2020-12-30T15:08:42 | 2020-12-30T15:08:42 | 325,579,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | import os
import torch
from time import time
from utils.data import get_corpus, preprocess_text, list2str
from utils.eval import perplexity_loss
from utils.nn import encode_text, decode_text, Textset
from models.nn import RNNLM
######################################################################
# parameters
MIN_COUNT = 5
EMBEDDING_DIM = 128
HIDDEN_SIZE = 256
SEQ_LEN = 30
BATCH_SIZE = 2048
######################################################################
# load and preprocess text
print("Loading and preprocessing text...")
t0 = time()
text = get_corpus()
(train_text, val_text, test_text), vocab = preprocess_text(
text, val_size=0.1, test_size=0.1, min_count=MIN_COUNT, seed=0
)
print("Done ({:.2f}s)".format(time() - t0))
print()
######################################################################
# load model
model = RNNLM(len(vocab), EMBEDDING_DIM, HIDDEN_SIZE, SEQ_LEN)
fpath = os.path.join(os.path.dirname(__file__), '..', 'data', 'models', 'rnn.pt')
model.load_state_dict(torch.load(fpath, map_location=torch.device('cpu')))
model.eval()
######################################################################
# evaluate model
testset = Textset(test_text, vocab, SEQ_LEN)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Computing perplexity on {}...".format(device))
perplexity = perplexity_loss(model, testset, BATCH_SIZE, device)
print("Perplexity of model on test set: {:.2f}".format(perplexity))
print()
######################################################################
# sentence generator
print("Sentences generated by model:")
print()
eos = ['.', '?', '!']
eos = encode_text(eos, vocab)
seed = ['ce', 'soir']
seed = encode_text(seed, vocab)
model.to('cpu')
new_text = model.generate_text(seed, eos, n_sent=5)
new_text = decode_text(new_text, vocab)
new_text = list2str(new_text)
print(new_text)
| [
"[email protected]"
] | |
a6b36d3c91288ef7abec4035eff6d36f80262027 | 225a76477f1db049764cd796ebab2c27ae291e30 | /blog/models.py | 58bad25123edd31dc26f08e1586f459978f755b6 | [] | no_license | lubakach/myfirstblog | 3c4e36a9dfbf77b57e4bdfadbba2bcd1067543ae | e111f3fffe0994e1cf2985cf0f9b71fa8b786220 | refs/heads/master | 2022-06-02T06:06:28.346017 | 2020-05-05T13:14:32 | 2020-05-05T13:14:32 | 259,961,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | from django.conf import settings
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
| [
"[email protected]"
] | |
2c186f4b552116c2a244339463695d82c846d67d | 3233da7c24e82818e7e4e2fe4cc1546467adc4ce | /problem_setup.py | 926315b218ccccb991dc33ddb0c3347828171a44 | [] | no_license | lue/dust-in-the-wind | 981578829d9ce4a5606a4ffd416c5edf007031c6 | 83a76919d08603d021d44abac2102d787cea955d | refs/heads/master | 2020-03-22T05:41:15.995852 | 2018-10-08T18:58:53 | 2018-10-08T18:58:53 | 139,583,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,761 | py | #
# Import NumPy for array handling
#
import numpy as np
#
# Import plotting libraries (start Python with ipython --matplotlib)
#
#from mpl_toolkits.mplot3d import axes3d
#from matplotlib import pyplot as plt
#
# Some natural constants
#
au = 1.49598e13 # Astronomical Unit [cm]
pc = 3.08572e18 # Parsec [cm]
ms = 1.98892e33 # Solar mass [g]
ts = 5.78e3 # Solar temperature [K]
ls = 3.8525e33 # Solar luminosity [erg/s]
rs = 6.96e10 # Solar radius [cm]
#
# Monte Carlo parameters
#
nphot = 1000000
#
# Grid parameters
#
nx = 100
ny = 100
nz = 100
sizex = 10.*rs
sizey = 10.*rs
sizez = 10.*rs
#
# Model parameters
#
radius = 1200*rs
rho0 = 1e-13
#
# Star parameters
#
mstar = ms
rstar = rs*10.
tstar = ts
pstar = np.array([0.,0.,100.*rs])
#
# Make the coordinates
# Write the wavelength_micron.inp file
#
lam1 = 0.1e0
lam2 = 7.0e0
lam3 = 25.e0
lam4 = 1.0e4
n12 = 20
n23 = 100
n34 = 30
lam12 = np.logspace(np.log10(lam1),np.log10(lam2),n12,endpoint=False)
lam23 = np.logspace(np.log10(lam2),np.log10(lam3),n23,endpoint=False)
lam34 = np.logspace(np.log10(lam3),np.log10(lam4),n34,endpoint=True)
lam = np.concatenate([lam12,lam23,lam34])
nlam = lam.size
#
# Write the wavelength file
#
with open('wavelength_micron.inp','w+') as f:
f.write('%d\n'%(nlam))
np.savetxt(f,lam.T,fmt=['%13.6e'])
#
#
# Write the stars.inp file
#
mstar = ms
rstar = rs*10.
tstar = ts
pstar = np.array([0.,0.,100.*rs])
with open('stars.inp','w+') as f:
f.write('2\n')
f.write('1 %d\n\n'%(nlam))
f.write('%13.6e %13.6e %13.6e %13.6e %13.6e\n\n'%(rstar,mstar,pstar[0],pstar[1],pstar[2]))
np.savetxt(f,lam.T,fmt=['%13.6e'])
f.write('\n%13.6e\n'%(-tstar))
#
# Write the grid file
#
# with open('amr_grid.inp','w+') as f:
# f.write('1\n') # iformat
# f.write('0\n') # AMR grid style (0=regular grid, no AMR)
# f.write('0\n') # Coordinate system
# f.write('0\n') # gridinfo
# f.write('1 1 1\n') # Include x,y,z coordinate
# f.write('%d %d %d\n'%(nx,ny,nz)) # Size of grid
# np.savetxt(f,xi.T,fmt=['%13.6e']) # X coordinates (cell walls)
# np.savetxt(f,yi.T,fmt=['%13.6e']) # Y coordinates (cell walls)
# np.savetxt(f,zi.T,fmt=['%13.6e']) # Z coordinates (cell walls)
# #
# # Write the density file
# #
# with open('dust_density.inp','w+') as f:
# f.write('1\n') # Format number
# f.write('%d\n'%(nx*ny*nz)) # Nr of cells
# f.write('1\n') # Nr of dust species
# data = rhod.ravel(order='F') # Create a 1-D view, fortran-style indexing
# np.savetxt(f,data.T,fmt=['%13.6e']) # The data
#
# Dust opacity control file
#
with open('dustopac.inp','w+') as f:
f.write('2 Format number of this file\n')
f.write('1 Nr of dust species\n')
f.write('============================================================================\n')
f.write('1 Way in which this dust species is read\n')
f.write('0 0=Thermal grain\n')
f.write('silicate Extension of name of dustkappa_***.inp file\n')
f.write('----------------------------------------------------------------------------\n')
#
# Write the radmc3d.inp control file
#
with open('radmc3d.inp','w') as f:
f.write('nphot = %d\n'%(nphot))
f.write('scattering_mode_max = 1000\n')
f.write('iranfreqmode = 1\n')
f.write('istar_sphere = 0')
| [
"[email protected]"
] | |
7a4341fef7c0ae285d755ed3115bdb7c9082c53e | 48d7e39f7f683b1ae895ff7a04668cdd1e2e98b3 | /scripts/test_crop3.py | 342bd1ac3a0c99e31e4f0f27052550c9f81da35b | [] | no_license | CesarAsturias/Computer_Vision | 371d78feaad76fa3ff0eae31b67f27c4d421eeca | 697c3b06b91f774d2c97c25cc0fc4198c49b27cc | refs/heads/master | 2020-04-12T08:41:26.265850 | 2016-08-01T19:59:51 | 2016-08-01T19:59:51 | 42,715,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,275 | py | from CVImage import CVImage
from Matcher import Matcher
import numpy as np
from VisualOdometry import VisualOdometry
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import axes3d
def correlate_roi(match, img, size, start):
# This function correlates two images using Matcher and CVImage
# @param match: Matcher object
# @param img: CVImage object
# @param size: size of the region of interest
# @param start: coordinates of the origin
# @return match.good_kp1: keypoints founded in the current roi
# @return match.good_kp2: keypoints founded in the previous roi
roi = img.crop_image(start, size, img.new_image)
roi_prev = img.crop_image(start, size, img.prev_image)
match.match(roi, roi_prev)
print type(match.curr_kp[0])
print len(match.curr_kp)
print len(match.prev_kp)
# Translate keypoints to their actual position
match.sum_coord(start[0], start[1])
# Store the keypoints and the matches
match.append_global()
def get_number_keypoints(match):
# returns the total keypoints encountered
return len(match.global_kpts1)
def plot_same_figure(match, img):
# Plot the keypoints in each image on the same figure
global_kpts1 = np.reshape(match.global_kpts1, (len(match.global_kpts1), 2))
global_kpts2 = np.reshape(match.global_kpts2, (len(match.global_kpts2), 2))
x1 = global_kpts1[:, 0]
y1 = global_kpts1[:, 1]
x2 = global_kpts2[:, 0]
y2 = global_kpts2[:, 1]
fig = plt.figure(figsize=(20, 20))
a = fig.add_subplot(1, 2, 1)
imgplot = plt.imshow(img.new_image, cmap='gray', interpolation='bicubic')
plt.plot(x1, y1, 'r*')
a.set_title('Current Image')
a = fig.add_subplot(1, 2, 2)
imgplot = plt.imshow(img.prev_image, cmap='gray', interpolation='bicubic')
plt.plot(x2, y2, 'g*')
a.set_title('Previous Image')
plt.show()
def plot_together(match, img):
# Store the result and plot
plt.imshow(img.new_image, cmap='gray', interpolation='bicubic')
global_kpts1 = np.reshape(match.global_kpts1, (len(match.global_kpts1), 2))
global_kpts2 = np.reshape(match.global_kpts2, (len(match.global_kpts2), 2))
x1 = global_kpts1[:, 0]
y1 = global_kpts1[:, 1]
plt.plot(x1, y1, 'r*')
x2 = global_kpts2[:, 0]
y2 = global_kpts2[:, 1]
plt.plot(x2, y2, 'k*')
plt.show()
def plot_together_np(array1, array2, img):
# Store the result and plot
plt.imshow(img.new_image, cmap='gray', interpolation='bicubic')
global_kpts1 = np.reshape(array1, (len(array1), 2))
global_kpts2 = np.reshape(array2, (len(array2), 2))
x1 = global_kpts1[:, 0]
y1 = global_kpts1[:, 1]
plt.plot(x1, y1, 'r*')
x2 = global_kpts2[:, 0]
y2 = global_kpts2[:, 1]
plt.plot(x2, y2, 'k*')
plt.show()
def plot_one(match, img):
# Store the result and plot
plt.imshow(img.new_image, cmap='gray', interpolation='bicubic')
global_kpts1 = np.reshape(match.global_kpts1, (len(match.global_kpts1), 2))
x1 = global_kpts1[:, 0]
y1 = global_kpts1[:, 1]
plt.plot(x1, y1, 'r*')
plt.show()
def plot_one_np(array1, img):
# Store the result and plot
plt.imshow(img.new_image, cmap='gray', interpolation='bicubic')
array1 = np.reshape(array1, (len(array1), 2))
x1 = array1[:, 0]
y1 = array1[:, 1]
plt.plot(x1, y1, 'r*')
plt.show()
def plot_save(match, img):
# Store the result and plot
px = img.new_image.shape[0]
py = img.new_image.shape[1]
dpi = 140
size = (py / np.float(dpi), px / np.float(dpi))
fig = plt.figure(figsize=size, dpi=dpi)
plt.imshow(img.new_image, cmap='gray', interpolation='bicubic')
global_kpts1 = np.reshape(match.global_kpts1, (len(match.global_kpts1), 2))
x1 = global_kpts1[:, 0]
y1 = global_kpts1[:, 1]
plt.plot(x1, y1, 'r*')
plt.show()
# plt.savefig('test.png', dpi=100)
def get_structure(match, img, vo):
# Get structure of the scene
# @param match: Matcher object
# @param img: CVImage object
# @param vo: VisualOdometry object
vo.P_from_F(vo.F)
vo.create_P1()
# Triangulate points
# global_kpts1 -> keypoints in the second scene, but we are inverting the
# scne in order to obtain the movement of the camera, which is equal to the
# movement of the points inverted. So, the camera matrix of the
# global_kpts1 keypoints is the camera of the first frame
scene = vo.opt_triangulation(match.global_kpts1, match.global_kpts2,
vo.cam1.P, vo.cam2.P)
return scene
def run():
match = Matcher()
img = CVImage('/home/cesar/Documentos/Computer_Vision/01/image_0')
img.read_image()
img.copy_image()
img.acquire()
h = img.new_image.shape[0]
w = img.new_image.shape[1]
n = 2 # Number of roi's
size = np.array([[w / n], [h / n]], np.int32)
start = np.array([[0], [0]], np.int32)
# First roi
correlate_roi(match, img, size, start)
# Second roi
start = np.array([[w / n], [0]])
correlate_roi(match, img, size, start)
# Third roi
start = np.array([[0], [h / n]])
correlate_roi(match, img, size, start)
# Last roi
start = np.array([[w / n], [h / n]])
correlate_roi(match, img, size, start)
# We have stored two times every original keypoint (curr_kp, prev_kp)
match.curr_kp = match.curr_kp[::2]
match.prev_kp = match.prev_kp[::2]
# The same applies for the descriptors
match.curr_dsc = match.curr_dsc[::2]
match.prev_dsc = match.prev_dsc[::2]
print match.curr_kp[0].pt
print match.global_kpts1[0]
# Print the total number of keypoints encountered
print("Total number of keypoints encountered: \
{}".format(get_number_keypoints(match)))
# Test the plot_same_figure function
# plot_same_figure(match, img)
# plot_one(match, img)
# plot_save(match, img)
# Get Fundamental Matrix
vo = VisualOdometry()
print "Type of match.global_kpts1: ", type(match.global_kpts1)
match.global_kpts1, match.global_kpts2 = \
vo.EstimateF_multiprocessing(match.global_kpts2, match.global_kpts1)
# plot_one(match, img)
# plot_one_np(vo.outlier_points_new, img)
# plot_together_np(match.global_kpts1, vo.outlier_points_new, img)
print("Total number of keypoints encountered: \
{}".format(get_number_keypoints(match)))
# Triangulate. To get the actual movement of the camera we are "swapping"
# the scene. The first camera is cam1.P, the first keypoints are
# global_kpts1. On the other hand, the second camera is cam2.P and the
# second keypoints are global_kpts2
scene = get_structure(match, img, vo)
print "ESCENA", scene[:, :20]
print "PROYECCION EN SEGUNDA", vo.cam1.project(scene[:, :20])
print "SEGUNDA", match.global_kpts1[:20]
print "CORREGIDOS SEGUNDA", vo.correctedkpts1[:, :20]
print "PROYECCION EN PRIMERA", vo.cam2.project(scene[:, :20])
print "PRIMERA", match.global_kpts2[:20]
print "CORREGIDOS EN PRIMERA", vo.correctedkpts2[:, :20]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(scene[0], scene[1], scene[2], 'ko')
plt.axis('equal')
plt.show()
# Test utils
if __name__ == '__main__':
run()
| [
"[email protected]"
] | |
76ca19495012c1c92570c4b7765287f60587744f | ffe73c48a765dd8c36bd4f34288833be8bca31c6 | /game.py | 286bac0957c3e7b36bbcff97ef17959799d7c16e | [] | no_license | abdulhannan456/MY-FIRST-GAME | 0f3145539f29ebb1d111e5a126ce40c09fcb9d96 | acca4d63fe4fe24b263e695ceaed0050e510a21b | refs/heads/main | 2023-03-29T10:48:47.344985 | 2021-04-03T09:18:21 | 2021-04-03T09:18:21 | 354,246,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,361 | py | import random
import pygame
from pygame import mixer
# initializing
pygame.init()
pygame.display.set_caption("Shoot em' Up")
screen = pygame.display.set_mode((1024, 600))
pygame.display.set_icon(pygame.image.load("icon.png"))
backgroundimg = pygame.image.load("space-2.png")
titleimg = pygame.image.load("title.png")
pressspace = pygame.image.load("pressspace.png")
spaceship = pygame.image.load("science-fiction.png")
playerimg = pygame.image.load("science-fiction (2).png")
bulletimg = pygame.image.load("bullet.png")
shot2img = pygame.image.load("bullet2.png")
enemyimg1 = pygame.image.load("sp1.png")
enemyimg2 = pygame.image.load("sp2.png")
expimg = pygame.image.load("explosion.png")
font = pygame.font.Font("orange juice 2.0.ttf", 40)
destroy = pygame.mixer.Sound("destroy.wav")
laser = pygame.mixer.Sound("laser.wav")
img1y, img2y = -1024, 0
titley = 600
run = True
play = False
gameover = False
score, hscore = 0, 0
k = 0
pygame.mixer.music.load("music2.wav")
pygame.mixer.music.play(-1)
while run:
if gameover is True:
text = font.render("Your Score: " + str(score), True, (255, 255, 255))
screen.blit(text, (400, 300))
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
gameover = False
k = 0
elif play is False:
player_x, player_y = 475, 500
change_x = 0
flist, elist, shot2lst, explist = [], [], [], []
screen.blit(backgroundimg, (0, img2y))
screen.blit(backgroundimg, (0, img1y))
screen.blit(titleimg, (250, titley))
if titley >= 220:
titley -= 3
else:
screen.blit(spaceship, (800, 220))
if k % 64 in range(32):
screen.blit(pressspace, (300, 350))
if score > hscore:
hscore = score
if hscore != 0:
text = font.render("High score: "+ str(hscore), True, (0, 255, 0))
screen.blit(text, (200, 100))
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
play = True
pygame.mixer.music.load("music.wav")
pygame.mixer.music.play(-1)
k = 0
score = 0
if event.type == pygame.QUIT:
run = False
elif play is True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
change_x = 8
if event.key == pygame.K_LEFT:
change_x = -8
if event.key == pygame.K_SPACE:
laser.play()
flist.append([player_x + 27, player_y - 20])
if event.type == pygame.KEYUP:
change_x = 0
screen.blit(backgroundimg, (0, img1y))
screen.blit(backgroundimg, (0, img2y))
img1y = img1y + 1
img2y = img2y + 1
player_x += change_x
if player_x <= 0:
player_x = 0
if player_x >= 959:
player_x = 959
if img2y == 1024:
img2y = -1024
if img1y == 1024:
img1y = -1024
screen.blit(playerimg, (player_x, player_y))
if len(elist) < 3:
y = random.choice([enemyimg1, enemyimg2])
if y is enemyimg1:
elist.append([y, -64, random.choice([64, 160, 256, 352])])
elif y is enemyimg2:
elist.append([y, 1088, random.choice([64, 160, 256, 352])])
for i in range(len(elist)):
screen.blit(elist[i][0], (elist[i][1], elist[i][2]))
if elist[i][0] == enemyimg1:
elist[i][1] += score // 50 + 3
if elist[i][1] >= 1024:
del elist[i]
break
elif elist[i][0] == enemyimg2:
elist[i][1] -= score // 50 + 3
if elist[i][1] <= -64:
del elist[i]
break
if k == 60:
for i in range(len(elist)):
s2x = elist[i][1] + 27
s2y = elist[i][2] + 50
shot2lst.append([s2x, s2y])
k = 0
for i in range(len(shot2lst)):
screen.blit(shot2img, (shot2lst[i][0], shot2lst[i][1]))
shot2lst[i][1] += 4
for i in range(len(flist)):
if flist[i][1] <= 0:
del flist[i]
break
else:
screen.blit(bulletimg, (flist[i][0], flist[i][1]))
flist[i][1] -= 20
for i in range(len(elist)):
for j in range(len(flist)):
if (flist[j][0] - elist[i][1] - 32) ** 2 + (flist[j][1] - elist[i][2] - 32) ** 2 <= 2025:
del flist[j]
explist.append([elist[i][1], elist[i][2], 0])
elist[i] = random.choice([[enemyimg1, -64, random.choice([64, 160, 256, 352])],
[enemyimg2, 1088, random.choice([64, 160, 256, 352])]])
score += 1
destroy.play()
break
for i in range(len(explist)):
if explist[i][2] <= 6:
screen.blit(expimg, (explist[i][0], explist[i][1]))
explist[i][2] += 1
else:
del explist[i]
break
for i in shot2lst:
if (i[0] - player_x - 32) ** 2 + (i[1] - player_y - 32) ** 2 <= 2025:
play = False
screen.blit(expimg, (player_x, player_y))
destroy.play()
destroy.play()
pygame.mixer.music.load("music2.wav")
pygame.mixer.music.play(-1)
gameover = True
text = font.render("Score: " + str(score), True, (255, 255, 255))
screen.blit(text, (0, 0))
k += 1
pygame.display.update() | [
"[email protected]"
] | |
47698845ec9c99153f1e3f0033757c42a8453169 | 047002844168598dfefe1800a78a1e0ea56ea207 | /SwitchTracer/cores/domains/resoluters.py | fcdbbc4f375f81774797025c1afd076293c640e7 | [
"MIT"
] | permissive | IzayoiRin/VirtualVeyonST | 9d65808fb6299cecf046ceed78227c7fa5b0ebe5 | d0c4035dba81d02135ad54f4c5a5d463e95f7925 | refs/heads/master | 2023-02-04T05:38:36.270463 | 2020-12-26T09:23:24 | 2020-12-26T09:23:24 | 324,520,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,492 | py | """ There are code for Resoluter"""
import time
from multiprocessing import Process
from celery.result import AsyncResult
import SwitchTracer as st
from SwitchTracer.cores.compents.recorders import Recorder
from SwitchTracer.cores.compents.registers import Register
from SwitchTracer.universal.exceptions import SettingErrors, ResoluterErrors, KernelWaresSettingsErrors
_helper_pool = []
class ResoluterBase(object):
default_global_registers_map = None
default_global_records_list = None
__dynamic_pool__ = None
__environ__ = None
__max_pool__ = None
def __init__(self, env):
if self.default_global_registers_map is None or self.default_global_records_list is None:
raise KernelWaresSettingsErrors("No VOLUMES has been Linked!")
setattr(self, "__REGISTERS", self.default_global_registers_map)
setattr(self, "__RECORDS", self.default_global_records_list)
settings = st.environ(env or self.__environ__).settings
self.__dynamic_pool__ = self.__dynamic_pool__ or settings.get("DEFAULT_DYNAMIC_POOL_INFO")
if not isinstance(self.__dynamic_pool__, dict):
raise SettingErrors("Can Not found dynamic pool info of multiprocessing in settings.RESOLUTER")
self.max_pool = self.__max_pool__ or settings.get("DEFAULT_MAX_POOL")
def _get_records_list(self):
return getattr(self, "__RECORDS")
def _get_registers_map(self):
return getattr(self, "__REGISTERS")
@property
def id(self):
return hex(id(self))
def __str__(self):
return "@{sender}:<{greg}->{grec}>".format(
sender=self.__class__.__name__,
greg=self._get_registers_map().id(),
grec=self._get_records_list().id()
)
class GenericResoluter(ResoluterBase):
recorder_class = None
register_class = None
async_helper_prefix = "helper"
def __init__(self, env=None):
self.kwargs = dict()
super(GenericResoluter, self).__init__(env=env)
def get_register_class(self):
return self.register_class.link2gvol(self._get_registers_map())
def get_register(self, tskey):
return self.get_register_class()(key=tskey)
def get_records_class(self):
return self.recorder_class
def get_records(self, **kwargs):
return self.get_records_class()(**kwargs)
def records(self, underflow, timeout, blocked):
recorder = self.get_records(
overflow=0, underflow=underflow, timeout=timeout
)
if not blocked:
recorder.blocking_off()
return recorder
def _listen(self, pname, grec):
recorder = self.dynamic_recorder(pname, timeout=0)
recorder.link_with_records(grec)
params = recorder.params()
if not (params["blocked"] or (params["min"] * 2) < len(recorder)):
return
# print(len(recorder), recorder, params)
while params["blocked"] or params["min"] < len(recorder):
time.sleep(0.1)
dequeue = recorder.pop(0, params["blocked"])
# blocked mod: pop will not underflow cause timeout=0 repr process No Releasing.
# Non-blocked mod: pop will underflow and return None value repr the process Terminated.
if not isinstance(dequeue, AsyncResult):
if pname == "main":
continue
else:
return
if not self.polling(dequeue):
recorder.push(dequeue, False)
# print(recorder)
def dynamic_recorder(self, pname, timeout=1):
starting = self.kwargs.get(pname) or self.__dynamic_pool__.get(pname)
is_short_circuit = (isinstance(starting, int) is False)
underflow = 1 if is_short_circuit else starting // 2
return self.records(underflow=underflow, blocked=is_short_circuit, timeout=timeout)
def polling(self, dequeue):
if dequeue.ready():
print(dequeue.get())
# self.get_register().delay(dequeue.get())
return 1
# if dequeue % 2 == 0:
# return 1
return 0
def async_helper(self, monitors, timeout=0.1):
global _helper_pool
for idx, hd in enumerate(_helper_pool):
if hd is None:
# _helper_pool[idx] = Process(
# target=self._listen,
# args=("helper%d" % idx, self._get_records_list())
# )
if len(self._get_records_list()) > monitors[idx]:
_helper_pool[idx] = Process(
target=self._listen,
args=("helper%d" % idx, self._get_records_list())
)
# TODO: INFO MSG HELPER START
print("helper%d started" % idx)
_helper_pool[idx].start()
elif hd.is_alive() is False:
# TODO: INFO MSG HELPER END
print("helper%d ended" % idx)
_helper_pool[idx] = None
if timeout > 0 and all(_helper_pool):
time.sleep(timeout)
def async_listen(self, gdict, **kwargs):
self.kwargs = kwargs
hd0 = Process(
target=self._listen,
args=("main", self._get_records_list())
)
hd0.start()
gdict["pid_main_monitor"] = hd0.pid
max_helper_pool = (kwargs.get("max_pool", None) or self.max_pool) - 1
kwset = {i for i in self.kwargs if i.startswith(self.async_helper_prefix)}
cfset = set(self.__dynamic_pool__.keys())
available_helper = kwset.union(cfset)
if max_helper_pool < 1:
# TODO: WARNING
print("Warning: No helper for listening!")
elif max_helper_pool > len(available_helper):
raise ResoluterErrors(
"Numbers of helpers exceed! only %d helpers can be found in settings" % len(available_helper)
)
else:
global _helper_pool
_helper_pool = [None for _ in range(max_helper_pool)]
helper_settings = {
int(k[len(self.async_helper_prefix):]):
self.kwargs.get(k, None) or self.__dynamic_pool__.get(k) for k in available_helper
}
while True:
self.async_helper(monitors=helper_settings, timeout=0.1)
class UniResoluter(GenericResoluter):
recorder_class = Recorder
register_class = Register
| [
"[email protected]"
] | |
33deef250b7120b3804dca5d4413e22e3a73e529 | e5d4345bf084bd8ffc0797d14bbca6032202c9e5 | /iautomate/iautomate.py | 85945ee46241beaf63ab5bb2fb545a2e77e7cc7b | [
"MIT"
] | permissive | iranianpep/iautomate | 0a318402de6801ce35d541ddf7a2392766e08eca | c187aedaf8f71c63e06a2112fbcf65f60217c5bc | refs/heads/master | 2020-03-26T15:24:25.605974 | 2018-08-19T16:12:23 | 2018-08-19T16:12:23 | 145,041,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,033 | py | import json
import os
from collections import OrderedDict
from . import global_variables
from .resources import abstract_resource
from .resources import execution_resource
from .resources import file_resource
from .resources import package_resource
from .resources import service_resource
from .resources import directory_resource
class IAutomate(object):
VARS_KEY = 'vars'
TASKS_KEY = 'tasks'
def __init__(self, config_file):
self.config_file = config_file
self.config = self.__parse_config_file()
self.global_variables = self.config.get(self.VARS_KEY, None)
@property
def config_file(self):
return self.__config_file
@config_file.setter
def config_file(self, config_file):
# check if the config file exists
if os.path.isfile(config_file) is True:
self.__config_file = config_file
else:
raise OSError('Config file does not exist: ' + config_file)
@property
def config(self):
return self.__config
@config.setter
def config(self, config):
# check if the config file is not empty
if config:
self.__config = config
else:
raise OSError('Config cannot be empty')
@property
def global_variables(self):
return self.__global_variables
@global_variables.setter
def global_variables(self, variables):
# check if the config file is not empty
self.__global_variables = global_variables.GlobalVariables(variables)
# parse the config file which is in json
def __parse_config_file(self):
return json.load(open(self.config_file), object_pairs_hook=OrderedDict)
# handle execution resource
def __handle_execs(self, execs):
for execution in execs:
self.__handle_exec(execution)
# handle execution resource
def __handle_exec(self, execution_properties):
# instantiate execution model and run it
execution = execution_resource.ExecutionResource(execution_properties, self.global_variables)
execution.run()
if execution_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY, None):
self.__handle_tasks(execution_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY))
# handle package resource
def __handle_packages(self, packages):
for package in packages:
self.__handle_package(package)
# handle package resource
def __handle_package(self, package_properties):
# instantiate package model and run it
package = package_resource.PackageResource(package_properties, self.global_variables)
package.run()
if package_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY, None):
self.__handle_tasks(package_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY))
# handle service resource
def __handle_services(self, services):
for service in services:
self.__handle_service(service)
# handle service resource
def __handle_service(self, service_properties):
# instantiate service model and run it
service = service_resource.ServiceResource(service_properties, self.global_variables)
service.run()
if service_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY, None):
self.__handle_tasks(service_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY))
# handle file resources
def __handle_files(self, files):
# iterate through the files
for file in files:
self.__handle_file(file)
# handle file resource
def __handle_file(self, file_properties):
# instantiate file model and run it
file_resource_obj = file_resource.FileResource(file_properties, self.global_variables)
file_resource_obj.run()
if file_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY, None):
self.__handle_tasks(file_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY))
# handle directory resources
def __handle_directories(self, directories):
# iterate through the directories
for directory in directories:
self.__handle_directory(directory)
def __handle_directory(self, directory_properties):
# instantiate directory model and run it
directory = directory_resource.DirectoryResource(directory_properties, self.global_variables)
directory.run()
if directory_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY, None):
self.__handle_tasks(directory_properties.get(abstract_resource.AbstractResource.AFTER_TASKS_KEY))
def __handle_tasks(self, tasks):
# iterate through the tasks
for task in tasks:
self.__handle_task(task)
def __handle_task(self, task):
# for each task handle the sub items based on the type
for config_type, properties in task.items():
if config_type == 'execs':
print('| Handling execs ...')
self.__handle_execs(properties)
elif config_type == 'packages':
print('| Handling packages ...')
self.__handle_packages(properties)
elif config_type == 'services':
print('| Handling services ...')
self.__handle_services(properties)
elif config_type == 'files':
print('| Handling files ...')
self.__handle_files(properties)
elif config_type == 'directories':
print('| Handling directories ...')
self.__handle_directories(properties)
else:
# unsupported resource
print('Unsupported resource: ' + config_type)
def run(self):
    """Entry point: execute every task defined in the loaded config file."""
    print('Processing the config file ...')
    self.__handle_tasks(self.config[self.TASKS_KEY])
| [
"[email protected]"
] | |
5eadbb4fb195658e0d5d2b2b7ed5c86f9a216ecc | 7f2c5c38bd2c1a445a1e67c71dc5a51abb188bfb | /kbase-extension/jupyter_config.py | 621c5ee560a55d66b4c69a93b2035f57981a2433 | [
"MIT"
] | permissive | kbaseIncubator/narrative-jupyterlab | fc7b17701dc7af589c3500f00cf8f50447a0fe4e | 94a4b4a6bbb583f65ce50c8f8343083aceafff05 | refs/heads/master | 2023-01-07T02:22:57.379427 | 2019-06-28T21:46:49 | 2019-06-28T21:46:49 | 175,266,215 | 2 | 3 | MIT | 2023-01-04T01:11:18 | 2019-03-12T17:40:36 | Python | UTF-8 | Python | false | false | 20,464 | py | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.TerminalInteractiveShell.display_page = False
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.6 (default, Nov 18 2013, 15:12:51) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.2.0-dev -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| [
"[email protected]"
] | |
d7ec10bcc780cba7ce3249a981f31b03edcf0c08 | 6466ea21e16ecb8e1c6ce6d3fcdb60b7b89861c6 | /Code/app/libraries/createFile.py | c5ca8294dc60eef2f739072ae2a5eb283041cc0f | [
"MIT"
] | permissive | liamLatour/DaphnieMaton | 1dfcce208dee460bdd15d3909d2790710bdcd6e4 | 01d149b58437ba100098107f0b9d2f8b72d9557a | refs/heads/master | 2023-07-20T20:57:55.087888 | 2022-07-13T08:06:06 | 2022-07-13T08:06:06 | 154,168,996 | 1 | 0 | MIT | 2023-07-06T21:32:42 | 2018-10-22T15:33:28 | C | UTF-8 | Python | false | false | 4,595 | py | import numpy as np
def generateFile(waypoints, photos, ratio, loop, action):
    """Render a complete Arduino sketch that drives the machine along a path.

    Parameters:
        waypoints: sequence of (x, y) pairs in GUI units.
        photos:    sequence of bools, one per waypoint; True means the
                   sketch calls ``action()`` when that waypoint is reached.
        ratio:     scale factor converting GUI units into motor steps.
        loop:      if True, the generated sketch ping-pongs back and forth
                   along the path instead of restarting from the start.
        action:    name of the header file that provides ``action()``.

    Returns:
        The full sketch source as a single string (header + setup + loop).
    """
    # --- Convert Python values into C/C++ literals -------------------------
    # Scale each waypoint to motor steps; np.rint rounds half-to-even.
    # Casting to int here replaces the old fragile str(...).replace(".0", "")
    # string surgery.
    step_coords = [[int(x), int(y)]
                   for x, y in np.rint(np.multiply(waypoints, ratio)).tolist()]
    waypoints_c = str(step_coords).replace("[", "{").replace("]", "}")
    # Python bools print as 'True'/'False' but C++ needs 'true'/'false'.
    # Bug fix: the old code only lowercased 'F', so loop=True emitted the
    # invalid C++ token 'True' into '#define LOOP'.
    loop_c = str(bool(loop)).lower()
    photos_c = str(photos).replace("[", "{").replace("]", "}").lower()
    waypoint_count = str(len(step_coords))

    # NOTE: the backslash continuations below are *inside* the string
    # literals, so every generated line keeps the same leading layout the
    # original sketch had.
    top = "#include <AccelStepper.h>\n \
#include <" + str(action) + ">\n \
#define LED_PIN 13\n \
#define LOOP " + loop_c + "\n \
AccelStepper Xaxis(AccelStepper::DRIVER, 60, 61);\n \
AccelStepper Y1axis(AccelStepper::DRIVER, 54, 55);\n \
AccelStepper Y2axis(AccelStepper::DRIVER, 46, 48);\n \
\n \
const int A = 3;\n \
const int B = 15;\n \
const int C = 18;\n \
const int D = 2;\n \
const int MA = 19;\n \
const int MD = 14;\n \
\n \
const int waypointNb = " + waypoint_count + ";\n \
int currentWaypoint = 0;\n \
const int waypoints[" + waypoint_count + "][2] = " + waypoints_c + ";\n \
const bool photo[] = " + photos_c + ";\n \
\n \
bool hasStarted = false;\n \
int increment = 1;\n\n"

    setup = "void setup(){\n \
Xaxis.setEnablePin(38);\n \
Y1axis.setEnablePin(56);\n \
Y2axis.setEnablePin(62);\n \
Xaxis.setPinsInverted(false, false, true);\n \
Y1axis.setPinsInverted(false, false, true);\n \
Y2axis.setPinsInverted(true, false, true);\n \
Xaxis.enableOutputs();\n \
Y1axis.enableOutputs();\n \
Y2axis.enableOutputs();\n \
Xaxis.setMaxSpeed(850);\n \
Y1axis.setMaxSpeed(850);\n \
Y2axis.setMaxSpeed(850);\n \
Xaxis.setAcceleration(800);\n \
Y1axis.setAcceleration(800);\n \
Y2axis.setAcceleration(800);\n \
pinMode(40, INPUT_PULLUP);\n \
}\n\n"

    # Renamed from 'loop' so the generated-code string no longer shadows the
    # boolean 'loop' parameter.
    loop_src = "void loop() {\n \
if(digitalRead(40) == 1){\n \
if(hasStarted){\n \
Xaxis.setSpeed(500);\n \
Y1axis.setSpeed(500);\n \
Y2axis.setSpeed(500);\n \
\n \
Xaxis.moveTo(waypoints[currentWaypoint][0]);\n \
Y1axis.moveTo(waypoints[currentWaypoint][1]);\n \
Y2axis.moveTo(waypoints[currentWaypoint][1]);\n \
\n \
bool xmax = false;\n \
bool ymax = false;\n \
\n \
if( (Xaxis.targetPosition() - Xaxis.currentPosition()>0 && digitalRead(MD)) || (Xaxis.targetPosition() - Xaxis.currentPosition()<0 && digitalRead(MA)) ){\n \
Xaxis.runSpeedToPosition();\n \
}\n \
else{\n \
xmax = true;\n \
}\n \
if( (Y1axis.targetPosition() - Y1axis.currentPosition()>0 && digitalRead(B) && digitalRead(C)) || (Y1axis.targetPosition() - Y1axis.currentPosition()<0 && digitalRead(A) && digitalRead(D)) ){\n \
Y1axis.runSpeedToPosition();\n \
Y2axis.runSpeedToPosition();\n \
}\n \
else{\n \
ymax = true;\n \
}\n \
if((Xaxis.distanceToGo()==0 || xmax) && (Y1axis.distanceToGo()==0 || Y2axis.distanceToGo()==0 || ymax)){\n \
if(photo[currentWaypoint]){\n \
//Gotta take them\n \
action();\n \
}\n \
if(LOOP && (currentWaypoint+increment+1)%(waypointNb+1) == 0){\n \
increment = -increment;\n \
}\n \
currentWaypoint = (currentWaypoint+increment)%waypointNb;\n \
}\n \
}\n \
else{\n \
Xaxis.setSpeed(-500);\n \
Y1axis.setSpeed(-500);\n \
Y2axis.setSpeed(-500);\n \
if(digitalRead(MA)){\n \
Xaxis.runSpeed();\n \
}\n \
if(digitalRead(A) && digitalRead(D)){\n \
Y1axis.runSpeed();\n \
Y2axis.runSpeed();\n \
}\n \
if(!digitalRead(MA) && (!digitalRead(A) || !digitalRead(D))){\n \
hasStarted = true;\n \
Xaxis.setCurrentPosition(0);\n \
Y1axis.setCurrentPosition(0);\n \
Y2axis.setCurrentPosition(0);\n \
Xaxis.setSpeed(500);\n \
Y1axis.setSpeed(500);\n \
Y2axis.setSpeed(500);\n \
}\n \
}\n \
}\n \
}"
    return top + setup + loop_src
| [
"[email protected]"
] | |
d7dfbc252d4cbabc72aa1e5528be6515ff4948f4 | 52ffcef145cda7d343f195964ef33cfec34b2c66 | /butterballs_animated/butterballs_animated.pyde | caa1a4d0d4a0e57811fe6b4c60655f6d3a4ac3d3 | [
"MIT"
] | permissive | cclauss/generative_art | 626960162e468605039f281774a2d48eecf7e86e | fc179177badd8b54b4faefd6f36f3d974ff0ca65 | refs/heads/master | 2020-04-06T18:31:40.298910 | 2018-11-15T01:12:30 | 2018-11-15T01:12:30 | 157,701,393 | 0 | 0 | MIT | 2018-11-15T11:36:52 | 2018-11-15T11:36:52 | null | UTF-8 | Python | false | false | 10,066 | pyde | ##########################################################################
# Aaron Penne
# https://github.com/aaronpenne
##########################################################################
import datetime
import string
import sys
from random import shuffle, seed
import helper
import bug_palette
##########################################################################
# Global variables
##########################################################################
# Seed for the deterministic RNG (reassigned per-frame in draw()).
random_seed = 0
# Get time
timestamp = None
# Parameters for draw speed
frame_rate = 30
##########################################################################
# Knobs to turn
##########################################################################
# Canvas size
w = 1000 # width
h = 1000 # height
# Color palettes for the wings (list of HSB triples per palette).
pal = bug_palette.pal
# Jittered angular stops (degrees) at which wing lobes are placed.
# NOTE: `random(a, b)` here is Processing's two-argument uniform random,
# not Python's random module.
upper_angles = [random(x-7, x) for x in range(int(random(0, 20)), int(random(60, 80)), int(random(7, 20)))]
lower_angles = [random(x-7, x) for x in range(int(random(0, 20)), int(random(60, 80)), int(random(7, 20)))]
# NOTE(review): these four radius lists are never read anywhere below —
# they look vestigial; confirm before removing.
upper_radii_high = [random(w*0.4, w*0.3) for x in upper_angles]
upper_radii_low = [random(w*0.01, w*0.2) for x in upper_angles]
lower_radii_high = [random(w*0.1, w*0.4) for x in lower_angles]
lower_radii_low = [random(w*0.05, w*0.2) for x in lower_angles]
# Each wing layer i maps to a list of lobes [x, y, r, angle, phase];
# the leading [0, 0, 0, 0, 0] anchors the curve at the body center.
upper_wing = {}
# Layers 0-9: large outer lobes.
for i in range(0, 10):
    upper_wing[i] = [[0, 0, 0, 0, 0]]
    for angle in upper_angles:
        x = random(0, w*0.01)
        y = random(0, h*0.01)
        r = random(w*0.25, w*0.4)
        a = radians(random(angle-7, angle))
        phase = random(0, 128) * PI/64
        upper_wing[i].append([x, y, r, a, phase])
        # upper_wing[i].append([x, y, r, radians(random(70, 80)), 0])
# Layers 10-19: smaller inner lobes.
for i in range(10, 20):
    upper_wing[i] = [[0, 0, 0, 0, 0]]
    for angle in upper_angles:
        x = random(0, w*0.01)
        y = random(0, h*0.01)
        r = random(w*0.1, w*0.2)
        a = radians(random(angle-7, angle))
        phase = random(0, 128) * PI/64
        upper_wing[i].append([x, y, r, a, phase])
        # upper_wing[i].append([x, y, r, radians(random(70, 80)), 0])
lower_wing = {}
# Layers 0-12: large outer lobes.
for i in range(0, 13):
    lower_wing[i] = [[0, 0, 0, 0, 0]]
    for angle in lower_angles:
        x = random(0, w*0.01)
        y = random(0, h*0.01)
        r = random(w*0.25, w*0.4)
        a = radians(random(angle-7, angle))
        phase = random(0, 128) * PI/64
        lower_wing[i].append([x, y, r, a, phase])
        # lower_wing[i].append([x, y, r, radians(random(70, 80)), 0])
# Layers 13-25: smaller inner lobes.
for i in range(13, 26):
    lower_wing[i] = [[0, 0, 0, 0, 0]]
    for angle in lower_angles:
        x = random(0, w*0.01)
        y = random(0, h*0.01)
        r = random(w*0.1, w*0.2)
        a = radians(random(angle-7, angle))
        phase = random(0, 128) * PI/64
        lower_wing[i].append([x, y, r, a, phase])
        # lower_wing[i].append([x, y, r, radians(random(70, 80)), 0])
# Pick one palette for this run, then one color per wing layer.
palette = pal[int(random(0, len(pal)))]
print(palette)
upper_palette = []
for i in upper_wing:
    upper_palette.append(palette[int(random(0, len(palette)))])
lower_palette = []
for i in lower_wing:
    lower_palette.append(palette[int(random(0, len(palette)))])
# Populated in setup(); shared with draw() via `global`.
body = None
angles = None
radii = None
antennae = None
curve_tightness = []
##########################################################################
# setup()
# function gets run once at start of program
##########################################################################
def setup():
    """Processing entry point: configure the canvas and build the body/antennae."""
    print(frameCount)
    # Sets size of canvas in pixels (must be first line)
    size(w, h)
    # Sets resolution dynamically (affects resolution of saved image)
    pixelDensity(displayDensity()) # 1 for low, 2 for high
    # Sets color space to Hue Saturation Brightness with max values of HSB
    # respectively
    colorMode(HSB, 360, 100, 100, 100)
    # Set the number of frames per second to display
    frameRate(frame_rate)
    background(0, 0, 100)
    rectMode(CORNER)
    global body, angles, radii, antennae, curve_tightness
    # Antennae
    # Body outline control points, centered on the origin.
    body = get_16_points(-w*0.015, -h*0.2, w*0.03, h*0.4)
    angles, radii = get_angles_radii_antennae(10, w*0.1)
    # Each antenna is [x, y, radius, angle, phase]; all start at body[0].
    antennae = []
    for i in range(int(random(3, 8))):
        x = body[0][0]
        y = body[0][1]
        r = random(height * 0.1, height * 0.3)
        a = random(45, 80)
        phase = random(0, 128) * PI/64
        antennae.append([x, y, r, a, phase])
    # One curve-tightness value per antenna segment, used while drawing.
    for a in antennae:
        curve_tightness.append(random(-2, 0.8))
    # Stops draw() from running in an infinite loop (should be last line)
    #noLoop() # Comment to run draw() infinitely (or until 'count' hits limit)
##########################################################################
# draw()
##########################################################################
def draw():
    """Render one animation frame: wings, body, antennae; save it to disk."""
    global timestamp
    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    global random_seed
    random_seed = int(frameCount*100000/(second()+1))
    # NOTE(review): the derived seed above is immediately overwritten by the
    # hard-coded 1143, so every frame reseeds identically (keeps the shapes
    # stable while only the sine phases animate).
    random_seed = 1143
    random_seed = helper.get_seed(random_seed)
    helper.set_seed(random_seed)
    global body, angles, radii, antennae, curve_tightness
    # Animation spans 2*steps frames (one full sine cycle), then the sketch exits.
    steps = 32
    if frameCount >= 2*steps:
        exit()
    #palette = pal[int(random(0,len(pal)))]
    #palette_bg_idx = int(random(0, len(palette)))
    background(g.backgroundColor)
    # Move the origin to the canvas center so mirroring works via scale().
    translate(width/2, height/2)
    curveTightness(curve_tightness[-1])
    ##########################################################################
    # Upper Wings
    ##########################################################################
    #stroke(0, 0, 0, 60)
    noStroke()
    for i in upper_wing:
        layer = []
        p = upper_palette[i]
        # NOTE(review): `(i==3) and (i==6)` can never be true; `or` was
        # probably intended, so the white fill branch is dead code.
        if (i==3) and (i==6):
            fill(0, 0, 100, 20)
        else:
            fill(p[0], p[1], p[2], 20)
        for x, y, r, a, phase in upper_wing[i]:
            # Pulse each lobe's radius with its own phase offset.
            r = r + sin(frameCount*PI/steps+phase) * 20
            layer.append(helper.circle_points_list(x, y, r, a))
        draw_wings(layer, True)
    ##########################################################################
    # Lower Wings
    ##########################################################################
    for i in lower_wing:
        layer = []
        p = lower_palette[i]
        # NOTE(review): same always-false condition as in the upper-wing loop.
        if (i==3) and (i==6):
            fill(0, 0, 100, 20)
        else:
            fill(p[0], p[1], p[2], 20)
        for x, y, r, a, phase in lower_wing[i]:
            r = r + sin(frameCount*PI/steps+phase) * 20
            layer.append(helper.circle_points_list(x, y, r, a))
        draw_wings(layer)
    ##########################################################################
    # Antennae and body
    ##########################################################################
    # Animate both the length (r) and the angle (a) of each antenna tip.
    antennae_points = []
    for x, y, r, a, phase in antennae:
        r = r + sin(frameCount*PI/steps+phase) * 7
        a = a + sin(frameCount*PI/steps+phase) * 11
        antennae_points.append(helper.circle_points_list(x, y, r, radians(a)))
    # Body
    fill(0, 0, 100)
    noStroke()
    draw_16_points(body)
    pushStyle()
    pushMatrix()
    translate(0, -random(height * 0.32, height * 0.35))
    noFill()
    strokeWeight(width * 0.001)
    # NOTE(review): `p` here is whatever color the last lower-wing loop
    # iteration left behind — presumably intentional, but worth confirming.
    stroke(p[0], p[1], 25)
    # Draw the antenna curve once flipped vertically, then mirrored.
    scale(1, -1)
    beginShape()
    curveVertex(*body[2])
    curveVertex(*body[2])
    for i, (x, y) in enumerate(antennae_points):
        curveTightness(curve_tightness[i])
        curveVertex(x, y)
    endShape()
    scale(-1, 1)
    beginShape()
    curveVertex(*body[2])
    curveVertex(*body[2])
    for i, (x, y) in enumerate(antennae_points):
        curveTightness(curve_tightness[i])
        curveVertex(x, y)
    endShape()
    popStyle()
    popMatrix()
    helper.save_frame_timestamp('butterballs', timestamp, random_seed)
    # Save memory by closing image, just look at it in the file system
    # if (w > 1000) or (h > 1000):
    #     exit()
##########################################################################
# Functions
##########################################################################
def draw_wings(wing, upper_wing=False):
    # Draw the wing outline twice, mirrored across the y-axis, inside an
    # isolated transform. Upper wings are first flipped vertically so the
    # same point data serves both wing pairs.
    pushMatrix()
    if upper_wing:
        scale(1,-1)
    draw_curve_filled(wing)
    scale(-1,1)
    draw_curve_filled(wing)
    popMatrix()
def get_16_points(x, y, w, h):
    """Return the 12 control points of a blob outline for the (x, y, w, h) box.

    The outline runs clockwise from the top edge; the two side midpoints are
    pushed outward/inward by a random `squeeze` to make the shape irregular.
    (Historically 16 points were generated and the four corner anchors
    dropped, hence the name.)
    """
    squeeze = random(w * 0.2, w * 0.3)
    return [
        [x + w * 0.25, y],
        [x + w * 0.5, y - h * 0.05],
        [x + w * 0.75, y],
        [x + w, y + h * 0.25],
        [x + w + squeeze, y + h * 0.5],
        [x + w, y + h * 0.75],
        [x + w * 0.75, y + h],
        [x + w * 0.5, y + h],
        [x + w * 0.25, y + h],
        [x, y + h * 0.75],
        [x - squeeze, y + h * 0.5],
        [x, y + h * 0.25],
    ]
def cvp(x, y):
    # Single funnel for outline vertices; the commented ellipse is a
    # debugging aid for visualising the curve's control points.
    curveVertex(x, y)
    #ellipse(x, y, 5, 5)
def draw_16_points(points):
    """Draw *points* as a closed curve.

    The body was an exact duplicate of draw_curve_filled() (loop over
    points + points[0:3] through cvp); delegate instead of duplicating.
    The separate name is kept for the existing body-drawing call sites.
    """
    draw_curve_filled(points)
def draw_curve_filled(data):
    # Emit the point list as a closed curve shape; the first three points
    # are appended again so the Catmull-Rom spline wraps around smoothly.
    beginShape()
    for t in data+data[:3]:
        cvp(*t)
    endShape()
def get_angles_radii_antennae(angle_offset, r):
    """Return randomised (angles, radii) control values for one antenna.

    Angles jitter the four base headings by up to ±angle_offset via
    helper.random_centered; radii shrink along the antenna from roughly
    0.75*r down to 0.15*r.
    """
    base_headings = (180, 90, 30, 300)
    angles = [helper.random_centered(h, angle_offset) for h in base_headings]
    radius_bounds = ((0.45, 0.75), (0.25, 0.5), (0.15, 0.3), (0.1, 0.15))
    radii = [random(r * lo, r * hi) for lo, hi in radius_bounds]
    return angles, radii
def mousePressed():
    # Processing event hook: clicking the sketch saves the current frame.
    helper.save_frame_timestamp('butterballs', timestamp, random_seed)
| [
"[email protected]"
] | |
4f44550a4c07a6e54e20dc24371f5ba545147a4d | a48ad7fb3dc743204ac7ae2df3313ebd33d64558 | /transform.py | 2efaa14a44d61f9b6b5102b4d38f24b2c971cbe3 | [] | no_license | sravyasridivakarla/ECS192 | be39564b668da67a58976ec7f25b4e1163f77929 | db8f361be9d6393c4f5c8bbdc6b51f7da8c93b35 | refs/heads/master | 2020-04-28T08:26:45.742537 | 2019-03-13T05:39:10 | 2019-03-13T05:39:10 | 175,126,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | import csv
import os
# function to build category dictionary with id as key
def build_id_dictionary_transform(category_filename):
    """Return {category id -> category name} read from *category_filename*.

    The CSV's first row is a header and is skipped; column 0 holds the id,
    column 2 the human-readable category name.
    """
    with open(category_filename) as handle:
        rows = csv.reader(handle, delimiter=',')
        next(rows, None)  # drop the header row
        return {str(row[0]): str(row[2]) for row in rows}
# function to build a dictionary of countries with code as key
def build_country_dictionary_transform(country_filename):
    """Return {country code -> country name} read from *country_filename*.

    The CSV's first row is a header and is skipped; column 0 holds the
    country name, column 1 its code.
    """
    with open(country_filename) as handle:
        rows = csv.reader(handle, delimiter=',')
        next(rows, None)  # drop the header row
        return {str(row[1]): str(row[0]) for row in rows}
def transform_output(id_dict, country_dict, output_filename):
    """Rewrite the raw API dump into a readable ``dataset.csv``.

    The header row is copied verbatim.  For every data row, column 0
    (country code) and column 5 (category id) are resolved through the
    supplied dictionaries (spaces replaced by underscores), column 1 is
    mapped to a gender label and column 4 to an age-group label.  The
    result is written to ``dataset.csv`` in the current directory.
    """
    gender_by_code = {'1': "male", '2': "female"}
    age_group_by_code = {'1': "13-20", '2': "21-30", '3': "31-40",
                         '4': "41-50", '5': "51-60", '6': "61-65+"}
    with open(output_filename) as src, open('dataset.csv', 'w') as dst:
        reader = csv.reader(src, delimiter=',')
        writer = csv.writer(dst, delimiter=',', lineterminator='\n')
        header_written = False
        for row in reader:
            if not header_written:
                header_written = True
                writer.writerow(row)
                continue
            row[0] = country_dict[row[0]].replace(" ", "_")
            row[1] = gender_by_code[row[1]]
            row[4] = age_group_by_code[row[4]]
            row[5] = id_dict[row[5]].replace(" ", "_")
            writer.writerow(row)
def transform():
    """Drive the full conversion: read the two lookup CSVs from the current
    directory and rewrite api_responses.csv into dataset.csv."""
    # build id dictionary
    id_dictionary = build_id_dictionary_transform("categories.csv")
    # build country dictionary
    country_dictionary = build_country_dictionary_transform("country_codes.csv")
    # transform the dataset back to more readable and friendlier contents
    transform_output(id_dictionary, country_dictionary, "api_responses.csv")
# NOTE: runs at import time; the script expects the three input CSVs to be
# present in the current working directory.
transform()
print("new file 'dataset.csv' created in the current directory!")
| [
"[email protected]"
] | |
d44c4416c934a7ecd2668193f5d1576fa89ad7e0 | 904bfdb54d70ba3f5d6a2aea55a81b2453568448 | /pe47.py | 42fb6be303040ae6107d8b85ae014c45c65bf5af | [] | no_license | tapan1911/project_euler | 01dc87d28915dae26bd474301ab1709f4a3d1ce2 | 7770b62acafcbe5d0ceea3a7526bfa0c71b6a031 | refs/heads/master | 2020-04-02T01:08:52.013910 | 2013-06-26T13:35:03 | 2013-06-26T13:35:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | import time
start = time.time()

# Project Euler 47: find the first of four consecutive integers that each
# have exactly four distinct prime factors.
# Rewritten from Python 2 (bare `print` statements) to Python 3 and cleaned
# up; the algorithm and bounds match the original script.

SEARCH_LIMIT = 200000   # candidates are searched below this bound
SIEVE_LIMIT = 100000    # primes below this bound suffice (see note below)
TARGET = 4              # distinct prime factors required, and run length

# Sieve of Eratosthenes: is_prime[i] is True iff i is prime.
is_prime = [True] * SIEVE_LIMIT
is_prime[0] = is_prime[1] = False
for i in range(2, int(SIEVE_LIMIT ** 0.5) + 1):
    if is_prime[i]:
        for multiple in range(i * i, SIEVE_LIMIT, i):
            is_prime[multiple] = False

# factor_count[n] = number of distinct prime factors of n (counted over the
# sieved primes).  Any n < SEARCH_LIMIT with a prime factor >= SIEVE_LIMIT
# must be that prime itself (2 * SIEVE_LIMIT >= SEARCH_LIMIT), i.e. it has
# one prime factor and can never qualify, so leaving its count at 0 is safe.
factor_count = [0] * SEARCH_LIMIT
for p in range(2, SIEVE_LIMIT):
    if is_prime[p]:
        for multiple in range(p, SEARCH_LIMIT, p):
            factor_count[multiple] += 1

# Scan for the first run of TARGET consecutive integers whose distinct
# prime factor count is exactly TARGET.
answer = None
run_length = 0
for n in range(2, SEARCH_LIMIT):
    if factor_count[n] == TARGET:
        run_length += 1
        if run_length == TARGET:
            answer = n - TARGET + 1
            break
    else:
        run_length = 0

elapsed = time.time() - start
print("Ans =", answer)
print("Time taken to find the result = %s seconds" % elapsed)
| [
"[email protected]"
] | |
abb2742c5fb76454cf88e58811ec3e5d1512c143 | e219e84633a17c2000575604c754f099e230c52e | /scrapyd_dash/migrations/0001_initial.py | 4aa64ff7e527c750e1cef96c992ba14add43e871 | [
"MIT"
] | permissive | Dainius-P/scrapyd-dash | a99234ade04a1324ae3b85e198716ef96602e7d7 | f769551070c919ba41616928309460a93e1b120a | refs/heads/master | 2022-04-26T16:22:01.062628 | 2019-08-26T08:36:38 | 2019-08-26T08:36:38 | 193,942,401 | 8 | 1 | MIT | 2022-04-22T22:08:53 | 2019-06-26T16:37:12 | CSS | UTF-8 | Python | false | false | 5,877 | py | # Generated by Django 2.2.2 on 2019-08-21 07:41
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the scrapyd_dash app: servers, projects, project
    # versions, tasks and cron-style scheduled tasks.  Auto-generated by
    # Django's makemigrations; do not edit the operations by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ScrapydProject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
            ],
            options={
                'db_table': 'scrapyd_dash_projects',
            },
        ),
        migrations.CreateModel(
            name='ScrapydServer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.CharField(max_length=128)),
                ('port', models.CharField(max_length=32)),
                ('node_name', models.CharField(max_length=256)),
                ('status', models.CharField(max_length=64)),
                ('status_message', models.CharField(max_length=512, null=True)),
                ('pending_tasks', models.PositiveIntegerField(default=0)),
                ('finished_tasks', models.PositiveIntegerField(default=0)),
                ('running_tasks', models.PositiveIntegerField(default=0)),
            ],
            options={
                'db_table': 'scrapyd_dash_servers',
                'ordering': ['-status'],
                'unique_together': {('ip', 'port')},
            },
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.CharField(max_length=64, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=256)),
                ('spider', models.CharField(max_length=256)),
                ('status', models.CharField(max_length=64)),
                ('pages', models.PositiveIntegerField(null=True)),
                ('items', models.PositiveIntegerField(null=True)),
                ('pid', models.PositiveIntegerField(null=True)),
                ('runtime', models.CharField(blank=True, max_length=64, null=True)),
                ('start_datetime', models.DateTimeField(blank=True, null=True)),
                ('finished_datetime', models.DateTimeField(blank=True, null=True)),
                ('log_href', models.CharField(max_length=1024, null=True)),
                ('items_href', models.CharField(max_length=1024, null=True)),
                ('create_datetime', models.DateTimeField(auto_now_add=True)),
                ('update_datetime', models.DateTimeField(auto_now=True)),
                ('deleted', models.BooleanField(default=False)),
                ('stopping', models.BooleanField(default=False)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scrapyd_dash.ScrapydProject')),
                ('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scrapyd_dash.ScrapydServer')),
            ],
            options={
                'db_table': 'scrapyd_dash_tasks',
                'ordering': ['-create_datetime'],
            },
        ),
        migrations.CreateModel(
            name='ScrapydProjectVersion',
            fields=[
                ('version', models.CharField(max_length=125, primary_key=True, serialize=False)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='scrapyd_dash.ScrapydProject')),
            ],
        ),
        migrations.AddField(
            model_name='scrapydproject',
            name='server',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scrapyd_dash.ScrapydServer'),
        ),
        migrations.CreateModel(
            name='ScheduledTask',
            fields=[
                ('name', models.CharField(max_length=256, primary_key=True, serialize=False)),
                ('create_datetime', models.DateTimeField(auto_now_add=True)),
                ('update_datetime', models.DateTimeField(auto_now=True)),
                ('spider', models.CharField(max_length=256)),
                ('year', models.PositiveIntegerField(blank=True, null=True)),
                ('month', models.PositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(12)])),
                ('day', models.PositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(32)])),
                ('week', models.PositiveIntegerField(blank=True, null=True)),
                ('day_of_week', models.PositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(7)])),
                ('hour', models.PositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(24)])),
                ('minute', models.PositiveIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(60)])),
                ('last_run', models.DateTimeField(blank=True, null=True)),
                ('next_run', models.DateTimeField(blank=True, null=True)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scrapyd_dash.ScrapydProject')),
                ('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scrapyd_dash.ScrapydServer')),
                ('tasks', models.ManyToManyField(blank=True, to='scrapyd_dash.Task')),
            ],
            options={
                'db_table': 'scrapyd_dash_scheduled_tasks',
                'ordering': ['-create_datetime'],
            },
        ),
        migrations.AlterUniqueTogether(
            name='scrapydproject',
            unique_together={('server', 'name')},
        ),
    ]
| [
"[email protected]"
] | |
522e7c30860e3bd7ee521bede2a09b7acdf5101c | 27b86f422246a78704e0e84983b2630533a47db6 | /tests/test_09_cython_acceleration/test_903_acc_matrix44.py | 4ac34138cc30e532696762180ccc43733e00e6c1 | [
"MIT"
] | permissive | mozman/ezdxf | 7512decd600896960660f0f580cab815bf0d7a51 | ba6ab0264dcb6833173042a37b1b5ae878d75113 | refs/heads/master | 2023-09-01T11:55:13.462105 | 2023-08-15T11:50:05 | 2023-08-15T12:00:04 | 79,697,117 | 750 | 194 | MIT | 2023-09-14T09:40:41 | 2017-01-22T05:55:55 | Python | UTF-8 | Python | false | false | 2,052 | py | # Copyright (c) 2020-2023, Manfred Moitzi
# License: MIT License
# Test only basic features of Cython implementation,
# Full testing and compatibility check with Python implementation
# is located in test suite 605.
import pytest
import numpy as np
matrix44 = pytest.importorskip("ezdxf.acc.matrix44")
Matrix44: matrix44.Matrix44 = matrix44.Matrix44
def test_default_constructor():
    """A bare Matrix44 is the identity (ones on the diagonal)."""
    m = Matrix44()
    assert m[0, 0] == 1.0
    assert m[1, 1] == 1.0
    assert m[2, 2] == 1.0
    assert m[3, 3] == 1.0
def test_16_numbers_constructor():
    """16 scalars fill the matrix row-major."""
    m = Matrix44(range(16))
    assert m[3, 3] == 15
def test_4_rows_constructor():
    """Four 4-tuples are taken as rows."""
    m = Matrix44((0, 0, 0, 0), (1, 1, 1, 1), (2, 2, 2, 2), (3, 3, 3, 3))
    assert m[0, 0] == 0
    assert m[3, 3] == 3
def test_set_item():
    """__setitem__ with a (row, col) index writes a single cell."""
    m = Matrix44()
    m[0, 0] = 17
    assert m[0, 0] == 17
def test_set_row_4_values():
    """set_row with four values replaces the whole row."""
    m = Matrix44()
    m.set_row(0, (2, 3, 4, 5))
    assert m.get_row(0) == (2, 3, 4, 5)
def test_set_row_1_value():
    """A short tuple only overwrites the leading cells of the row."""
    m = Matrix44()
    m.set_row(1, (2,))
    assert m.get_row(1) == (2, 1, 0, 0)
def test_set_col_4_values():
    """set_col with four values replaces the whole column."""
    m = Matrix44()
    m.set_col(0, (2, 3, 4, 5))
    assert m.get_col(0) == (2, 3, 4, 5)
def test_set_col_1_value():
    """A short tuple only overwrites the leading cells of the column."""
    m = Matrix44()
    m.set_col(1, (2,))
    assert m.get_col(1) == (2, 1, 0, 0)
def test_copy():
    """copy() yields an independent matrix, not a shared view."""
    m1 = Matrix44(range(16))
    m2 = m1.copy()
    assert m2.get_row(0) == (0, 1, 2, 3)
    m1.set_row(0, (20, 30, 40, 50))
    assert m1.get_row(0) == (20, 30, 40, 50)
    assert m2.get_row(0) == (0, 1, 2, 3)
def test_get_origin():
    """The origin property exposes the translation part (row 3, xyz)."""
    m = Matrix44()
    m.set_row(3, (7, 8, 9))
    assert m.origin == (7, 8, 9)
def test_array_inplace_transformation():
    """transform_array_inplace matches fast_2d_transform on the same points."""
    from ezdxf.math import Vec2
    m = matrix44.Matrix44.translate(1, 2, 0)
    points = [(0, 0), (1, 1), (2, 2)]
    control = m.fast_2d_transform(points)
    array = np.array(points, dtype=np.float64)
    m.transform_array_inplace(array, 2)
    assert all(Vec2(c).isclose(r) for c, r in zip(control, array))
if __name__ == "__main__":
    pytest.main([__file__])
| [
"[email protected]"
] | |
44628d8275b2cc974d401f3e93a56524d8305463 | 6534ef2fc28a8767019ac14b08e2cf93818a13a7 | /deploy/macosx/setup.py | 104bee5060ac7e286404ebdd3e72424110c60bec | [
"BSD-3-Clause"
] | permissive | zedxxx/mapmbtiles | 8bee63019072dc402c3a0aa6515d35517af32aff | 05ca5c093579d80ae87f1a3fe21c922c0755b962 | refs/heads/master | 2021-01-21T08:37:50.686309 | 2014-02-28T11:41:28 | 2014-02-28T11:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | import py2app
from setuptools import setup
# Build the .app file
# py2app bundling configuration for the MapMbTiles GUI.  The heavy geo
# libraries are shipped as macOS .frameworks instead of Python packages
# (osgeo/PIL/numpy are excluded from the bundle for that reason).
setup(
    options=dict(
        py2app=dict(
            iconfile='resources/mapmbtiles.icns',
            packages='wx',
            excludes='osgeo,PIL,numpy',
            #site_packages=True,
            #semi_standalone=True,
            resources=['resources/license/LICENSE.txt','mapmbtiles'],
            plist=dict(
                CFBundleName = "MapMbTiles",
                CFBundleShortVersionString = "1.0.alpha2", # must be in X.X.X format
                CFBundleGetInfoString = "MapMbTiles 1.0 alpha2",
                CFBundleExecutable = "MapMbTiles",
                CFBundleIdentifier = "de.mj10777.mapmbtiles",
            ),
            frameworks=['PROJ.framework','GEOS.framework','SQLite3.framework','UnixImageIO.framework','GDAL.framework'],
        ),
    ),
    app=[ 'mapmbtiles.py' ]
)
| [
"[email protected]"
] | |
982cf380119747dfd4ab555ba1709527d99e676c | f8666599b83d34c861651861cc7db5b3c434fc87 | /plotly/validators/barpolar/_selectedpoints.py | 42c6675b37aa8c90da3726019e97afc754168a53 | [
"MIT"
] | permissive | mode/plotly.py | 8b66806e88c9f1820d478bab726f0bea81884432 | c5a9ac386a40df2816e6c13264dadf14299401e4 | refs/heads/master | 2022-08-26T00:07:35.376636 | 2018-09-26T19:08:54 | 2018-09-26T19:19:31 | 60,372,968 | 1 | 1 | MIT | 2019-11-13T23:03:22 | 2016-06-03T19:34:55 | Python | UTF-8 | Python | false | false | 475 | py | import _plotly_utils.basevalidators
class SelectedpointsValidator(_plotly_utils.basevalidators.AnyValidator):
    # Auto-generated plotly schema validator for barpolar.selectedpoints;
    # it only pins the defaults (edit_type='calc', role='info') and defers
    # all validation to the AnyValidator base class.
    def __init__(
        self, plotly_name='selectedpoints', parent_name='barpolar', **kwargs
    ):
        super(SelectedpointsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'calc'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
| [
"[email protected]"
] | |
2b1c1d5d8b54d1b2012c886dad1cf76539818866 | dc4bfd8a002320ac8ed3767957bbc4db96230216 | /example/config.py | 4433e8a0113f8e75a28d1f334bd15f997c6d76fc | [
"MIT"
] | permissive | antikytheraton/nbChatbot | 5aee172432a20ca8936a0c6612c214934d48bd32 | 54776fc5e4e26950fb58ccd0a2fe98a7084df850 | refs/heads/master | 2022-12-09T15:11:37.764848 | 2020-04-16T18:09:15 | 2020-04-16T18:09:15 | 93,900,722 | 4 | 0 | MIT | 2022-12-07T23:58:00 | 2017-06-09T22:16:50 | Python | UTF-8 | Python | false | false | 169 | py | CONFIG = {
    # NOTE(review): these values look like redacted placeholders; in a real
    # deployment load the tokens and server URL from environment variables
    # rather than committing them to source control.
    'FACEBOOK_TOKEN': 'EAAbYZAtdQxxxxxxxxxxxxxxxxxxxxxNg4WSc9Flu56XQLRRBjgZDZD',
    'VERIFY_TOKEN': 'verify_nbchatbot',
    'SERVER_URL': 'heroku app url'
}
| [
"[email protected]"
] | |
eb8f81b3d78e13fdba5aa3f29afb1cfcc88fa8c4 | 6c643313e24fc48b23e40b5d6a7ba8a959986914 | /1untitled.py | e7c9ef90d0caf217b8dcb7456f83fe00033f2d81 | [] | no_license | 771369226/pythonProject6 | 9bb0d5298f74249fc7a4b1f261bcec02078b6151 | e3854538e66ede7f92a50eb9ba306cbe7db31b94 | refs/heads/master | 2023-02-25T10:57:33.639063 | 2021-01-22T05:51:10 | 2021-01-22T05:51:10 | 330,664,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,832 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '1untitled.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
class Ui_MainWindow(object):
    # pyuic5-generated scaffold for a form with five labelled fields:
    # 序号/姓名/年龄 line edits, a 省份 combo box and a 薪水 line edit.
    # Regenerating the .ui file will discard manual edits to this class.
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.layoutWidget.setGeometry(QtCore.QRect(90, 70, 225, 147))
        self.layoutWidget.setObjectName("layoutWidget")
        self.formLayout = QtWidgets.QFormLayout(self.layoutWidget)
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setObjectName("formLayout")
        self.label = QtWidgets.QLabel(self.layoutWidget)
        self.label.setObjectName("label")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
        self.lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
        self.label_2 = QtWidgets.QLabel(self.layoutWidget)
        self.label_2.setObjectName("label_2")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
        self.label_3 = QtWidgets.QLabel(self.layoutWidget)
        self.label_3.setObjectName("label_3")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)
        self.lineEdit_3 = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lineEdit_3)
        self.label_4 = QtWidgets.QLabel(self.layoutWidget)
        self.label_4.setObjectName("label_4")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_4)
        self.comboBox = QtWidgets.QComboBox(self.layoutWidget)
        self.comboBox.setObjectName("comboBox")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.comboBox)
        self.label_5 = QtWidgets.QLabel(self.layoutWidget)
        self.label_5.setObjectName("label_5")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_5)
        self.lineEdit_5 = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.lineEdit_5)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Generated i18n hook: all user-visible strings are set here via
        # QCoreApplication.translate so Qt Linguist can localise them.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "序号:"))
        self.label_2.setText(_translate("MainWindow", "姓名:"))
        self.label_3.setText(_translate("MainWindow", "年龄:"))
        self.label_4.setText(_translate("MainWindow", "省份:"))
        self.comboBox.setItemText(0, _translate("MainWindow", "山东"))
        self.comboBox.setItemText(1, _translate("MainWindow", "河南"))
        self.comboBox.setItemText(2, _translate("MainWindow", "河北"))
        self.comboBox.setItemText(3, _translate("MainWindow", "广东"))
        self.comboBox.setItemText(4, _translate("MainWindow", "内蒙古"))
        self.comboBox.setItemText(5, _translate("MainWindow", "新疆"))
        self.label_5.setText(_translate("MainWindow", "薪水:"))
if __name__ == '__main__':
    # Manual test entry point: show the generated form on a bare QMainWindow.
    app = QApplication(sys.argv)
    MainWindow = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
"[email protected]"
] | |
560bb849b69c91f3c14003570cd8329a0d6f25ca | 4993718f1b5f9b3aa110f147e6f59207d4a6ca83 | /readMetadata.py | f7ea1f7e3081a6cfc058a8f68748638fad081afd | [] | no_license | pawlowska/terastitcher_files_coversions | acd9d7aa3bb9c31a05743ac0efc929172ff37cb5 | 45d525fbbdba25fa18b55a842ff720dbe65ef729 | refs/heads/master | 2020-06-09T16:25:41.477356 | 2020-01-14T12:23:52 | 2020-01-14T12:23:52 | 76,028,154 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | # -*- coding: utf-8 -*-
#this file is imported in file_prep.py
import json, os
import prepFunctions
def zrobListeStringowMM2(dataDir, nazwaPliku):
    """Parse a Micro-Manager 2.x metadata dump of stage positions.

    Everything before the first '[' is skipped, the rest is parsed as a
    JSON list of positions.  Each position's first subposition (x, y) is
    scaled by 10 and zero-padded to six characters.

    Fix: the file handle was opened and never closed; use ``with``.

    :param dataDir: directory containing the metadata file
    :param nazwaPliku: metadata file name
    :return: {'listaStringow': ["xxxxxx_yyyyyy", ...], 'listaXow': ["xxxxxx", ...]}
    """
    with open(os.path.join(dataDir, nazwaPliku)) as f:
        t = f.read()
    jObject = json.loads(t[t.find("["):])
    listaStringow = []
    listaXow = []
    for p in jObject:
        x = p['subpositions'][0]['x']
        y = p['subpositions'][0]['y']
        listaStringow.append(format(10 * x, '06') + '_' + format(10 * y, '06'))
        listaXow.append(format(10 * x, '06'))
    return {'listaStringow': listaStringow, 'listaXow': listaXow}
def zrobListeStringow(dataDir, nazwaPliku, stageXY='Standa8SMC4XY'):
    """Parse a Micro-Manager 1.x acquisition metadata file.

    Fix: the file handle was opened and never closed; use ``with``.

    :param dataDir: directory containing the metadata file
    :param nazwaPliku: metadata file name
    :param stageXY: key of the XY stage inside DeviceCoordinatesUm
    :return: dict with the start z position, z step, slice count and the
        per-position "xxxxxx_yyyyyy" strings (coordinates * 10, zero-padded
        to six characters) plus the list of x strings.
    """
    with open(os.path.join(dataDir, nazwaPliku)) as f:
        jObject = json.loads(f.read())
    z = jObject['FrameKey-0-0-0']['ZPositionUm']
    slices = jObject['Summary']['Slices']
    zStep = jObject['Summary']['z-step_um']
    listaPoz = jObject['Summary']['InitialPositionList']
    listaStringow = []
    listaXow = []
    for p in listaPoz:
        l = p['DeviceCoordinatesUm'][stageXY]
        listaStringow.append(format(10 * l[0], '06') + '_' + format(10 * l[1], '06'))
        listaXow.append(format(10 * l[0], '06'))
    return {'z': z, 'zStep': zStep, 'slices': slices,
            'listaStringow': listaStringow, 'listaXow': listaXow}
def zrobListeStringowMM2_fromSavedPL(dataDir, nazwaPliku, stageXY='Standa8SMC4XY'):
    """Parse a saved Micro-Manager 2.x position list (METADATA_xy* file).

    Coordinates from the first device of each position are rounded, scaled
    by 10 and zero-padded to six characters; duplicate x strings and
    position strings are collapsed while labels are kept for every entry.

    Fix: the file handle was opened and never closed; use ``with``.
    NOTE(review): *stageXY* is accepted for signature parity with
    zrobListeStringow but is not used by this format.

    :return: {'listaStringow': [...], 'listaXow': [...], 'labels': [...]}
    """
    with open(os.path.join(dataDir, nazwaPliku)) as f:
        jObject = json.loads(f.read())
    listaPoz = jObject['POSITIONS']
    listaStringow = []
    listaXow = []
    listaLbl = []
    for p in listaPoz:
        listaLbl.append(p['LABEL'])
        x = round(p['DEVICES'][0]['X'])
        y = round(p['DEVICES'][0]['Y'])
        xs = format(10 * x, '06')
        if xs not in listaXow:
            listaXow.append(xs)
        s = format(10 * x, '06') + '_' + format(10 * y, '06')
        if s not in listaStringow:
            listaStringow.append(s)
    return {'listaStringow': listaStringow, 'listaXow': listaXow, 'labels': listaLbl}
def makeDic(rawDataDir):
    # Locate the positions/metadata file inside rawDataDir and dispatch to
    # the matching parser: METADATA_xy* files are MM2 saved position lists,
    # anything else is treated as an MM2 acquisition metadata dump.
    #find metadata file in it
    nazwaPlikuPozycji=prepFunctions.findPositionsFile(rawDataDir)
    assert(nazwaPlikuPozycji), "No positions file found"
    if nazwaPlikuPozycji.startswith('METADATA_xy'):
        dic = zrobListeStringowMM2_fromSavedPL(rawDataDir, nazwaPlikuPozycji)
    else:
        dic = zrobListeStringowMM2(rawDataDir, nazwaPlikuPozycji)
    return dic
"[email protected]"
] | |
c129ee002f6d78f0d81c4c2a136f877bca34f397 | eae8d7821f73233ef52f916b2d22b392fa056086 | /python_one_learn/day17/common/list_helper.py | 023e71741dd20da48f838f91b6887d1ee442f3a1 | [] | no_license | Wuskysong/python01_- | cad75d90dab945cc85719dcb1bb138ce81b813e5 | 4cef60bc5812524f7331e0f97f68c41db7082db7 | refs/heads/master | 2020-06-21T23:59:20.121964 | 2019-07-18T12:54:40 | 2019-07-18T12:54:40 | 197,584,017 | 1 | 0 | null | 2019-07-18T12:44:26 | 2019-07-18T12:37:03 | Python | UTF-8 | Python | false | false | 1,533 | py | """
列表助手模块
"""
class ListHelper:
    """
    Helper class bundling generic, predicate-based list search utilities.
    """
    @staticmethod
    def find_all(list_target, func_condition):
        """
        Yield every element of the list satisfying a predicate.

        :param list_target: list to search
        :param func_condition: predicate, function type
            func(item) --> bool
        :return: matching elements, as a generator.
        """
        for item in list_target:
            if func_condition(item):
                yield item
    @staticmethod
    def find_single(list_target, func_condition):
        """
        Return the first element of the list satisfying a predicate
        (implicitly None when nothing matches).

        :param list_target: list to search
        :param func_condition: predicate, function type
            func(item) --> bool
        :return: the first matching element, or None
        """
        for item in list_target:
            if func_condition(item):
                return item
    @staticmethod
    def get_count(list_target, func_duration):
        """
        Count the elements of the list satisfying a predicate.

        NOTE(review): the parameter is named ``func_duration`` while the
        sibling methods use ``func_condition``; it is a predicate all the
        same.  Renaming it would break keyword callers, so only noted here.

        :param list_target: list to search
        :param func_duration: predicate, function type
            func(item) --> bool
        :return: number of elements satisfying the predicate
        """
        count_value = 0
        for item in list_target:
            if func_duration(item):
                count_value += 1
        return count_value
| [
"[email protected]"
] | |
ff39390c1993eeb799e4391a558fcdcffaad448a | f1172ebeb8c483a29bdc58b7b0200b8f9af5fc5f | /binlin/data/ud.py | 948e77a1d20b01ef0bcab2e81cbdb8ba46eb42c1 | [
"Apache-2.0"
] | permissive | UKPLab/inlg2019-revisiting-binlin | 04d8f2634cd97fe67a576209ff0abc7d8d7f1024 | 250196403ee4050cac78c547add90087ea04243f | refs/heads/master | 2022-12-15T03:32:57.442069 | 2019-09-30T14:17:32 | 2019-09-30T14:17:32 | 206,311,230 | 1 | 0 | Apache-2.0 | 2022-12-08T06:39:23 | 2019-09-04T12:13:28 | Python | UTF-8 | Python | false | false | 3,112 | py | import logging
from typing import Dict
import numpy as np
from binlin.utils.graph import dg_from_tokens
from binlin.data.readers import read_conll_data_file
from binlin.data.convert_fmt import raw_data_to_tokens
logger = logging.getLogger('main')
class UdDataClass(object):
    """
    A class dealing with UD data preprocessing
    and preparing data for training and evaluating models.
    """
    def __init__(self, data_class_config: Dict, global_config: Dict):
        self.config = data_class_config
        self.global_config = global_config
        # All four splits start empty; setup() fills the ones the current
        # run mode needs with lists of dependency graphs.
        self.train = self.dev = self.sr_dev = self.test = None
        self.setup()
    def setup(self):
        logger.info("Data setup ...")
        if self.global_config['mode'] == 'train':
            self.training_setup()
        else:
            self.prediction_setup()
    def training_setup(self):
        # Training requires train+dev; sr_dev and test are optional extras.
        # Each CoNLL file is read, tokenised and turned into one dependency
        # graph per sentence (dg_from_tokens).
        train_data_fname = self.config.get("train_data", None)
        dev_data_fname = self.config.get("dev_data", None)
        assert train_data_fname is not None
        assert dev_data_fname is not None
        train_raw = read_conll_data_file(train_data_fname)
        self.train = [dg_from_tokens(toks, i) for i, toks in enumerate(raw_data_to_tokens(train_raw))]
        dev_raw = read_conll_data_file(dev_data_fname)
        self.dev = [dg_from_tokens(toks, i) for i, toks in enumerate(raw_data_to_tokens(dev_raw))]
        sr_dev_data_fname = self.config.get("sr_dev_data", None)
        if not sr_dev_data_fname is None:
            sr_dev_raw = read_conll_data_file(sr_dev_data_fname)
            self.sr_dev = [dg_from_tokens(toks, i) for i, toks in enumerate(raw_data_to_tokens(sr_dev_raw))]
        test_data_fname = self.config.get("test_data", None)
        if not test_data_fname is None:
            test_raw = read_conll_data_file(test_data_fname)
            self.test = [dg_from_tokens(toks, i) for i, toks in enumerate(raw_data_to_tokens(test_raw))]
    def prediction_setup(self):
        # Prediction only loads the optional sr_dev/test splits; the plain
        # dev split is deliberately skipped (kept as commented-out code).
        # dev_data_fname = self.config.get("dev_data", None)
        # assert dev_data_fname is not None
        # dev_raw = read_conll_data_file(dev_data_fname)
        # self.dev = [dg_from_tokens(toks,i) for i, toks in enumerate(raw_data_to_tokens(dev_raw))]
        sr_dev_data_fname = self.config.get("sr_dev_data", None)
        if not sr_dev_data_fname is None:
            sr_dev_raw = read_conll_data_file(sr_dev_data_fname)
            self.sr_dev = [dg_from_tokens(toks, i) for i, toks in enumerate(raw_data_to_tokens(sr_dev_raw))]
        test_data_fname = self.config.get("test_data", None)
        if not test_data_fname is None:
            test_raw = read_conll_data_file(test_data_fname)
            self.test = [dg_from_tokens(toks,i) for i, toks in enumerate(raw_data_to_tokens(test_raw))]
def index_data(data_size, mode="no_shuffling"):
    """Return an index array over ``data_size`` items.

    ``"no_shuffling"`` yields 0..data_size-1 in order; ``"random"`` yields
    a random permutation; any other mode raises NotImplementedError.
    """
    if mode == "no_shuffling":
        return np.arange(data_size)
    if mode == "random":
        return np.random.choice(np.arange(data_size), data_size, replace=False)
    raise NotImplementedError()
component = UdDataClass | [
"[email protected]"
] | |
121d1bd4b0a6e1fa40e296f02607b88357d839ec | beb36a0adb8fcd6ce3f3366ebe685ce015825e62 | /lar/settings.py | 03230a75539cc13941fc7e6e1a59e2a1e2d7f709 | [] | no_license | gabvrel/restApi | ecdacab533f95fd58fa7bd071703c51966b25d00 | 0c0ff287c06f47675c335fee4fdab2434ce89c23 | refs/heads/main | 2023-02-04T16:57:00.802015 | 2020-12-15T23:31:00 | 2020-12-15T23:31:00 | 321,792,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,243 | py | """
Django settings for lar project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any real deployment.
SECRET_KEY = '4m8d&3mvbkmn=bos7))3kd!7kseqz$p7&8eex+@=nfn30g6^$o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'multiselectfield',
    'rest_framework',
    'knox',
    'accounts',
    'items',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lar.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'lar.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): database credentials are hard-coded; prefer environment
# variables (or django-environ) outside local development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'lar',
        'USER': 'postgres',
        'PASSWORD': 'root',
        'HOST': 'localhost'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'accounts.User'
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
bd43332315ab5985aafd64ba86b8a9a155bc90e6 | db40d41314fdcb28866e842e5f84578d451ba786 | /test/not-automated/blobrace.py | 2a8490a039f09995fae255759d1c43c79c63a371 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | jthelin/azure-storage-fuse | 1022612aceb492bf16d75ad60d6822f7b131ea23 | 0b128b4a0f1e64b8ce65c4d78c1a42d36dd6f0d1 | refs/heads/master | 2023-04-08T16:11:55.626646 | 2022-11-10T08:11:11 | 2022-11-10T08:11:11 | 177,686,771 | 0 | 0 | null | 2019-03-26T00:39:09 | 2019-03-26T00:39:08 | null | UTF-8 | Python | false | false | 645 | py | import os
import sys
import time
from multiprocessing import Pool
def noop():
    # Warm-up task: a short sleep submitted once per worker so the whole
    # pool is spun up before the real reads begin.
    time.sleep(0.1)
    return None
def read_blob(path):
    """Read the first line of *path* and return its length in characters.

    Fixes two defects:
    - the parameter was named ``paths`` while the body read ``path``, so the
      argument passed by the worker pool was silently ignored and the code
      only worked by accident via the module-global ``path`` leaked from the
      ``__main__`` loop (and would NameError under the spawn start method);
    - the file handle was never closed (and the ``seek(0)`` right after
      opening was a no-op).
    """
    with open(path, 'r') as fh:
        data = fh.readline()
    return len(data)
# Number of concurrent readers hammering each blob.
WORKERS=100
def test_blob(path):
    # Race test for one blob: spin the pool up with a round of no-ops, then
    # issue WORKERS parallel first-line reads. A length of 0 means a reader
    # observed an empty/truncated file, which the assert treats as a race.
    pool = Pool(processes=WORKERS)
    tasks = [ pool.apply_async(noop, ()) for i in range(WORKERS) ]
    [ t.get() for t in tasks ]
    tasks = [ pool.apply_async(read_blob, (path, )) for i in range(WORKERS) ]
    lengths = [ t.get() for t in tasks ]
    print(path, lengths)
    assert 0 not in lengths
if __name__ == '__main__':
    # One race test per blob path given on the command line.
    for path in sys.argv[1:]:
        test_blob(path)
""
] | |
568b15e55d720eb96dac71d6cc2b0e31277d5936 | e0ad21b94f92a978d61f84262923d497dfb4d174 | /oaf/bin/easy_install-2.7 | 61dd1e80c08f29c156e6f0e6dabefa7f8065f365 | [] | no_license | doanle84/Canterbury-Bankstown_DA | 113065bb4952a792da99dcada96ac020d1cd73b9 | 0f20257af897d96dce26bd2ba58e7fd94eae6951 | refs/heads/master | 2022-07-25T22:07:37.352268 | 2017-06-23T03:12:31 | 2017-06-23T03:12:31 | 94,496,752 | 0 | 0 | null | 2022-07-06T19:40:21 | 2017-06-16T02:24:12 | Python | UTF-8 | Python | false | false | 280 | 7 | #!/Users/doanle/scrappers/Canterbury-Bankstown_DA/oaf/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
ab157daf92240cf7c2b4b4c67e7bf99a480b44a4 | 305e7b0d6251eb5ac765726972d9180fa502329d | /src/ofxstatement/tests/test_lbbamazon.py | a397cd9816fa03fcede51ba1df29dfe0e2a2ca8d | [] | no_license | bubbas/ofxstatement | 099683525c6deec9f03fbbbe79910983be1af271 | 75ad922445dbdb34abc3588e852675a343026528 | refs/heads/master | 2021-01-18T18:10:07.806348 | 2013-12-08T11:51:10 | 2013-12-08T11:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | import doctest
from ofxstatement.plugins.lbbamazon import LbbAmazonCsvStatementParser
def doctest_LbbAmazonCsvStatementParser():
"""Test LbbAmazonCsvStatementParser
Open sample csv to parse
>>> import os
>>> csvfile = os.path.join(os.path.dirname(__file__),
... 'samples', 'lbbamazon.csv')
Create parser object and parse:
>>> fin = open(csvfile, 'r', encoding='iso-8859-1')
>>> parser = LbbAmazonCsvStatementParser(fin)
>>> statement = parser.parse()
Check what we've got:
>>> statement.account_id
>>> len(statement.lines)
7
>>> statement.start_balance
>>> statement.start_date
>>> statement.end_balance
>>> statement.end_date
>>> statement.currency
Check first line
>>> l = statement.lines[0]
>>> l.amount
-0.17
>>> l.payee
>>> l.memo
'ABGELTUNGSSTEUER'
>>> l.date
datetime.datetime(2012, 12, 28, 0, 0)
Check one more line:
>>> l=statement.lines[2]
>>> l.amount
0.75
>>> l.payee
>>> l.memo
'HANDYRABATT'
>>> l.date
datetime.datetime(2013, 1, 21, 0, 0)
Check one more line with slashes in memo:
>>> l=statement.lines[4]
>>> l.amount
-30.0
>>> l.memo
'AMAZON.ES COMPRA / amazon.es/ay'
>>> l.date
datetime.datetime(2013, 1, 7, 0, 0)
Check one more line with amazon points but without amount:
>>> l=statement.lines[5]
>>> l.amount
0.0
>>> l.memo
'+ 15.0 AMAZON.DE PUNKTE'
>>> l.date
datetime.datetime(2013, 1, 7, 0, 0)
"""
def test_suite(*args):
return doctest.DocTestSuite(optionflags=(doctest.NORMALIZE_WHITESPACE|
doctest.ELLIPSIS|
doctest.REPORT_ONLY_FIRST_FAILURE|
doctest.REPORT_NDIFF
))
load_tests = test_suite
if __name__ == "__main__":
doctest.testmod()
| [
"[email protected]"
] | |
d84883d411e4ad65d46ac21140b7a520f85b9369 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/framework/entities/abstract/VoiceChatManagerMeta.py | b2d3fa4227d019e0454b3829a8fe98a670b3d8cd | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,201 | py | # 2017.02.03 21:51:16 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/framework/entities/abstract/VoiceChatManagerMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIModule import BaseDAAPIModule
class VoiceChatManagerMeta(BaseDAAPIModule):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends BaseDAAPIModule
"""
def isPlayerSpeaking(self, accountDBID):
self._printOverrideError('isPlayerSpeaking')
def isVivox(self):
self._printOverrideError('isVivox')
def isYY(self):
self._printOverrideError('isYY')
def isVOIPEnabled(self):
self._printOverrideError('isVOIPEnabled')
def as_onPlayerSpeakS(self, accountDBID, isSpeak, isHimseljoinUnitButtonf):
if self._isDAAPIInited():
return self.flashObject.as_onPlayerSpeak(accountDBID, isSpeak, isHimseljoinUnitButtonf)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\framework\entities\abstract\VoiceChatManagerMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:51:16 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
8603eea5415e29fa060b2b86baa092c7a6dd988f | 79e81c60f0fd3d4f99801b4c886ce9f16ac1f42e | /apps/tasks/migrations/0002_auto_20200602_0243.py | d0b85d6859e6e1cd7129867ecdfa1f1d737ae9fc | [] | no_license | Machele-codez/todo | c077f1069ee33773a37f98840f551a7aec559d58 | 007715594d0fb0135b8be684c748b5c009b3ff45 | refs/heads/ajaxifying | 2023-02-26T01:43:38.820811 | 2021-05-07T16:14:15 | 2021-05-07T16:14:15 | 253,924,311 | 0 | 0 | null | 2023-02-14T13:56:44 | 2020-04-07T22:16:17 | JavaScript | UTF-8 | Python | false | false | 328 | py | # Generated by Django 2.2.7 on 2020-06-02 02:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tasks', '0001_initial'),
]
operations = [
migrations.RemoveConstraint(
model_name='task',
name='unique_user_task',
),
]
| [
"[email protected]"
] | |
197f9a0ceb0d7fe664ef26f910397dd944f3067a | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /networkmanager_write_1/site_create.py | 366c0d1e0841bb4326b60b68e3fdd04fd303e662 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/networkmanager/create-site.html
if __name__ == '__main__':
"""
delete-site : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/networkmanager/delete-site.html
get-sites : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/networkmanager/get-sites.html
update-site : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/networkmanager/update-site.html
"""
parameter_display_string = """
# global-network-id : The ID of the global network.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("networkmanager", "create-site", "global-network-id", add_option_dict)
| [
"[email protected]"
] | |
eda75e2bdf65cfb42977e5bd3c62d695c0253d2c | 29b3cc61da10cde5125bfd53443f2ce0e17902c2 | /lesson4_class_fibo.py | 8b82895473f3ca66fc3099448f2be234eb885927 | [] | no_license | YuriLv/homework | ca5bca499437fe144fc801de7d9911e76f0822e0 | 7c92f5497fd1401e1d5e320de134b833727c7f48 | refs/heads/master | 2020-04-25T21:51:55.131910 | 2019-03-22T10:06:13 | 2019-03-22T10:06:13 | 173,092,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,811 | py | # '''x = [1, 2, 3]
# y = x
# print (x) # [1, 2, 3]
# y += [3, 2, 1]
# print (x) # [1, 2, 3, 3, 2, 1]
#
# print(y)
#
# print("-------------------------")
#
# x = [1, 2, 3]
# y = x
# print (x) # [1, 2, 3]
# y = y + [3, 2, 1]
# print ("x=", x) # [1, 2, 3, 3, 2, 1]
#
# print("y=", y)
# '''
print("1.--------------------------------------------------------")
a= 0
while a<40:
a=a+2
print (a)
print()
for a in range(0,40,2):
print(a)
print("--------------------------------------------------------")
for a in range(1,40,2):
print(a)
print("2.--------------------------------------------------------")
for a in range (40):
if a%2 == 0:
continue
print(a)
print("3.--------------------------------------------------------")
a =[4,2,6,8,12,14,18,522,3]
s=False
for i in a:
if not i%2 == 0:
s=True
print ("number", i)
break
if s==False:
print("No odd")
print("4. --------------------------------------------------------")
a = [2,5,6,7,5,3,6]
for i in range(len(a)):
a[i] = float(a[i])
print (a)
print("5. --------------------------------------------------------")
'''5. Вивести числа Фібоначі включно до введеного числа n, використовуючи цикли. (Послідовність чисел Фібоначі 0, 1, 1, 2, 3, 5, 8, 13 і т.д.)
'''
n=int(input("Input max number N1: "))
# n=21
a=0
b=1
for i in range(0,n):
print(a)
(a,b) = (b,a+b)
if a<=n:
continue
else:
break
n=int(input("Input max number N2: "))
# n=21
a=0
b=1
while a<=n:
print(a)
(a, b) = (b, a + b)
n=int(input("Input max number N3: "))
n=21
a=0
b=1
while a<=n:
print(a)
t = a
a = b
b = t + a
| [
"[email protected]"
] | |
934f197f63186c00c0aa67e347098d67e76e8f11 | 4c0bbd1b8c821fd096435aa06f8bb08bedb917e9 | /pbt.py | 1f2bf330d096effe1e4642bb8e69a93239b0f06d | [] | no_license | sparticlesteve/heptrkx-gnn-tracking | ec301a6984af217956302ec90781153b8255ff2a | 85d257f6fed79d0f6f0f25e2ba10ac8d3487c39f | refs/heads/master | 2021-06-20T12:17:55.124924 | 2019-09-05T05:31:55 | 2020-04-19T20:45:51 | 167,315,319 | 6 | 10 | null | 2021-01-14T00:57:45 | 2019-01-24T06:27:41 | Jupyter Notebook | UTF-8 | Python | false | false | 3,245 | py | import os
import argparse
import time
from crayai import hpo
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('config', nargs='?', default='configs/agnn_pbt.yaml')
parser.add_argument('--nodes', type=int, default=16,
help='Number of nodes to run optimization over, total')
parser.add_argument('--nodes-per-eval', type=int, default=1,
help='Number of nodes per individual evaluation')
parser.add_argument('--demes', type=int, default=2,
help='Number of populations')
parser.add_argument('--pop-size', type=int, default=8,
help='Size of the genetic population')
parser.add_argument('--generations', type=int, default=4,
help='Number of generations to run')
parser.add_argument('--mutation-rate', type=float, default=0.05,
help='Mutation rate between generations of genetic optimization')
parser.add_argument('--crossover-rate', type=float, default=0.33,
help='Crossover rate between generations of genetic optimization')
parser.add_argument('--output-dir', default='./run',
help='Directory to store all outputs and checkpoints')
parser.add_argument('--alloc-args', default='-J hpo -C haswell -q interactive -t 4:00:00')
return parser.parse_args()
def main():
args = parse_args()
# Hardcode some config
#n_nodes = 4 #32
#config_file = 'configs/test.yaml'
#pop_size = 2 #16
#n_demes = 2 #4
#n_generations = 4
#mutation_rate = 0.05
#crossover_rate = 0.33
#alloc_args='-J hpo -C haswell -q interactive -t 4:00:00'
#checkpoint_dir = 'checkpoints'
# Hyperparameters
params = hpo.Params([
['--lr', 0.001, (1e-6, 0.1)],
['--n-graph-iters', 4, (1, 16)],
['--real-weight', 3., (1., 6.)]
])
# Define the command to be run by the evaluator
cmd = 'python train.py %s' % args.config
cmd += ' --fom last --n-epochs 1 --resume --output-dir @checkpoint'
# Define the evaluator
result_dir = os.path.expandvars('$SCRATCH/heptrkx/results/pbt_%s' %
time.strftime('%Y%m%d_%H%M%S'))
evaluator = hpo.Evaluator(cmd,
run_path=result_dir,
nodes=args.nodes,
launcher='wlm',
verbose=True,
nodes_per_eval=args.nodes_per_eval,
checkpoint='checkpoints',
alloc_args=args.alloc_args)
# Define the Optimizer
optimizer = hpo.GeneticOptimizer(evaluator,
pop_size=args.pop_size,
num_demes=args.demes,
generations=args.generations,
mutation_rate=args.mutation_rate,
crossover_rate=args.crossover_rate,
verbose=True)
# Run the Optimizer
optimizer.optimize(params)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2af5b432ac298bebe3343f9ee18ae79e5f368b3e | 8c2c6d5c9ff1f36d73275ef90d35a622b9fce4d7 | /aliens.py | 296b9758824adc23c661414de5f9fdbd905ea432 | [] | no_license | Dushyanttara/Competitive-Programing | cf90611de94643a347449d68d51751a86bf7d528 | 6caa35b0d58792d9f6dcdb071feb40dc9e0bd9bf | refs/heads/master | 2022-12-21T19:37:36.121470 | 2020-09-17T05:31:01 | 2020-09-17T05:31:01 | 296,226,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | """#Dushyant Tara(19-06-2020): This program will help you understand dictionary as a data strucutre
alien_0 = {'color': 'green',
'points': 5}
#print(alien_0['color'])
#print(alien_0['points'])
#new_points = alien_0['points']
#print("You just earned " + str(new_points) + " points.")
#Adding new key:value pairs
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)
#Starting with an empty dictionary
alien_0 = {}
alien_0['color'] = 'green'
alien_0['points'] = 5
print(alien_0)
#Modifying values in a Dictionary
alien_0 = {'color': 'green'}
print("The alien is " + alien_0['color'] + ".")
alien_0['color'] = 'yellow'
print("The alien is now " + alien_0['color'] + ".")
alien_0 = {'x_position' : 0, 'y_position': 25, 'speed':'medium'}
print("Original x-position: " + str(alien_0['x_position']))
alien_0['speed'] = 'fast'
#Move the alien to the right
#Determine how far to move the alien based on its speed.
if alien_0['speed'] == 'slow':
x_increment = 1
elif alien_0['speed'] == 'medium':
x_increment = 2
else:
x_increment = 3
#The new position is the old position plus the increment
alien_0['x_position'] += x_increment
print("New x-position: " + str(alien_0['x_position']))
#Removing Key-value pairs
alien_0 = {'color':'green','points':5}
print(alien_0)
del alien_0['points']
print(alien_0)
"""
alien_0 = {'color': 'green', 'points':5}
alien_1 = {'color': 'yellow', 'points':10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
print(alien)
#More aliens
#Make an empty list for storing aliens
aliens = []
# Make 30 green aliens.
for alien_number in range(30):
new_alien = {'color':'green','points':5, 'speed':'slow'}
aliens.append(new_alien)
for alien in aliens[0:3]:
if alien['color'] == 'green':
alien['color'] = 'yellow'
alien['speed'] = 'medium'
alien['points'] = 10
elif alien['color'] == 'yellow':
alien['color'] = 'red'
alien['speed'] = 'fast'
alien['points'] = 15
#Show first 5 aliens
for alien in aliens[:5]:
print(alien)
print("....")
#Show how many aliens have been created.
print("Total number of aliens: " + str(len(aliens)))
| [
"[email protected]"
] | |
38c410730d901ad85541edf2a62f42ccffc4bb48 | cbc8aa592c4569541f6871664b2a34b7d8c44ff3 | /hide_text.py | 5e5e4d4571fdaff6eb941f015f1844811338e18e | [] | no_license | premnetha9/Steganography | 1f231b78fe4d768495ed94f71ca70c6ab5106ebc | adc536903a39bb6912b83fddb6ad63d1ac37df43 | refs/heads/main | 2023-04-02T17:55:31.009380 | 2021-04-12T11:51:38 | 2021-04-12T11:51:38 | 356,925,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,757 | py | from PIL import Image
def encrypt(image_path, text_path):
# check if image is already a bitmap
if image_path[-4:] != '.bmp':
img = Image.open(image_path)
image_path = image_path[:-4] + '.bmp'
img.save(image_path)
with open(image_path[:-4] + '.bmp', 'rb') as bmp_file:
bmp = bmp_file.read()
with open(text_path, 'rb') as hide_file:
msg = hide_file.read()
# append the length of the message to assist with decoding
temp = msg.decode('utf-8')
msg = bytearray(str(len(temp)) + '\n' + temp, 'utf-8')
# color data begins at the byte at position 10
start_offset = bmp[10]
bmpa = bytearray(bmp)
# convert the msg in bytes to bits
bits = []
for i in range(len(msg)):
# a byte can at max be 8 digits long, i.e. 0b11111111 = 255
# we start at the left most bit (position 7) and work down to 0
for j in range(7, -1, -1):
# create the logic array of bits for our data
bits.append(nth_bit_present(msg[i], j))
data_array = bits
# ensure the image is large enough to contain the text
assert len(data_array) < len(bmpa) + start_offset
for i in range(len(data_array)):
bmpa[i + start_offset] = set_final_bit(bmpa[i + start_offset],
data_array[i])
with open(image_path.replace('.bmp', '_out.bmp'), 'wb') as out:
out.write(bmpa)
print('\nMain image with secret message saved as original filename'
' with "_out.bmp" appended\n')
return bmpa
def decrypt(image_path):
with open(image_path, 'rb') as bmp_file:
bmp = bmp_file.read()
# color data begins at the byte at position 10
start_offset = bmp[10]
# deconstruct each byte and get its final bit
bits = []
for i in range(start_offset, len(bmp)):
bits.append(nth_bit_present(bmp[i], 0))
# combine our bit array into bytes
out_bytes = []
for i in range(0, len(bits), 8):
if(len(bits) - i > 8):
out_bytes.append(bits_to_byte(bits[i: i + 8]))
# convert bytes to characters
out = []
for b in out_bytes:
out.append(chr(b))
output = ''.join(out)
# strip out the first line containing the length of the message
idx = output.find('\n')
msg_len = int(output[:idx])
# ignore data after the message is complete
msg = output[idx + 1: idx + msg_len + 1]
with open("hidden_message.txt", "w") as text_file:
text_file.write(msg)
print('Hidden message:')
print(msg, '\n')
print('Hidden message saved as "hidden_message.txt"\n')
def nth_bit_present(my_byte, n):
# bitwise check to see what the nth bit is
# if anything other than 0, it is TRUE else FALSE
return (my_byte & (1 << n)) != 0
def set_final_bit(my_byte, ends_in_one):
new_byte = 0
if ends_in_one:
if(nth_bit_present(my_byte, 0)):
# byte already ends in 1
new_byte = my_byte
else:
new_byte = my_byte + 1
else:
if(nth_bit_present(my_byte, 0)):
new_byte = my_byte - 1
else:
# byte already ends in 0
new_byte = my_byte
return new_byte
def bits_to_byte(bits):
# convert 8 bits into 1 byte
assert len(bits) == 8
new_byte = 0
for i in range(8):
if bits[i]:
# this bit == 1 and the "position" we are at in the byte is 7-i
# bitwise OR will insert a 1 a this position
new_byte |= 1 << 7 - i
else:
# this bit == 0 and the "position" we are at in the byte is 7-i
# bitwise OR will insert a 0 a this position
new_byte |= 0 << 7 - i
return new_byte
| [
"[email protected]"
] | |
a69f66bef3361f7d39e7ffefadee31214bd809e1 | 3a8fd98b56e6f7188963b6fb04838f29ef8eab58 | /5.seq2seq模型的实现/decoder/__init__.py | bcffe23b8f7751c894db068a41756f12356a5cd2 | [] | no_license | sunshinewhy/Pytorch_Learning_Notes | a5d6790ec67a1824271250710caf2fe2f0267d2b | 3cf67a01fce023952514863f5ed072d984746aa3 | refs/heads/master | 2023-08-30T09:37:51.813355 | 2021-10-28T06:28:52 | 2021-10-28T06:28:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | # -*- coding: utf-8 -*-
# date: 2021/10/25
# Project: Pytorch学习
# File Name: __init__.py.py
# Description:
# Author: Anefuer_kpl
# Email: [email protected]
| [
"[email protected]"
] | |
6610629d54b2c5ac59da26cba068dd2d1f43a0f7 | 485c7e76b32df2255b70534f93f1865f556ddb7f | /worksheet/migrations/0002_todo_reminder.py | 58d7b84938c72608be6d7b7d189760d7d0a743cb | [] | no_license | srrobin/personal-book | ec0c07b95d0e24946950238a8aa2b5ac41eabfa2 | 82add4f2de22f88ba9b822ba4e0ecc727554111f | refs/heads/master | 2022-11-30T07:37:06.978056 | 2019-09-10T05:38:02 | 2019-09-10T05:38:02 | 207,211,267 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # Generated by Django 2.2.3 on 2019-07-23 17:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('worksheet', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='todo',
name='reminder',
field=models.TimeField(null=True),
),
]
| [
"[email protected]"
] | |
2692296396f33700f74d1e69204a5124a8b12e1e | 2dea008817e368a4ef97fc514e7c7ede62a529c3 | /app/libs/models.py | c5cb96e16c2384553d8ba0df9913322a528368ed | [] | no_license | ChuckWoodraska/IP-Query | 4fbf50ea21d693b1cc2df320b4640bb9e5e6b08c | 4dd2796cca946218c0d720f722c74157472a9e56 | refs/heads/master | 2022-12-10T07:03:57.561732 | 2019-06-17T00:07:15 | 2019-06-17T00:07:15 | 192,244,817 | 0 | 0 | null | 2022-12-08T05:47:27 | 2019-06-16T23:05:55 | Python | UTF-8 | Python | false | false | 3,935 | py | from typing import Callable
from app.database import db
class BaseModel(db.Model):
__abstract__ = True
@staticmethod
def create(new_entry, commit=True):
try:
db.session.add(new_entry)
if commit:
db.session.commit()
return new_entry.id
except Exception as e:
print("c", e)
return False
@classmethod
def read(cls, id_) -> Callable[..., "BaseModel"]:
return BaseModel.query.get(id_)
def update(self, commit=True):
try:
if commit:
db.session.commit()
return self.id
except Exception as e:
print("u", e)
return False
def delete(self):
try:
db.session.delete(self)
db.session.commit()
return self.id
except Exception as e:
print("d", e)
return False
@staticmethod
def commit():
db.session.commit()
return True
@staticmethod
def object_dump(obj_name, obj_inst):
def dig_deep(prop_value):
dd_str = prop_value
if (
type(prop_value).__str__ is object.__str__
and not isinstance(prop_value, str)
and not isinstance(prop_value, dict)
):
dd_str = BaseModel.object_dump(
prop_value.__class__.__name__, prop_value
)
return str(dd_str)
obj_vars = sorted(
[
x
for x in tuple(set(obj_inst.__dict__))
if not x.startswith("__") and not x.startswith("_sa_instance_state")
]
)
return "{}({})".format(
obj_name,
", ".join(
[
"{}={}".format(var, dig_deep(getattr(obj_inst, var)))
for var in obj_vars
]
),
)
def __repr__(self):
obj_vars = sorted(
[
x
for x in tuple(set(self.__dict__))
if not x.startswith("__") and x != "_sa_instance_state"
]
)
return "{}({})".format(
self.__class__.__name__,
", ".join(["{}={}".format(var, getattr(self, var)) for var in obj_vars]),
)
def serialize(self):
fields = {}
for key, value in self.__dict__.items():
if not key.startswith("_") and key != "metadata":
fields[key] = value
return fields
class Ips(BaseModel):
__tablename__ = "ips"
id = db.Column(
"id", db.Integer, primary_key=True, unique=True, index=True, autoincrement=True
)
ip_address = db.Column("ip_address", db.String(255), index=True)
continent_code = db.Column("continent_code", db.String(255))
continent_name = db.Column("continent_name", db.String(255))
country_code = db.Column("country_code", db.String(255))
country_name = db.Column("country_name", db.String(255))
region_code = db.Column("region_code", db.String(255))
region_name = db.Column("region_name", db.String(255))
city = db.Column("city", db.String(255))
zip = db.Column("zip", db.String(255))
latitude = db.Column("latitude", db.Float)
longitude = db.Column("longitude", db.Float)
rdap_handle = db.Column("rdap_handle", db.String(255))
rdap_name = db.Column("rdap_name", db.String(255))
rdap_type = db.Column("rdap_type", db.String(255))
rdap_start_address = db.Column("rdap_start_address", db.String(255))
rdap_end_address = db.Column("rdap_end_address", db.String(255))
rdap_registrant_handle = db.Column("rdap_registrant_handle", db.String(255))
rdap_registrant_description = db.Column("rdap_registrant_description", db.Text)
@classmethod
def read(cls, id_) -> "Ips":
return Ips.query.get(id_)
| [
"[email protected]"
] | |
bf706e030428e195da2f4dd5bd22ce03fdae05e3 | c9429024ec31ada460686dca1977678c20bd312c | /library/imdb/parser/http/__init__.py | ade424fd6b9ae135b70bec3267e46da5c19e9bce | [] | no_license | FrozenCow/CouchPotato | 1bd43146f19354572f49b524fa36f012c35ddd77 | d5e6669bbee5792c127d2e6a0998fb6de6da62bc | refs/heads/master | 2021-01-18T08:30:06.000754 | 2010-11-02T22:16:28 | 2010-11-02T22:16:28 | 1,046,437 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 34,163 | py | """
parser.http package (imdb package).
This package provides the IMDbHTTPAccessSystem class used to access
IMDb's data through the web interface.
The imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "http" or "web"
or "html" (this is the default).
Copyright 2004-2010 Davide Alberani <[email protected]>
2008 H. Turgut Uyar <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import logging
from urllib import FancyURLopener, quote_plus
from codecs import lookup
from imdb import IMDbBase, imdbURL_movie_main, imdbURL_person_main, \
imdbURL_character_main, imdbURL_company_main, \
imdbURL_keyword_main, imdbURL_find, imdbURL_top250, \
imdbURL_bottom100
from imdb.utils import analyze_title
from imdb._exceptions import IMDbDataAccessError, IMDbParserError
import searchMovieParser
import searchPersonParser
import searchCharacterParser
import searchCompanyParser
import searchKeywordParser
import movieParser
import personParser
import characterParser
import companyParser
import topBottomParser
# Logger for miscellaneous functions.
_aux_logger = logging.getLogger('imdbpy.parser.http.aux')
# Detect the Google App Engine environment: if the google.appengine
# package is importable, IN_GAE is set True.  It is later used to skip
# the 'size' argument of read() in retrieve_unicode(), which GAE's
# urlopen-compatible objects do not support.
IN_GAE = False
try:
    import google.appengine
    IN_GAE = True
    _aux_logger.info('IMDbPY is running in the Google App Engine environment')
except ImportError:
    # Not running inside Google App Engine; keep IN_GAE = False.
    pass
class _ModuleProxy:
"""A proxy to instantiate and access parsers."""
def __init__(self, module, defaultKeys=None, oldParsers=False,
useModule=None, fallBackToNew=False):
"""Initialize a proxy for the given module; defaultKeys, if set,
muste be a dictionary of values to set for instanced objects."""
if oldParsers or fallBackToNew:
_aux_logger.warn('The old set of parsers was removed; falling ' \
'back to the new parsers.')
self.useModule = useModule
if defaultKeys is None:
defaultKeys = {}
self._defaultKeys = defaultKeys
self._module = module
def __getattr__(self, name):
"""Called only when no look-up is found."""
_sm = self._module
# Read the _OBJECTS dictionary to build the asked parser.
if name in _sm._OBJECTS:
_entry = _sm._OBJECTS[name]
# Initialize the parser.
kwds = {}
if self.useModule:
kwds = {'useModule': self.useModule}
parserClass = _entry[0][0]
obj = parserClass(**kwds)
attrsToSet = self._defaultKeys.copy()
attrsToSet.update(_entry[1] or {})
# Set attribute to the object.
for key in attrsToSet:
setattr(obj, key, attrsToSet[key])
setattr(self, name, obj)
return obj
return getattr(_sm, name)
# Major/minor version of the running interpreter, e.g. (2, 5); used to
# decide whether read() accepts a 'size' argument in retrieve_unicode().
PY_VERSION = sys.version_info[:2]
# The cookies for the "adult" search.
# Please don't mess with these accounts.
# Old 'IMDbPY' account.
_old_cookie_id = 'boM2bYxz9MCsOnH9gZ0S9QHs12NWrNdApxsls1Vb5/NGrNdjcHx3dUas10UASoAjVEvhAbGagERgOpNkAPvxdbfKwaV2ikEj9SzXY1WPxABmDKQwdqzwRbM+12NSeJFGUEx3F8as10WwidLzVshDtxaPIbP13NdjVS9UZTYqgTVGrNcT9vyXU1'
_old_cookie_uu = '3M3AXsquTU5Gur/Svik+ewflPm5Rk2ieY3BIPlLjyK3C0Dp9F8UoPgbTyKiGtZp4x1X+uAUGKD7BM2g+dVd8eqEzDErCoYvdcvGLvVLAen1y08hNQtALjVKAe+1hM8g9QbNonlG1/t4S82ieUsBbrSIQbq1yhV6tZ6ArvSbA7rgHc8n5AdReyAmDaJ5Wm/ee3VDoCnGj/LlBs2ieUZNorhHDKK5Q=='
# New 'IMDbPYweb' account; these values are set as the default Cookie
# header in IMDbURLopener.__init__.
_cookie_id = 'rH1jNAkjTlNXvHolvBVBsgaPICNZbNdjVjzFwzas9JRmusdjVoqBs/Hs12NR+1WFxEoR9bGKEDUg6sNlADqXwkas12N131Rwdb+UQNGKN8PWrNdjcdqBQVLq8mbGDHP3hqzxhbD692NQi9D0JjpBtRaPIbP1zNdjUOqENQYv1ADWrNcT9vyXU1'
_cookie_uu = 'su4/m8cho4c6HP+W1qgq6wchOmhnF0w+lIWvHjRUPJ6nRA9sccEafjGADJ6hQGrMd4GKqLcz2X4z5+w+M4OIKnRn7FpENH7dxDQu3bQEHyx0ZEyeRFTPHfQEX03XF+yeN1dsPpcXaqjUZAw+lGRfXRQEfz3RIX9IgVEffdBAHw2wQXyf9xdMPrQELw0QNB8dsffsqcdQemjPB0w+moLcPh0JrKrHJ9hjBzdMPpcXTH7XRwwOk='
class _FakeURLOpener(object):
"""Fake URLOpener object, used to return empty strings instead of
errors.
"""
def __init__(self, url, headers):
self.url = url
self.headers = headers
def read(self, *args, **kwds): return ''
def close(self, *args, **kwds): pass
def info(self, *args, **kwds): return self.headers
class IMDbURLopener(FancyURLopener):
    """Fetch web pages and handle errors.

    Subclass of urllib.FancyURLopener that masquerades as Mozilla,
    sends the "adult search" cookie by default, and converts HTTP/IO
    failures into IMDbDataAccessError (except 404, which is turned
    into an empty page via _FakeURLOpener)."""
    # Class-level logger shared by all instances.
    _logger = logging.getLogger('imdbpy.parser.http.urlopener')
    def __init__(self, *args, **kwargs):
        # URL of the last page actually retrieved (updated after redirects).
        self._last_url = u''
        FancyURLopener.__init__(self, *args, **kwargs)
        # Headers to add to every request.
        # XXX: IMDb's web server doesn't like urllib-based programs,
        # so lets fake to be Mozilla.
        # Wow! I'm shocked by my total lack of ethic! <g>
        for header in ('User-Agent', 'User-agent', 'user-agent'):
            self.del_header(header)
        self.set_header('User-Agent', 'Mozilla/5.0')
        # XXX: This class is used also to perform "Exact Primary
        # [Title|Name]" searches, and so by default the cookie is set.
        c_header = 'id=%s; uu=%s' % (_cookie_id, _cookie_uu)
        self.set_header('Cookie', c_header)
    def get_proxy(self):
        """Return the used proxy, or an empty string."""
        return self.proxies.get('http', '')
    def set_proxy(self, proxy):
        """Set the proxy; a false value removes any configured HTTP
        proxy, otherwise the 'http://' scheme is prepended if missing."""
        if not proxy:
            if self.proxies.has_key('http'):
                del self.proxies['http']
        else:
            if not proxy.lower().startswith('http://'):
                proxy = 'http://%s' % proxy
            self.proxies['http'] = proxy
    def set_header(self, header, value, _overwrite=True):
        """Set a default header, replacing any existing value
        unless _overwrite is false."""
        if _overwrite:
            self.del_header(header)
        self.addheaders.append((header, value))
    def del_header(self, header):
        """Remove a default header (only the first occurrence)."""
        for index in xrange(len(self.addheaders)):
            if self.addheaders[index][0] == header:
                del self.addheaders[index]
                break
    def retrieve_unicode(self, url, size=-1):
        """Retrieves the given URL, and returns a unicode string,
        trying to guess the encoding of the data (assuming latin_1
        by default).

        If size is not -1, only the first `size` bytes are requested
        (via an HTTP Range header) and read."""
        encode = None
        try:
            if size != -1:
                self.set_header('Range', 'bytes=0-%d' % size)
            uopener = self.open(url)
            kwds = {}
            # The 'size' argument of read() is not supported everywhere
            # (notably not on Google App Engine or very old Pythons).
            if PY_VERSION > (2, 3) and not IN_GAE:
                kwds['size'] = size
            content = uopener.read(**kwds)
            # Remember where we actually ended up (after redirects).
            self._last_url = uopener.url
            # Maybe the server is so nice to tell us the charset...
            server_encode = uopener.info().getparam('charset')
            # Otherwise, look at the content-type HTML meta tag.
            if server_encode is None and content:
                first_bytes = content[:512]
                begin_h = first_bytes.find('text/html; charset=')
                if begin_h != -1:
                    # 19 == len('text/html; charset='); the charset runs
                    # up to the closing double quote of the attribute.
                    end_h = first_bytes[19+begin_h:].find('"')
                    if end_h != -1:
                        server_encode = first_bytes[19+begin_h:19+begin_h+end_h]
            # Accept the detected charset only if codecs knows about it.
            if server_encode:
                try:
                    if lookup(server_encode):
                        encode = server_encode
                except (LookupError, ValueError, TypeError):
                    pass
            uopener.close()
            if size != -1:
                self.del_header('Range')
            self.close()
        except IOError, e:
            if size != -1:
                # Ensure that the Range header is removed.
                self.del_header('Range')
            raise IMDbDataAccessError, {'errcode': e.errno,
                                        'errmsg': str(e.strerror),
                                        'url': url,
                                        'proxy': self.get_proxy(),
                                        'exception type': 'IOError',
                                        'original exception': e}
        if encode is None:
            encode = 'latin_1'
            # The detection of the encoding is error prone...
            self._logger.warn('Unable to detect the encoding of the retrieved '
                        'page [%s]; falling back to default latin1.', encode)
        ##print unicode(content, encode, 'replace').encode('utf8')
        return unicode(content, encode, 'replace')
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Handle HTTP errors: a 404 is logged and answered with a fake
        (empty) page object; any other code raises IMDbDataAccessError."""
        if errcode == 404:
            self._logger.warn('404 code returned for %s: %s (headers: %s)',
                                url, errmsg, headers)
            return _FakeURLOpener(url, headers)
        raise IMDbDataAccessError, {'url': 'http:%s' % url,
                                    'errcode': errcode,
                                    'errmsg': errmsg,
                                    'headers': headers,
                                    'error type': 'http_error_default',
                                    'proxy': self.get_proxy()}
    def open_unknown(self, fullurl, data=None):
        """Raise IMDbDataAccessError for URLs of unknown scheme."""
        raise IMDbDataAccessError, {'fullurl': fullurl,
                                    'data': str(data),
                                    'error type': 'open_unknown',
                                    'proxy': self.get_proxy()}
    def open_unknown_proxy(self, proxy, fullurl, data=None):
        """Raise IMDbDataAccessError when the proxy scheme is unknown."""
        raise IMDbDataAccessError, {'proxy': str(proxy),
                                    'fullurl': fullurl,
                                    'error type': 'open_unknown_proxy',
                                    'data': str(data)}
class IMDbHTTPAccessSystem(IMDbBase):
"""The class used to access IMDb's data through the web."""
accessSystem = 'http'
_http_logger = logging.getLogger('imdbpy.parser.http')
    def __init__(self, isThin=0, adultSearch=1, proxy=-1, oldParsers=False,
                fallBackToNew=False, useModule=None, cookie_id=-1,
                cookie_uu=None, *arguments, **keywords):
        """Initialize the access system.

        proxy and cookie_id use -1 as a "leave unchanged" sentinel:
        any other value is applied (cookie_id=None deletes the cookies;
        a cookie_id/cookie_uu pair sets them).  useModule may be a
        string (optionally comma-separated) or a list/tuple of module
        names forwarded to the parser proxies."""
        IMDbBase.__init__(self, *arguments, **keywords)
        self.urlOpener = IMDbURLopener()
        # When isThin is set, we're parsing the "maindetails" page
        # of a movie (instead of the "combined" page) and movie/person
        # references are not collected if no defaultModFunct is provided.
        self.isThin = isThin
        self._getRefs = True
        self._mdparse = False
        if isThin:
            # NOTE: this must happen before _def is built below, since
            # _def captures the (possibly updated) accessSystem value.
            if self.accessSystem == 'http':
                self.accessSystem = 'httpThin'
            self._mdparse = True
            if self._defModFunct is None:
                self._getRefs = False
                from imdb.utils import modNull
                self._defModFunct = modNull
        self.do_adult_search(adultSearch)
        if cookie_id != -1:
            if cookie_id is None:
                self.del_cookies()
            elif cookie_uu is not None:
                self.set_cookies(cookie_id, cookie_uu)
        if proxy != -1:
            self.set_proxy(proxy)
        if useModule is not None:
            # Accept a comma-separated string as a list of module names.
            if not isinstance(useModule, (list, tuple)) and ',' in useModule:
                useModule = useModule.split(',')
        # Default attributes set on every parser built by the proxies.
        _def = {'_modFunct': self._defModFunct, '_as': self.accessSystem}
        # Proxy objects: one lazy parser factory per parser module.
        self.smProxy = _ModuleProxy(searchMovieParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.spProxy = _ModuleProxy(searchPersonParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.scProxy = _ModuleProxy(searchCharacterParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.scompProxy = _ModuleProxy(searchCompanyParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.skProxy = _ModuleProxy(searchKeywordParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.mProxy = _ModuleProxy(movieParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.pProxy = _ModuleProxy(personParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.cProxy = _ModuleProxy(characterParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.compProxy = _ModuleProxy(companyParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.topBottomProxy = _ModuleProxy(topBottomParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
try:
return '%07d' % int(movieID)
except ValueError, e:
raise IMDbParserError, 'invalid movieID "%s": %s' % (movieID, e)
def _normalize_personID(self, personID):
"""Normalize the given personID."""
try:
return '%07d' % int(personID)
except ValueError, e:
raise IMDbParserError, 'invalid personID "%s": %s' % (personID, e)
def _normalize_characterID(self, characterID):
"""Normalize the given characterID."""
try:
return '%07d' % int(characterID)
except ValueError, e:
raise IMDbParserError, 'invalid characterID "%s": %s' % \
(characterID, e)
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
try:
return '%07d' % int(companyID)
except ValueError, e:
raise IMDbParserError, 'invalid companyID "%s": %s' % \
(companyID, e)
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID; in this implementation
the movieID _is_ the imdbID.
"""
return movieID
def get_imdbPersonID(self, personID):
"""Translate a personID in an imdbID; in this implementation
the personID _is_ the imdbID.
"""
return personID
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in an imdbID; in this implementation
the characterID _is_ the imdbID.
"""
return characterID
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in an imdbID; in this implementation
the companyID _is_ the imdbID.
"""
return companyID
def get_proxy(self):
"""Return the used proxy or an empty string."""
return self.urlOpener.get_proxy()
def set_proxy(self, proxy):
"""Set the web proxy to use.
It should be a string like 'http://localhost:8080/'; if the
string is empty, no proxy will be used.
If set, the value of the environment variable HTTP_PROXY is
automatically used.
"""
self.urlOpener.set_proxy(proxy)
def set_cookies(self, cookie_id, cookie_uu):
"""Set a cookie to access an IMDb's account."""
c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
self.urlOpener.set_header('Cookie', c_header)
def del_cookies(self):
"""Remove the used cookie."""
self.urlOpener.del_header('Cookie')
def do_adult_search(self, doAdult,
cookie_id=_cookie_id, cookie_uu=_cookie_uu):
"""If doAdult is true, 'adult' movies are included in the
search results; cookie_id and cookie_uu are optional
parameters to select a specific account (see your cookie
or cookies.txt file."""
if doAdult:
self.set_cookies(cookie_id, cookie_uu)
#c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
#self.urlOpener.set_header('Cookie', c_header)
else:
self.urlOpener.del_header('Cookie')
def _retrieve(self, url, size=-1):
"""Retrieve the given URL."""
##print url
self._http_logger.debug('fetching url %s (size: %d)', url, size)
return self.urlOpener.retrieve_unicode(url, size=size)
def _get_search_content(self, kind, ton, results):
"""Retrieve the web page for a given search.
kind can be 'tt' (for titles), 'nm' (for names),
'char' (for characters) or 'co' (for companies).
ton is the title or the name to search.
results is the maximum number of results to be retrieved."""
if isinstance(ton, unicode):
ton = ton.encode('utf-8')
##params = 'q=%s&%s=on&mx=%s' % (quote_plus(ton), kind, str(results))
params = 's=%s;mx=%s;q=%s' % (kind, str(results), quote_plus(ton))
if kind == 'ep':
params = params.replace('s=ep;', 's=tt;ttype=ep;', 1)
cont = self._retrieve(imdbURL_find % params)
#print 'URL:', imdbURL_find % params
if cont.find('Your search returned more than') == -1 or \
cont.find("displayed the exact matches") == -1:
return cont
# The retrieved page contains no results, because too many
# titles or names contain the string we're looking for.
params = 's=%s;q=%s;lm=0' % (kind, quote_plus(ton))
size = 22528 + results * 512
return self._retrieve(imdbURL_find % params, size=size)
def _search_movie(self, title, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'tt', 'q': title})
##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title})
##params = 'q=%s&tt=on&mx=%s' % (quote_plus(title), str(results))
##cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('tt', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def _search_episode(self, title, results):
t_dict = analyze_title(title)
if t_dict['kind'] == 'episode':
title = t_dict['title']
cont = self._get_search_content('ep', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def get_movie_main(self, movieID):
if not self.isThin:
cont = self._retrieve(imdbURL_movie_main % movieID + 'combined')
else:
cont = self._retrieve(imdbURL_movie_main % movieID + 'maindetails')
return self.mProxy.movie_parser.parse(cont, mdparse=self._mdparse)
def get_movie_full_credits(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'fullcredits')
return self.mProxy.movie_parser.parse(cont)
def get_movie_plot(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'plotsummary')
return self.mProxy.plot_parser.parse(cont, getRefs=self._getRefs)
def get_movie_awards(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'awards')
return self.mProxy.movie_awards_parser.parse(cont)
def get_movie_taglines(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'taglines')
return self.mProxy.taglines_parser.parse(cont)
def get_movie_keywords(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'keywords')
return self.mProxy.keywords_parser.parse(cont)
def get_movie_alternate_versions(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'alternateversions')
return self.mProxy.alternateversions_parser.parse(cont,
getRefs=self._getRefs)
def get_movie_crazy_credits(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'crazycredits')
return self.mProxy.crazycredits_parser.parse(cont,
getRefs=self._getRefs)
def get_movie_goofs(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'goofs')
return self.mProxy.goofs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_quotes(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'quotes')
return self.mProxy.quotes_parser.parse(cont, getRefs=self._getRefs)
def get_movie_release_dates(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'releaseinfo')
ret = self.mProxy.releasedates_parser.parse(cont)
ret['info sets'] = ('release dates', 'akas')
return ret
get_movie_akas = get_movie_release_dates
def get_movie_vote_details(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'ratings')
return self.mProxy.ratings_parser.parse(cont)
def get_movie_official_sites(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'officialsites')
return self.mProxy.officialsites_parser.parse(cont)
def get_movie_trivia(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'trivia')
return self.mProxy.trivia_parser.parse(cont, getRefs=self._getRefs)
def get_movie_connections(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'movieconnections')
return self.mProxy.connections_parser.parse(cont)
def get_movie_technical(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'technical')
return self.mProxy.tech_parser.parse(cont)
def get_movie_business(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'business')
return self.mProxy.business_parser.parse(cont, getRefs=self._getRefs)
def get_movie_literature(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'literature')
return self.mProxy.literature_parser.parse(cont)
def get_movie_locations(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'locations')
return self.mProxy.locations_parser.parse(cont)
def get_movie_soundtrack(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'soundtrack')
return self.mProxy.soundtrack_parser.parse(cont)
def get_movie_dvd(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'dvd')
return self.mProxy.dvd_parser.parse(cont, getRefs=self._getRefs)
def get_movie_recommendations(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'recommendations')
return self.mProxy.rec_parser.parse(cont)
def get_movie_external_reviews(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'externalreviews')
return self.mProxy.externalrev_parser.parse(cont)
def get_movie_newsgroup_reviews(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'newsgroupreviews')
return self.mProxy.newsgrouprev_parser.parse(cont)
def get_movie_misc_sites(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'miscsites')
return self.mProxy.misclinks_parser.parse(cont)
def get_movie_sound_clips(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'soundsites')
return self.mProxy.soundclips_parser.parse(cont)
def get_movie_video_clips(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'videosites')
return self.mProxy.videoclips_parser.parse(cont)
def get_movie_photo_sites(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'photosites')
return self.mProxy.photosites_parser.parse(cont)
def get_movie_news(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'news')
return self.mProxy.news_parser.parse(cont, getRefs=self._getRefs)
def get_movie_amazon_reviews(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'amazon')
return self.mProxy.amazonrev_parser.parse(cont)
def get_movie_guests(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'epcast')
return self.mProxy.episodes_cast_parser.parse(cont)
get_movie_episodes_cast = get_movie_guests
def get_movie_merchandising_links(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'sales')
return self.mProxy.sales_parser.parse(cont)
def get_movie_episodes(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'episodes')
data_d = self.mProxy.episodes_parser.parse(cont)
# set movie['episode of'].movieID for every episode of the series.
if data_d.get('data', {}).has_key('episodes'):
nr_eps = 0
for season in data_d['data']['episodes'].values():
for episode in season.values():
episode['episode of'].movieID = movieID
nr_eps += 1
# Number of episodes.
if nr_eps:
data_d['data']['number of episodes'] = nr_eps
return data_d
def get_movie_episodes_rating(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'epdate')
data_d = self.mProxy.eprating_parser.parse(cont)
# set movie['episode of'].movieID for every episode.
if data_d.get('data', {}).has_key('episodes rating'):
for item in data_d['data']['episodes rating']:
episode = item['episode']
episode['episode of'].movieID = movieID
return data_d
def get_movie_faqs(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'faq')
return self.mProxy.movie_faqs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_airing(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'tvschedule')
return self.mProxy.airing_parser.parse(cont)
get_movie_tv_schedule = get_movie_airing
def get_movie_synopsis(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'synopsis')
return self.mProxy.synopsis_parser.parse(cont)
def get_movie_parents_guide(self, movieID):
cont = self._retrieve(imdbURL_movie_main % movieID + 'parentalguide')
return self.mProxy.parentsguide_parser.parse(cont)
def _search_person(self, name, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'nm', 'q': name})
##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name})
#params = 'q=%s&nm=on&mx=%s' % (quote_plus(name), str(results))
#cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('nm', name, results)
return self.spProxy.search_person_parser.parse(cont, results=results)['data']
def get_person_main(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'maindetails')
ret = self.pProxy.maindetails_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
def get_person_filmography(self, personID):
return self.get_person_main(personID)
def get_person_biography(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'bio')
return self.pProxy.bio_parser.parse(cont, getRefs=self._getRefs)
def get_person_awards(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'awards')
return self.pProxy.person_awards_parser.parse(cont)
def get_person_other_works(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'otherworks')
return self.pProxy.otherworks_parser.parse(cont, getRefs=self._getRefs)
#def get_person_agent(self, personID):
# cont = self._retrieve(imdbURL_person_main % personID + 'agent')
# return self.pProxy.agent_parser.parse(cont)
def get_person_publicity(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'publicity')
return self.pProxy.publicity_parser.parse(cont)
def get_person_official_sites(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'officialsites')
return self.pProxy.person_officialsites_parser.parse(cont)
def get_person_news(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'news')
return self.pProxy.news_parser.parse(cont)
def get_person_episodes(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'filmoseries')
return self.pProxy.person_series_parser.parse(cont)
def get_person_merchandising_links(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'forsale')
return self.pProxy.sales_parser.parse(cont)
def get_person_genres_links(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'filmogenre')
return self.pProxy.person_genres_parser.parse(cont)
def get_person_keywords_links(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'filmokey')
return self.pProxy.person_keywords_parser.parse(cont)
def _search_character(self, name, results):
cont = self._get_search_content('char', name, results)
return self.scProxy.search_character_parser.parse(cont, results=results)['data']
def get_character_main(self, characterID):
cont = self._retrieve(imdbURL_character_main % characterID)
ret = self.cProxy.character_main_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
get_character_filmography = get_character_main
def get_character_biography(self, characterID):
cont = self._retrieve(imdbURL_character_main % characterID + 'bio')
return self.cProxy.character_bio_parser.parse(cont,
getRefs=self._getRefs)
def get_character_episodes(self, characterID):
cont = self._retrieve(imdbURL_character_main % characterID +
'filmoseries')
return self.cProxy.character_series_parser.parse(cont)
def get_character_quotes(self, characterID):
cont = self._retrieve(imdbURL_character_main % characterID + 'quotes')
return self.cProxy.character_quotes_parser.parse(cont,
getRefs=self._getRefs)
def _search_company(self, name, results):
cont = self._get_search_content('co', name, results)
url = self.urlOpener._last_url
return self.scompProxy.search_company_parser.parse(cont, url=url,
results=results)['data']
def get_company_main(self, companyID):
cont = self._retrieve(imdbURL_company_main % companyID)
ret = self.compProxy.company_main_parser.parse(cont)
return ret
def _search_keyword(self, keyword, results):
# XXX: the IMDb web server seems to have some serious problem with
# non-ascii keyword.
# E.g.: http://akas.imdb.com/keyword/fianc%E9/
# will return a 500 Internal Server Error: Redirect Recursion.
keyword = keyword.encode('utf8', 'ignore')
try:
cont = self._get_search_content('kw', keyword, results)
except IMDbDataAccessError:
self._http_logger.warn('unable to search for keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_keyword_parser.parse(cont, results=results)['data']
def _get_keyword(self, keyword, results):
keyword = keyword.encode('utf8', 'ignore')
try:
cont = self._retrieve(imdbURL_keyword_main % keyword)
except IMDbDataAccessError:
self._http_logger.warn('unable to get keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_moviekeyword_parser.parse(cont, results=results)['data']
def _get_top_bottom_movies(self, kind):
if kind == 'top':
parser = self.topBottomProxy.top250_parser
url = imdbURL_top250
elif kind == 'bottom':
parser = self.topBottomProxy.bottom100_parser
url = imdbURL_bottom100
else:
return []
cont = self._retrieve(url)
return parser.parse(cont)['data']
| [
"[email protected]"
] | |
da80f0b87d1b6ddfae31337351eca0f9c9c2d213 | 1e50f1643376039ca988d909e79f528e01fa1371 | /leetcode/editor/cn/1137.第-n-个泰波那契数.py | a777ba13f377e8e91336da1a7cfec2c1d4ed6ce6 | [] | no_license | mahatmaWM/leetcode | 482a249e56e2121f4896e34c58d9fa44d6d0034b | 4f41dad6a38d3cac1c32bc1f157e20aa14eab9be | refs/heads/master | 2022-09-04T17:53:54.832210 | 2022-08-06T07:29:46 | 2022-08-06T07:29:46 | 224,415,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | #
# @lc app=leetcode.cn id=1137 lang=python3
#
# [1137] 第 N 个泰波那契数
#
# https://leetcode-cn.com/problems/n-th-tribonacci-number/description/
#
# algorithms
# Easy (52.68%)
# Likes: 39
# Dislikes: 0
# Total Accepted: 15.2K
# Total Submissions: 28.8K
# Testcase Example: '4'
#
# 泰波那契序列 Tn 定义如下:
#
# T0 = 0, T1 = 1, T2 = 1, 且在 n >= 0 的条件下 Tn+3 = Tn + Tn+1 + Tn+2
#
# 给你整数 n,请返回第 n 个泰波那契数 Tn 的值。
#
#
#
# 示例 1:
#
# 输入:n = 4
# 输出:4
# 解释:
# T_3 = 0 + 1 + 1 = 2
# T_4 = 1 + 1 + 2 = 4
#
#
# 示例 2:
#
# 输入:n = 25
# 输出:1389537
#
#
#
#
# 提示:
#
#
# 0 <= n <= 37
# 答案保证是一个 32 位整数,即 answer <= 2^31 - 1。
#
#
#
# @lc code=start
class Solution:
    # Iterative tabulation with rolling values: O(n) time, O(1) space
    # (the previous version kept the whole O(n) table).
    def tribonacci(self, n: int) -> int:
        """Return the n-th Tribonacci number (T0=0, T1=T2=1,
        Tn = Tn-1 + Tn-2 + Tn-3)."""
        if n == 0:
            return 0
        if n == 1 or n == 2:
            return 1
        a, b, c = 0, 1, 1  # T(i-3), T(i-2), T(i-1)
        for _ in range(3, n + 1):
            a, b, c = b, c, a + b + c
        return c
class Solution:
    # Plain recursion straight from the definition; exponential time
    # because of recomputation, so it times out on LeetCode (kept as
    # the naive baseline).
    def tribonacci(self, n: int) -> int:
        """Return the n-th Tribonacci number by direct recursion."""
        if n == 0:
            return 0
        if n in (1, 2):
            return 1
        return (self.tribonacci(n - 1) + self.tribonacci(n - 2)
                + self.tribonacci(n - 3))
# @lc code=end
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.