id | text | dataset_id
---|---|---
4922898
|
# repository: mononobi/charma-server
# -*- coding: utf-8 -*-
"""
streaming manager module.
"""
import os
import time
from time import sleep
from flask import send_from_directory
import pyrin.globalization.datetime.services as datetime_services
import pyrin.configuration.services as config_services
import pyrin.utils.path as path_utils
from pyrin.core.globals import _
from pyrin.core.structs import Manager, Context
import charma.movies.services as movie_services
import charma.movies.collector.services as movie_collector_services
import charma.movies.root.services as movie_root_services
import charma.subtitles.services as subtitle_services
from charma.streaming import StreamingPackage
from charma.streaming.enumerations import TranscodingStatusEnum, StreamProviderEnum
from charma.streaming.interface import AbstractStreamProvider
from charma.streaming.exceptions import StreamDirectoryNotExistedError, \
InvalidTranscodingStatusError, InvalidStreamProviderTypeError, \
StreamProviderDoesNotExistError, DuplicateStreamProviderError, StreamDoesNotExistError, \
MovieDirectoryNotFoundError, MultipleMovieDirectoriesFoundError, MovieFileNotFoundError, \
MultipleMovieFilesFoundError
class StreamingManager(Manager):
"""
streaming manager class.
"""
package_class = StreamingPackage
# how many seconds to wait for manifest file on each try.
INTERVAL = 1
# how many times to check for manifest file creation before giving up.
RETRY = 20
def __init__(self):
"""
initializes an instance of StreamingManager.
"""
super().__init__()
# a dict containing all registered stream providers. in the form of:
# {str name: AbstractStreamProvider instance}
self._providers = Context()
self._threads = config_services.get('streaming', 'transcoding', 'threads')
self._preset = config_services.get('streaming', 'transcoding', 'preset')
self._stream_directory = config_services.get('streaming', 'general', 'directory')
self._create_stream_directory(self._stream_directory)
def _create_stream_directory(self, directory):
"""
creates the given stream directory.
:param str directory: stream directory path.
"""
path_utils.create_directory(directory, ignore_existed=True)
def _is_failed(self, directory):
"""
gets a value indicating that given stream directory transcoding is failed.
:param str directory: directory path of stream.
:rtype: bool
"""
failed_file = self._get_status_file_name(directory, TranscodingStatusEnum.FAILED)
return os.path.exists(failed_file)
def _is_started(self, directory):
"""
gets a value indicating that given stream directory transcoding is started.
:param str directory: directory path of stream.
:rtype: bool
"""
started_file = self._get_status_file_name(directory, TranscodingStatusEnum.STARTED)
return os.path.exists(started_file)
def _is_finished(self, directory):
"""
gets a value indicating that given stream directory transcoding is finished.
:param str directory: directory path of stream.
:rtype: bool
"""
finished_file = self._get_status_file_name(directory, TranscodingStatusEnum.FINISHED)
return os.path.exists(finished_file)
def _set_status(self, directory, status, **options):
"""
sets the transcoding status of given stream directory.
:param str directory: directory path of stream.
:param str status: status of transcoding.
:enum status:
NOT_AVAILABLE = 'not_available'
STARTED = 'started'
FINISHED = 'finished'
FAILED = 'failed'
:keyword str message: message to be written to file.
:raises StreamDirectoryNotExistedError: stream directory not existed error.
:raises InvalidTranscodingStatusError: invalid transcoding status error.
"""
if not self.exists(directory):
raise StreamDirectoryNotExistedError('Stream directory [{directory}] does not exist.'
.format(directory=directory))
if status not in TranscodingStatusEnum:
raise InvalidTranscodingStatusError('Transcoding status [{status}] is invalid.'
.format(status=status))
message = options.get('message')
file_name = self._get_status_file_name(directory, status)
with open(file_name, mode='w') as file:
now = datetime_services.get_current_timestamp()
file.write(now)
if message not in (None, ''):
message = '\n{message}'.format(message=message)
file.write(message)
def _get_status_file_name(self, directory, status):
"""
gets the file name of given status in given stream directory.
:param str directory: directory path of stream.
:param str status: status of transcoding.
:enum status:
NOT_AVAILABLE = 'not_available'
STARTED = 'started'
FINISHED = 'finished'
FAILED = 'failed'
"""
return os.path.join(directory, status)
def _get_stream_provider(self, name):
"""
gets the stream provider with given name.
it raises an error if stream provider does not exist.
:param str name: stream provider name.
:raises StreamProviderDoesNotExistError: stream provider does not exist error.
:rtype: AbstractStreamProvider
"""
if name not in self._providers:
raise StreamProviderDoesNotExistError('Stream provider with name [{name}] '
'does not exist.'.format(name=name))
return self._providers.get(name)
def _get_stream_path(self, movie_id):
"""
gets the stream path for given movie.
:param uuid.UUID movie_id: movie id.
"""
return os.path.join(self._stream_directory, str(movie_id))
def _get_movie_directory(self, movie_id, **options):
"""
gets given movie's directory path if possible.
:param uuid.UUID movie_id: movie id to get its directory path.
:keyword str directory: movie directory path.
it will only be used if more than
one directory found for given movie.
:raises MovieDirectoryNotFoundError: movie directory not found error.
:raises MultipleMovieDirectoriesFoundError: multiple movie directories found error.
:rtype: str
"""
movie = movie_services.get(movie_id)
movie_paths = movie_root_services.get_full_path(movie.directory_name)
if not movie_paths:
raise MovieDirectoryNotFoundError(_('Movie directory [{directory}] not found.')
.format(directory=movie.directory_name))
found_directory = None
if len(movie_paths) > 1:
directory = options.get('directory')
if directory in movie_paths:
found_directory = directory
else:
found_directory = movie_paths[0]
if found_directory is None:
raise MultipleMovieDirectoriesFoundError(_('Multiple movie directories '
'found for movie [{directory}].')
.format(directory=movie.directory_name))
return found_directory
def _get_movie_file(self, movie_id, directory_path, **options):
"""
gets given movie's file path if possible.
:param uuid.UUID movie_id: movie id to get its file path.
:param str directory_path: movie directory path.
:keyword str file: movie file path.
it will only be used if more than
one file found for given movie.
:raises MovieFileNotFoundError: movie file not found error.
:raises MultipleMovieFilesFoundError: multiple movie files found error.
:rtype: str
"""
movie = movie_services.get(movie_id)
movie_files = movie_collector_services.get_movie_files(directory_path,
force=movie.forced)
if not movie_files:
raise MovieFileNotFoundError(_('No movie files found for movie [{directory}].')
.format(directory=movie.directory_name))
found_file = None
if len(movie_files) > 1:
file = options.get('file')
if file in movie_files:
found_file = file
else:
found_file = movie_files[0]
if found_file is None:
raise MultipleMovieFilesFoundError(_('Multiple movie files found '
'for movie [{directory}].')
.format(directory=movie.directory_name))
return found_file
def _transcode(self, movie_id, **options):
"""
transcodes a movie file to stream directory.
it returns a tuple of two items. first item is the stream directory
path and the second item is the output file name.
if the stream is already present and is usable, it returns the available
stream and bypasses the transcoding.
:param uuid.UUID movie_id: movie id to be transcoded.
:keyword str directory: movie directory path.
it will only be used if more than
one directory found for given movie.
:keyword str file: movie file path.
it will only be used if more than
one file found for given movie.
:raises MovieDirectoryNotFoundError: movie directory not found error.
:raises MultipleMovieDirectoriesFoundError: multiple movie directories found error.
:raises MovieFileNotFoundError: movie file not found error.
:raises MultipleMovieFilesFoundError: multiple movie files found error.
:returns: tuple[str stream_directory, str output_file]
:rtype: tuple[str, str]
"""
stream_path = self._get_stream_path(movie_id)
stream = self._get_stream_provider(StreamProviderEnum.DASH)
status = self.get_status(stream_path)
if status in (TranscodingStatusEnum.STARTED,
TranscodingStatusEnum.FINISHED):
return stream_path, stream.output_file
path_utils.remove_directory(stream_path)
found_directory = self._get_movie_directory(movie_id, **options)
found_file = self._get_movie_file(movie_id, found_directory, **options)
subtitles = subtitle_services.get_subtitles(found_directory)
self._create_stream_directory(stream_path)
options.update(threads=self._threads, preset=self._preset, subtitles=subtitles)
stream.transcode(found_file, stream_path, **options)
# we have to wait here for manifest file to become available.
self._wait_for_manifest(stream_path, stream.output_file,
self.RETRY, self.INTERVAL)
return stream_path, stream.output_file
def _wait_for_manifest(self, stream_path, manifest, retry, interval):
"""
sleeps current thread and checks if manifest file is created on specific intervals.
:param str stream_path: stream directory path to look for manifest file.
:param str manifest: manifest file name.
:param int retry: number of retries to check if manifest file is created.
:param float interval: number of seconds to wait between each interval.
"""
full_path = os.path.join(stream_path, manifest)
while not path_utils.exists(full_path) and retry > 0:
retry -= 1
sleep(interval)
def _send_stream(self, stream, file, **options):
"""
sends given file from given stream to client.
:param str stream: stream directory.
:param str file: file name to be returned.
:raises StreamDoesNotExistError: stream does not exist error.
:rtype: bytes
"""
full_path = os.path.join(stream, file)
if not os.path.exists(full_path):
raise StreamDoesNotExistError(_('Stream [{stream}] does not exist.')
.format(stream=full_path))
options.update(conditional=True)
return send_from_directory(stream, file, **options)
def register_stream_provider(self, instance, **options):
"""
registers the given stream provider.
:param AbstractStreamProvider instance: stream provider instance.
:raises InvalidStreamProviderTypeError: invalid stream provider type error.
:raises DuplicateStreamProviderError: duplicate stream provider error.
"""
if not isinstance(instance, AbstractStreamProvider):
raise InvalidStreamProviderTypeError('Input parameter [{instance}] is '
'not an instance of [{base}].'
.format(instance=instance,
base=AbstractStreamProvider))
if instance.name in self._providers:
raise DuplicateStreamProviderError('There is another registered stream '
'provider with name [{name}].'
.format(name=instance.name))
self._providers[instance.name] = instance
def get_provider_names(self):
"""
gets the name of all registered stream providers.
:rtype: list[str]
"""
return list(self._providers.keys())
def exists(self, directory):
"""
gets a value indicating that given stream directory exists.
:param str directory: directory path of stream.
:raises InvalidPathError: invalid path error.
:raises PathIsNotAbsoluteError: path is not absolute error.
:rtype: bool
"""
return path_utils.exists(directory)
def get_status(self, directory):
"""
gets the transcoding status of given stream directory.
:param str directory: directory path of stream.
:rtype: str
"""
if not self.exists(directory):
return TranscodingStatusEnum.NOT_AVAILABLE
if self._is_failed(directory):
return TranscodingStatusEnum.FAILED
if self._is_finished(directory):
return TranscodingStatusEnum.FINISHED
if self._is_started(directory):
return TranscodingStatusEnum.STARTED
return TranscodingStatusEnum.NOT_AVAILABLE
def set_started(self, directory):
"""
sets the given stream as started transcoding.
:param str directory: directory path of stream.
:raises StreamDirectoryNotExistedError: stream directory not existed error.
"""
self._set_status(directory, TranscodingStatusEnum.STARTED)
def set_finished(self, directory):
"""
sets the given stream as finished transcoding.
:param str directory: directory path of stream.
:raises StreamDirectoryNotExistedError: stream directory not existed error.
"""
self._set_status(directory, TranscodingStatusEnum.FINISHED)
def set_failed(self, directory, error):
"""
sets the given stream as failed transcoding.
:param str directory: directory path of stream.
:param str error: error message.
:raises StreamDirectoryNotExistedError: stream directory not existed error.
"""
self._set_status(directory, TranscodingStatusEnum.FAILED, message=error)
def set_process_id(self, directory, process_id):
"""
sets the ffmpeg process id for given stream.
:param str directory: directory path of stream.
:param int process_id: ffmpeg process id.
:raises StreamDirectoryNotExistedError: stream directory not existed error.
"""
if not self.exists(directory):
raise StreamDirectoryNotExistedError('Stream directory [{directory}] does not exist.'
.format(directory=directory))
file_name = os.path.join(directory, 'pid')
with open(file_name, mode='w') as file:
file.write(str(process_id))
def set_access_time(self, directory):
"""
sets the last access time for given stream.
:param str directory: directory path of stream.
:raises StreamDirectoryNotExistedError: stream directory not existed error.
"""
if not self.exists(directory):
raise StreamDirectoryNotExistedError('Stream directory [{directory}] does not exist.'
.format(directory=directory))
file_name = os.path.join(directory, 'access')
with open(file_name, mode='w') as file:
file.write(str(time.time()))
def start_stream(self, movie_id, **options):
"""
starts streaming of given movie.
it returns the related manifest file of the stream.
:param uuid.UUID movie_id: movie id to be streamed.
:raises MovieDirectoryNotFoundError: movie directory not found error.
:raises MultipleMovieDirectoriesFoundError: multiple movie directories found error.
:raises MovieFileNotFoundError: movie file not found error.
:raises MultipleMovieFilesFoundError: multiple movie files found error.
:raises StreamDoesNotExistError: stream does not exist error.
:rtype: bytes
"""
directory, file = self._transcode(movie_id, **options)
self.set_access_time(directory)
return self._send_stream(directory, file)
def continue_stream(self, movie_id, file, **options):
"""
continues the streaming of given movie.
:param uuid.UUID movie_id: movie id to be streamed.
:param str file: stream file name to be returned.
:raises StreamDoesNotExistError: stream does not exist error.
:rtype: bytes
"""
directory = self._get_stream_path(movie_id)
self.set_access_time(directory)
return self._send_stream(directory, file)
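# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original module): the
# polling done by `_wait_for_manifest` reduces to the standard-library pattern
# below; `wait_for_file` is a hypothetical helper name.
# ---------------------------------------------------------------------------
def wait_for_file(path, retry=20, interval=1.0):
    """Poll until `path` exists or the retry budget runs out; report existence."""
    while not os.path.exists(path) and retry > 0:
        retry -= 1
        sleep(interval)
    return os.path.exists(path)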
|
StarcoderdataPython
|
6453787
|
# Standard Python Libraries
import logging
# Third Party Libraries
from PyQt5.QtCore import QVariant, pyqtSlot, pyqtSignal, QObject, pyqtProperty
from PyQt5.QtQml import QJSValue
# Project Libraries
from py.common.FramListModel import FramListModel
class FullSpeciesListModel(FramListModel):
def __init__(self, app=None):
super().__init__()
self._app = app
self._rpc = self._app.rpc
self.add_role_name(name="id")
self.add_role_name(name="text")
self.partition_size = 24
self.populate_model()
def populate_model(self):
""""
Method to initially populate the model on startup
"""
self.clear()
sql = """
SELECT DISPLAY_NAME, CATCH_CONTENT_ID
FROM CATCH_CONTENT_LU
WHERE CONTENT_TYPE_LU_ID = (SELECT LOOKUP_ID FROM LOOKUPS WHERE
TYPE = 'Catch Content' AND VALUE = 'Taxonomy')
ORDER BY DISPLAY_NAME asc;
"""
results = self._rpc.execute_query(sql=sql)
for result in results:
item = dict()
item["id"] = result[1]
item["text"] = result[0]
self.appendItem(item)
@pyqtSlot(int, name="getSubset", result=QVariant)
def get_subset(self, index):
"""
Method to return a subset of the FramListModel for use in a SwipeView for the fullSpecieslist
:param index:
:return:
"""
return self.items[index * self.partition_size: (index+1) * self.partition_size]
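# Hedged sketch (illustration only, not part of the original file): `get_subset`
# above is plain fixed-size page slicing; the standalone helper below shows the
# same arithmetic with built-ins only. `paginate` is a hypothetical name.
def paginate(items, page, page_size):
    """Return the page-th chunk of items, page_size elements per chunk."""
    return items[page * page_size:(page + 1) * page_size]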
class Hooks(QObject):
fullSpeciesListModelChanged = pyqtSignal()
hooksSelected = pyqtSignal(QVariant, arguments=["results", ])
hooksChanged = pyqtSignal(QVariant, arguments=["angler_op_id"]) # signal to update hooks label in DropAngler.qml
def __init__(self, app=None, db=None):
super().__init__()
self._app = app
self._db = db
self._rpc = self._app.rpc
self._full_species_list_model = FullSpeciesListModel(self._app)
self._non_fish_items = self._get_non_fish_items() # added for issue #82
@pyqtProperty(bool)
def isGearUndeployed(self):
"""
#144: Check if Undeployed has been populated for current angler
Used to see if we're changing hook to something other than undeployed when undeployed gear perf is entered
TODO: move this to a statemachine property? Tried but it was annoying, left as is for now - jf
:return: boolean
"""
op_id = self.get_angler_op_id()
try:
undeployed = self._rpc.execute_query(
sql='''
select oa.operation_attribute_id
from operation_attributes oa
join lookups l
on oa.attribute_type_lu_id = l.lookup_id
where oa.operation_id = ?
and l.type = 'Angler Gear Performance'
and l.value = 'Undeployed'
''',
params=[op_id, ]
)
except Exception as e:
logging.error(f"Unable to query undeployed gear perfs for angler op id {op_id}")
return False
return len(undeployed) > 0
def _get_non_fish_items(self):
"""
Get list of hook items that are not fish/taxonomic
e.g. Bait Back, No Bait, No Hook, Multiple Hook, Undeployed (subject to change going forward)
:return: str[]
"""
sql = '''
select display_name
from catch_content_lu
where taxonomy_id is null
'''
return [i[0] for i in self._rpc.execute_query(sql=sql)]
@pyqtSlot(QVariant, name='isFish', result=bool)
def is_fish(self, hooked_item):
"""
Used for hook text styling (see #82)
:param hooked_item: string from UI
:return: bool
"""
return hooked_item not in self._non_fish_items if hooked_item else False
@pyqtProperty(FramListModel, notify=fullSpeciesListModelChanged)
def fullSpeciesListModel(self):
"""
        Method to return self._full_species_list_model for populating the repeater BackdeckButtons in HooksScreen.qml
:return:
"""
return self._full_species_list_model
@pyqtSlot(name="getAnglerOpId", result=QVariant) # 143: expose as pyqtSlot
def get_angler_op_id(self):
"""
Method to return the angler operation id from the given state machine angler letter. The Angler Operation ID
is the OPERATIONS table record for the given angler, and all angler-related items are attached to that record.
:return:
"""
op_id = None
mapping = {"A": self._app.state_machine.anglerAOpId,
"B": self._app.state_machine.anglerBOpId,
"C": self._app.state_machine.anglerCOpId}
if self._app.state_machine.angler in ["A", "B", "C"]:
op_id = mapping[self._app.state_machine.angler]
logging.info(f"angler letter: {self._app.state_machine.angler} > op_id: {op_id}")
return op_id
@pyqtSlot(name="selectHooks")
def select_hooks(self):
"""
Method to select values for the five hooks
:return:
"""
angler_op_id = self.get_angler_op_id()
sql = "SELECT RECEPTACLE_SEQ, DISPLAY_NAME FROM CATCH c WHERE c.OPERATION_ID = ?;"
sql = """
SELECT c.RECEPTACLE_SEQ, cc.DISPLAY_NAME
FROM CATCH c LEFT JOIN CATCH_CONTENT_LU cc ON c.HM_CATCH_CONTENT_ID = cc.CATCH_CONTENT_ID
WHERE c.OPERATION_ID = ?;
"""
params = [angler_op_id, ]
hooks = self._rpc.execute_query(sql=sql, params=params)
if hooks:
hooks = {x[0]: x[1] for x in hooks}
logging.info(f"hooks = {hooks}")
self.hooksSelected.emit(hooks)
@pyqtSlot(int, str, name="saveHook")
def save_hook(self, hook_number, species):
"""
Method to save the hook data down to the database
        :param hook_number: hook number (1-5) for the current angler
        :param species: species display name selected in the UI
:return:
"""
# if isinstance(hooks, QJSValue):
# hooks = hooks.toVariant()
# logging.info(f"hooks to save: {hooks}")
angler_op_id = self.get_angler_op_id()
species = species.replace("\n", " ")
species_map = {"Bocaccio": "Bocaccio", "Vermilion": "Vermilion Rockfish",
"Bank": "Bank Rockfish", "Blue": "Blue Rockfish", "Canary": "Canary Rockfish",
"Copper": "Copper Rockfish",
"Cowcod": "Cowcod", "Greenblotched": "Greenblotched Rockfish",
"GSpot": "Greenspotted Rockfish", "Greenstriped": "Greenstriped Rockfish",
"Halfbanded": "Halfbanded Rockfish", "Lingcod": "Lingcod",
"Sanddab": "Sanddab Unidentified", "Speckled": "Speckled Rockfish",
"Squarespot": "Squarespot Rockfish", "Starry": "Starry Rockfish",
"Swordspine": "Swordspine Rockfish", "Widow": "Widow Rockfish",
"Yellowtail": "Yellowtail Rockfish"}
species = species_map[species] if species in species_map else species
# Insert CATCH table records
try:
            # Determine if the CATCH record already exists (OPERATION_ID, RECEPTACLE_SEQ, and RECEPTACLE_TYPE_ID)
sql = """SELECT CATCH_ID FROM CATCH WHERE
OPERATION_ID = ? AND
RECEPTACLE_SEQ = ? AND
RECEPTACLE_TYPE_ID = (SELECT LOOKUP_ID FROM LOOKUPS WHERE
TYPE = 'Receptacle Type' AND VALUE = 'Hook')
"""
params = [angler_op_id, hook_number]
results = self._rpc.execute_query(sql=sql, params=params)
# UPDATE Results
if results:
sql = """
UPDATE CATCH
SET HM_CATCH_CONTENT_ID = (SELECT CATCH_CONTENT_ID FROM CATCH_CONTENT_LU
WHERE DISPLAY_NAME = ?)
WHERE RECEPTACLE_SEQ = ? AND OPERATION_ID = ? AND CATCH_ID = ?;
"""
logging.info(f"catch results = {results}")
params = [species, hook_number, angler_op_id, results[0][0]]
logging.info(f"updating an existing catch record")
# INSERT Results
else:
sql = """
INSERT INTO CATCH
(HM_CATCH_CONTENT_ID, RECEPTACLE_SEQ, RECEPTACLE_TYPE_ID, OPERATION_ID, OPERATION_TYPE_ID)
VALUES(
(SELECT CATCH_CONTENT_ID FROM CATCH_CONTENT_LU
WHERE DISPLAY_NAME = ?),
?,
(SELECT LOOKUP_ID FROM LOOKUPS WHERE TYPE = 'Receptacle Type'
AND VALUE = 'Hook'),
?,
(SELECT LOOKUP_ID FROM LOOKUPS WHERE TYPE = 'Operation' AND VALUE = 'Angler')
);
"""
params = [species, hook_number, angler_op_id]
logging.info(f"inserting hook data: {params}")
logging.info(f"params: {params}")
adh = f"{self._app.state_machine.angler}{self._app.state_machine.drop}{self._app.state_machine.hook}"
notify = {"speciesUpdate": {"station": "HookMatrix", "set_id": self._app.state_machine.setId, "adh": adh}}
self._rpc.execute_query(sql=sql, params=params, notify=notify)
logging.info(f"Hooks changed for angler op id {angler_op_id}")
self.hooksChanged.emit(angler_op_id) # received by DropAngler.qml
except Exception as ex:
logging.error(f"Error inserting hook data into CATCH table: {ex}")
# ToDo - <NAME> - INSERT SPECIMENS records
# Insert SPECIMEN table records
try:
sql = "INSERT INTO SPECIMENS(CATCH_ID) VALUES(?);"
params = []
# self._rpc.execute_query(sql=sql, params=params)
except Exception as ex:
logging.error(f"Error inserting hook data into SPECIMENS table: {ex}")
@pyqtSlot(int, str, name="deleteHook")
def delete_hook(self, hook_number, species):
"""
Method to delete an individual hook
:param hook_number:
:param species:
:return:
"""
logging.info(f"{hook_number}, {species}")
try:
angler_op_id = self.get_angler_op_id()
# sql = """
# DELETE FROM CATCH WHERE
# DISPLAY_NAME = ? AND
# RECEPTACLE_SEQ = ? AND
# RECEPTACLE_TYPE_ID =
# (SELECT LOOKUP_ID FROM LOOKUPS WHERE TYPE = 'Receptacle Type'
# AND VALUE = 'Hook') AND
# OPERATION_ID = ?
# """
# Check if Cutter Species or Best Species already exist, if so, don't delete the record, just remove the
# HM species
sql = """
SELECT CS_CATCH_CONTENT_ID, BEST_CATCH_CONTENT_ID FROM CATCH WHERE
RECEPTACLE_SEQ = ? AND
RECEPTACLE_TYPE_ID =
(SELECT LOOKUP_ID FROM LOOKUPS WHERE TYPE = 'Receptacle Type'
AND VALUE = 'Hook') AND
OPERATION_ID = ?
"""
params = [hook_number, angler_op_id]
results = self._rpc.execute_query(sql=sql, params=params)
if len(results) == 1:
cs_species = results[0][0]
best_species = results[0][1]
# Nothing has been recorded by the cutter or fpc for this species, so go ahead and delete the catch record
if cs_species is None and best_species is None:
sql = """
DELETE FROM CATCH WHERE
RECEPTACLE_SEQ = ? AND
RECEPTACLE_TYPE_ID =
(SELECT LOOKUP_ID FROM LOOKUPS WHERE TYPE = 'Receptacle Type'
AND VALUE = 'Hook') AND
OPERATION_ID = ?
"""
params = [hook_number, angler_op_id]
logging.info(f"deleting the catch record, nothing else exists for it")
# Something has been input for the cutter or best species, so just remove the hm species
else:
sql = """
UPDATE CATCH SET HM_CATCH_CONTENT_ID = Null
WHERE
RECEPTACLE_SEQ = ? AND
RECEPTACLE_TYPE_ID =
(SELECT LOOKUP_ID FROM LOOKUPS WHERE TYPE = 'Receptacle Type'
AND VALUE = 'Hook') AND
OPERATION_ID = ?
"""
params = [hook_number, angler_op_id]
logging.info(f"updating the catch record as cutter or best species exist")
else:
# We've hit a bad state as we should never encounter this
sql = ""
params = ""
if sql != "":
adh = f"{self._app.state_machine.angler}{self._app.state_machine.drop}{self._app.state_machine.hook}"
notify = {"speciesUpdate": {"station": "HookMatrix", "set_id": self._app.state_machine.setId, "adh": adh}}
self._rpc.execute_query(sql=sql, params=params, notify=notify)
logging.info(f"hook deletion completed, params = {params}")
self.hooksChanged.emit(angler_op_id) # received by DropAngler.qml
logging.info(f"Hooks changed for angler op id {angler_op_id}")
except Exception as ex:
logging.error(f"Error deleting a hook: {ex}")
|
StarcoderdataPython
|
3591298
|
from .base_classes import (
Attributes,
Component,
Container,
Content,
Element,
ElementGroup,
TextElement,
Void,
transform,
)
from .components import ContentTemplate, Slot
from .content import (
BlockQuotation,
ContentDivision,
DescriptionDetails,
DescriptionList,
DescriptionTerm,
Divider,
Figure,
FigureCaption,
OrderedList,
Paragraph,
PreformattedText,
UnorderedList,
)
from .forms import (
Button,
Checkbox,
ColorInput,
ColourInput,
DataList,
DateInput,
DatetimeInput,
EmailInput,
FieldSet,
FileInput,
Form,
HiddenInput,
ImageButton,
Input,
Label,
Legend,
Meter,
MonthInput,
NumberInput,
Option,
OptionGroup,
Output,
PasswordInput,
Progress,
RadioButton,
RangeSlider,
ResetButton,
SearchBox,
Select,
SubmitButton,
TelephoneInput,
TextArea,
TextBox,
TimeInput,
URLBox,
WeekInput,
)
from .interactive_elements import Details, Dialogue, DisclosureSummary, Menu
from .media import (
Area,
AudioPlayer,
Image,
ImageMap,
Path,
Polygon,
ScalableVectorGraphic,
Track,
VideoPlayer,
)
from .metadata import (
BaseURL,
ExternalResourceLink,
ExternalStyleSheet,
Meta,
Style,
Title,
)
from .scripts import Canvas, NoScript, Script
from .sectioning import (
HTML,
ArticleContents,
Aside,
Body,
ContactAddress,
Footer,
Head,
Header,
Heading1,
Heading2,
Heading3,
Heading4,
Heading5,
Heading6,
HeadingGroup,
MainContent,
Navigation,
Section,
)
from .semantics import (
Abbreviation,
Anchor,
BidirectionalIsolateElement,
BidirectionalTextOverride,
BringAttentionTo,
Citation,
Code,
Data,
Definition,
DeletedText,
Emphasis,
IdiomaticText,
InlineQuotation,
InsertedText,
KeyboardInput,
LineBreak,
MarkText,
Ruby,
RubyFallback,
RubyText,
SampleOutput,
Small,
Span,
Strikethrough,
StrongImportance,
Subscript,
Superscript,
Time,
UnarticulatedAnnotation,
Variable,
WordBreak,
)
from .tables import (
ColumnDeclaration,
ColumnGroup,
Table,
TableBody,
TableCaption,
TableDataCell,
TableFoot,
TableHead,
TableHeaderCell,
TableRow,
)
|
StarcoderdataPython
|
321490
|
from typing import List, Set, Dict, Any, Union
def get_bdf_stats(model, return_type='string', word=''):
# type: (Any, str, str) -> Union[str, List[str]]
"""
Print statistics for the BDF
Parameters
----------
return_type : str (default='string')
the output type ('list', 'string')
'list' : list of strings
'string' : single, joined string
word : str; default=''
model flag
Returns
-------
return_data : str, optional
the output data
.. note:: if a card is not supported and not added to the proper
lists, this method will fail
.. todo:: RBE3s from OP2s can show up as ???s
"""
card_dict_groups = [
'params', 'nodes', 'spoints', 'epoints', 'points', 'gridb',
'elements', 'ao_element_flags', 'normals', 'rigid_elements', 'plotels',
'properties', 'pbusht', 'pdampt', 'pelast',
'properties_mass', 'masses',
'materials', 'creep_materials', 'hyperelastic_materials',
'MATT1', 'MATT2', 'MATT3', 'MATT4', 'MATT5', 'MATT8', 'MATT9',
        'MATS1', 'MATS3', 'MATS8',
'coords', 'mpcs',
# axisysmmetric
# dynamic cards
'dareas', 'delays', 'dphases', 'nlparms', 'nlpcis',
'tsteps', 'tstepnls',
'rotors',
# direct matrix input - DMIG - dict
'dmi', 'dmig', 'dmij', 'dmiji', 'dmik', 'dmiax',
'dequations',
'transfer_functions',
'tics',
# frequencies - dict[List[FREQ]]
'frequencies',
# optimization - dict
'dconadds', 'dconstrs', 'desvars', 'topvar', 'ddvals', 'dlinks', 'dresps',
'dvcrels', 'dvmrels', 'dvprels', 'dvgrids',
# SESETx - dict
'suport1',
# tables
'tables', 'tables_d', 'tables_m', 'random_tables', 'tables_sdamping',
# methods
'methods', 'cMethods',
# aero
'caeros', 'paeros', 'aecomps', 'aefacts', 'aelinks',
'aelists', 'aeparams', 'aesurf', 'aesurfs', 'aestats', 'gusts', 'flfacts',
'flutters', 'splines', 'trims', 'divergs', 'csschds',
# thermal
'bcs', 'thermal_materials', 'phbdys', 'views', 'view3ds',
'convection_properties',
# contact
'bsurf', 'bsurfs', 'blseg',
'bconp', 'bcrparas', 'bctadds', 'bctparas', 'bctsets',
# sets
'sets', 'usets',
# superelements
'csuper', 'csupext',
'sebulk', 'sebndry', 'seconct', 'seelt', 'seexcld',
'selabel', 'seloc', 'seload', 'sempln', 'senqset',
'setree',
'se_sets', 'se_usets',
# ???
'dscreen', 'dti', 'nxstrats', 'radcavs', 'radmtx', 'ringaxs', 'ringfl',
'tempds', 'spcoffs',
]
scalar_attrs = [
'aero', 'aeros', 'grdset', # handled below
# not handled
'axic', 'axif',
'baror', 'beamor', 'doptprm', 'dtable',
'zona',
]
list_attrs = [
'asets', 'bsets', 'csets', 'omits', 'qsets',
'se_bsets', 'se_csets', 'se_qsets',
'suport', 'se_suport',
'monitor_points',
]
skip_attrs = [
'active_filename', 'active_filenames', 'debug', 'log', 'reject_lines',
'is_nx', 'is_msc', 'is_bdf_vectorized', 'dumplines', 'values_to_skip',
'system_command_lines', 'executive_control_lines', 'case_control_lines',
'case_control_deck',
'is_superelements', 'special_cards', 'units',
'sol', 'sol_iline', 'sol_method', 'cards_to_read', 'card_count',
'superelement_models', 'wtmass', 'echo', 'force_echo_off',
'read_includes', 'reject_cards', 'reject_count', 'punch',
'include_dir', 'include_filenames', 'save_file_structure',
'rsolmap_to_str', 'nastran_format', 'nid_map', 'bdf_filename',
'radset', 'is_zona',
# handled below
'mpcadds', 'mpcs', 'spcadds', 'spcs',
'loads', 'load_combinations',
'dloads', 'dload_entries',
'aero', 'aeros', 'mkaeros',
'nsmadds', 'nsms',
'seqgp',
] + list_attrs + card_dict_groups + scalar_attrs
#missed_attrs = []
#for attr in model.object_attributes():
#if attr in skip_attrs:
#continue
#missed_attrs.append(attr)
#assert missed_attrs == [], missed_attrs
# These are ignored because they're lists
#ignored_types = set([
#'spoints', 'spointi', # singleton
#'grdset', # singleton
#'spcs',
#'suport', 'se_suport', # suport, suport1 - list
#'doptprm', # singleton
## SETx - list
#'sets', 'asets', 'bsets', 'csets', 'qsets',
#'se_bsets', 'se_csets', 'se_qsets',
#])
## TODO: why are some of these ignored?
#ignored_types2 = set([
#'case_control_deck', 'caseControlDeck',
## done
#'sol', 'loads', 'mkaeros',
#'reject_lines', 'reject_cards',
## not cards
#'debug', 'executive_control_lines',
#'case_control_lines', 'cards_to_read', 'card_count',
#'is_structured', 'uniqueBulkDataCards',
#'model_type', 'include_dir',
#'sol_method', 'log',
#'sol_iline',
#'reject_count', '_relpath',
#'special_cards',])
#unsupported_types = ignored_types.union(ignored_types2)
#all_params = object_attributes(model, keys_to_skip=unsupported_types)
msg = ['---BDF Statistics%s---' % word]
# sol
if 'Superelement' not in word:
msg.append('SOL %s\n' % model.sol)
msg.extend(_get_bdf_stats_loads(model))
# load_combinations / loads: handled below
# dloads
for (lid, loads) in sorted(model.dloads.items()):
msg.append('bdf.dloads[%s]' % lid)
groups_dict = {} # type: Dict[str, Any]
for loadi in loads:
groups_dict[loadi.type] = groups_dict.get(loadi.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (lid, loads) in sorted(model.dload_entries.items()):
msg.append('bdf.dload_entries[%s]' % lid)
groups_dict = {}
for loadi in loads:
groups_dict[loadi.type] = groups_dict.get(loadi.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# spcs
for (spc_id, spcadds) in sorted(model.spcadds.items()):
msg.append('bdf.spcadds[%s]' % spc_id)
groups_dict = {}
for spcadd in spcadds:
groups_dict[spcadd.type] = groups_dict.get(spcadd.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (spc_id, spcs) in sorted(model.spcs.items()):
msg.append('bdf.spcs[%s]' % spc_id)
groups_dict = {}
for spc in spcs:
groups_dict[spc.type] = groups_dict.get(spc.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# mpcs
for (mpc_id, mpcadds) in sorted(model.mpcadds.items()):
msg.append('bdf.mpcadds[%s]' % mpc_id)
groups_dict = {}
for mpcadd in mpcadds:
groups_dict[mpcadd.type] = groups_dict.get(mpcadd.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (mpc_id, mpcs) in sorted(model.mpcs.items()):
msg.append('bdf.mpcs[%s]' % mpc_id)
groups_dict = {}
for mpc in mpcs:
groups_dict[mpc.type] = groups_dict.get(mpc.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# nsms
for (nsm_id, nsmadds) in sorted(model.nsmadds.items()):
msg.append('bdf.nsmadds[%s]' % nsm_id)
groups_dict = {}
for nsmadd in nsmadds:
groups_dict[nsmadd.type] = groups_dict.get(nsmadd.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (mpc_id, nsms) in sorted(model.nsms.items()):
msg.append('bdf.nsms[%s]' % mpc_id)
groups_dict = {}
for nsm in nsms:
groups_dict[nsm.type] = groups_dict.get(nsm.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# aero
if model.aero:
msg.append('bdf.aero')
msg.append(' %-8s 1' % ('AERO:'))
# aeros
if model.aeros:
msg.append('bdf:aeros')
msg.append(' %-8s 1' % ('AEROS:'))
#mkaeros
if model.mkaeros:
msg.append('bdf:mkaeros')
msg.append(' %-8s %s' % ('MKAERO:', len(model.mkaeros)))
# radset
if model.radset:
msg.append('bdf:radset')
msg.append(' %-8s 1' % ('RADSET:'))
#mkaeros
if model.seqgp:
msg.append('bdf:seqgp')
msg.append(' %-8s 1' % ('SEQGP:'))
for card_group_name in card_dict_groups:
try:
card_group = getattr(model, card_group_name)
except AttributeError:
msgi = 'cant find card_group_name=%r' % card_group_name
raise AttributeError(msgi)
groups = set() # type: Set[str]
if not isinstance(card_group, dict):
            msgi = '%s is a %s; not a dictionary, which is required by get_bdf_stats()' % (
card_group_name, type(card_group))
model.log.error(msgi)
continue
#raise RuntimeError(msg)
for card in card_group.values():
if isinstance(card, list):
for card2 in card:
groups.add(card2.type)
else:
groups.add(card.type)
group_msg = []
for card_name in sorted(groups):
try:
ncards = model.card_count[card_name]
group_msg.append(' %-8s : %s' % (card_name, ncards))
except KeyError:
# we get in here because we used add_grid or similar method, which
# doesn't increase the card_count, so instead we'll use _type_to_id_map
counter = '???'
if card_name in model._type_to_id_map:
counter = len(model._type_to_id_map[card_name])
if card_name == 'CORD2R' and counter == '???':
# there is always 1 CORD2R that isn't added to card_count/_type_to_id_map
continue
group_msg.append(' %-8s : %s' % (card_name, counter))
#assert card_name == 'CORD2R', model.card_count
if group_msg:
msg.append('bdf.%s' % card_group_name)
msg.append('\n'.join(group_msg))
msg.append('')
if model.reject_lines: # List[card]; card = List[str]
msg.append('Rejected Cards')
for name, counter in sorted(model.card_count.items()):
if name not in model.cards_to_read:
msg.append(' %-8s %s' % (name + ':', counter))
msg.append('')
for super_id, superelement in model.superelement_models.items():
msg += get_bdf_stats(superelement, return_type='list', word=' (Superelement %i)' % super_id)
if return_type == 'string':
return '\n'.join(msg)
return msg
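# Hedged usage sketch (assuming pyNastran's public API; the file path is a
# placeholder): this function is normally reached through the BDF model object.
#
#     from pyNastran.bdf.bdf import read_bdf
#     model = read_bdf('model.bdf')
#     print(model.get_bdf_stats())   # equivalent to get_bdf_stats(model)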
def _get_bdf_stats_loads(model):
# type: (Any) -> List[str]
"""helper for ``get_bdf_stats(...)``"""
# loads
msg = []
if model.is_bdf_vectorized:
## kind of hackish
for (lid, load_combination) in sorted(model.load_combinations.items()):
msg.append('bdf.load_combinations[%s]' % lid)
msg.append('')
if len(model.loads):
msg.append('bdf.loads[%s] : ???')
else:
for (lid, load_combinations) in sorted(model.load_combinations.items()):
msg.append('bdf.load_combinations[%s]' % lid)
groups_dict = {} # type: Dict[str, int]
for load_combination in load_combinations:
groups_dict[load_combination.type] = groups_dict.get(load_combination.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (lid, loads) in sorted(model.loads.items()):
msg.append('bdf.loads[%s]' % lid)
groups_dict = {}
for loadi in loads:
groups_dict[loadi.type] = groups_dict.get(loadi.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
return msg
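# Hedged sketch (illustration only, not part of pyNastran): the repeated
# "groups_dict[x.type] = groups_dict.get(x.type, 0) + 1" blocks above are a
# hand-rolled frequency count; collections.Counter expresses the same thing.
def _count_by_type(cards):
    """Count card objects by their ``type`` attribute (illustration only)."""
    from collections import Counter
    return Counter(card.type for card in cards)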
|
StarcoderdataPython
|
4869283
|
# Server must be restarted after creating a new tags file
from django import template
register = template.Library()
@register.simple_tag
def get_comment_count(entry):
    '''Get the total number of comments for an article'''
    lis = entry.article_comments.all()
    return lis.count()
@register.simple_tag
def get_parent_comments(entry):
    '''Get the list of parent (top-level) comments for an article'''
    lis = entry.article_comments.filter(parent=None)
    return lis
@register.simple_tag
def get_child_comments(com):
    '''Get the list of child comments for a parent comment'''
    lis = com.articlecomment_child_comments.all()
    return lis
@register.simple_tag
def get_comment_user_count(entry):
    '''Get the total number of distinct commenters for an article'''
    p = []
    lis = entry.article_comments.all()
    for each in lis:
        if each.author not in p:
            p.append(each.author)
    return len(p)
@register.simple_tag
def get_notifications(user, f=None):
    '''Get a user's notifications, optionally filtered by read state'''
    if f == 'true':
        lis = user.notification_get.filter(is_read=True)
    elif f == 'false':
        lis = user.notification_get.filter(is_read=False)
    else:
        lis = user.notification_get.all()
    return lis
@register.simple_tag
def get_notifications_count(user, f=None):
    '''Get the number of a user's notifications, optionally filtered by read state'''
    if f == 'true':
        lis = user.notification_get.filter(is_read=True)
    elif f == 'false':
        lis = user.notification_get.filter(is_read=False)
    else:
        lis = user.notification_get.all()
    return lis.count()
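# Hedged usage sketch (illustration only): Django simple_tag functions are
# ordinary callables, so the tags above can be exercised directly (for example
# in a test) without rendering a template. `entry` and `user` below are
# hypothetical model instances whose related names match the lookups above.
#
#     count = get_comment_count(entry)           # total comments on the article
#     parents = get_parent_comments(entry)       # top-level comments only
#     unread = get_notifications(user, 'false')  # unread notifications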
|
StarcoderdataPython
|
3599946
|
# Ene-Jun-2022/victor-geronimo-de-leon-cuellar/printing_functions.py
import print_models
print_models.make_helado('Vainilla', 'Nueces')
print_models.make_helado('Fresa', 'Grajea', 'Nueces', 'Cajeta')
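# Hedged sketch (print_models is not shown in this snippet): a make_helado
# compatible with the two calls above only needs one required flavour plus a
# variable number of toppings. The body below is an assumption, not the
# actual module.
#
# def make_helado(flavor, *toppings):
#     """Print an ice-cream order: one flavour and any number of toppings."""
#     print(f"Helado de {flavor} con: {', '.join(toppings)}")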
|
StarcoderdataPython
|
6671839
|
# repository: github16cp/emma
#!/usr/bin/python3
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Input
from keras.utils.test_utils import layer_test
from keras.utils.generic_utils import CustomObjectScope
import tensorflow as tf
import keras.backend as K
import keras
import ai
import numpy as np
with CustomObjectScope({'CCLayer': ai.CCLayer}):
input_data = np.array([
[7, 8, 9, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 2, 3],
[1, 5, 2, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
], dtype=np.float32)
labels = np.array([
[1, 0] + [0] * 254,
[0, 1] + [0] * 254,
])
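# One-hot labels over 256 classes for the two input samples above:
# sample 0 -> class 0, sample 1 -> class 1.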
input_data = np.expand_dims(input_data, axis=2)
cclayer_expected_output = []
kwargs = {
'filters': 256,
'kernel_size': 6,
'dilation_rate': 1,
'padding': 'valid',
'kernel_initializer': 'glorot_uniform',
'use_bias': False,
'activation': 'softmax'
}
layer = ai.CCLayer(**kwargs)
x = Input(batch_shape=input_data.shape)
y = layer(x)
model = Model(x, y)
'''
with tf.Session() as sess:
print("SIM")
sess.run(tf.global_variables_initializer())
y_pred = tf.placeholder(tf.float32, shape=cclayer_actual_output.shape)
print(y_pred.shape)
y_true = tf.placeholder(tf.float32, shape=labels.shape)
print(y_true.shape)
filter_score = tf.reduce_max(y_pred, axis=1, keepdims=False) * y_true
print(filter_score.shape)
filter_loss = tf.reduce_sum(-filter_score, axis=1)
print(filter_loss.shape)
loss = tf.reduce_sum(filter_loss, axis=0, keepdims=False)
print(sess.run(tf.square(y_pred), feed_dict={y_pred: cclayer_actual_output}))
print(sess.run(tf.reduce_max(tf.square(y_pred), axis=1, keepdims=False), feed_dict={y_pred: cclayer_actual_output}))
print(sess.run(y_true, feed_dict={y_true: labels}))
print(sess.run(filter_loss, feed_dict={y_pred: cclayer_actual_output, y_true: labels}))
print(sess.run(loss, feed_dict={y_pred: cclayer_actual_output, y_true: labels}))
print("MODEL")
model.compile(optimizer='adam', loss=ai.cc_loss)
closs = model.train_on_batch(input_data, labels)
print(closs)
exit(0)
'''
optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.0)
model.compile(optimizer=optimizer, loss='categorical_crossentropy')
for i in range(0, 6000):
loss = model.train_on_batch(input_data, labels)
if i % 100 == 0:
print(loss)
kernel = np.array(layer.get_weights()[0])
print("KERNEL")
print(kernel.shape)
print((kernel - np.mean(kernel, axis=0)) / np.std(kernel, axis=0))
cclayer_actual_output = model.predict(input_data)
print("OUTPUT")
print(cclayer_actual_output.shape)
print(cclayer_actual_output)
#best_points = np.amax(cclayer_actual_output, axis=1)
#print(best_points)
predicted_classes = np.argmax(cclayer_actual_output, axis=1)
print(predicted_classes)
# Use generic Keras checks (loading, saving, etc.) as extra test
#layer_test(ai.CCLayer, input_data=input_data, kwargs=kwargs, expected_output=expected_output)
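# Hedged sketch (illustration only): the class prediction above is just an
# argmax over the class axis of the softmax output; the same reduction on
# hand-written scores:
_demo_scores = np.array([[0.1, 0.7, 0.2],
                         [0.8, 0.1, 0.1]])
print(np.argmax(_demo_scores, axis=1))  # -> [1 0], index of the best class per row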
|
StarcoderdataPython
|
9782641
|
from flask import jsonify, request, current_app, Blueprint
from api import db, bcrypt
from api.models import User, Post
import jwt
import datetime
users = Blueprint('users', __name__)
session_days = 365
@users.route("/register", methods=['POST'])
def register():
name = request.json.get('name')
password = request.json.get('password')
if not name or not password:
return jsonify(error="Missing name/password")
user = User.query.filter_by(name=name).first()
if user:
return jsonify(error=f'Sorry, the name {name} has been taken.')
hashed_password = bcrypt.generate_password_hash(password).decode('utf-8')
    user = User(name=name, password=hashed_password)
db.session.add(user)
db.session.commit()
token = jwt.encode({'name': name, 'password': password, 'exp': datetime.datetime.utcnow()
+ datetime.timedelta(days=session_days)}, current_app.config['SECRET_KEY'])
return jsonify(token=token.decode('utf-8'))
@users.route("/login", methods=['POST'])
def login():
name = request.json.get('name')
password = request.json.get('password')
if not name or not password:
return jsonify(error="Missing name/password")
user = User.query.filter_by(name=name).first()
if user and user.verify_password(password):
token = jwt.encode({'name': name, 'password': password, 'exp': datetime.datetime.utcnow()
+ datetime.timedelta(days=session_days)}, current_app.config['SECRET_KEY'])
return jsonify(token=token.decode('utf-8'))
return jsonify(error="Invalid credentials")
@users.route("/account", methods=['POST'])
def account():
token = request.json.get('token')
if not token:
return jsonify(error="Missing token")
try:
token_data = jwt.decode(token, current_app.config['SECRET_KEY'])
except:
return jsonify(error="Token invalid or expired")
name = token_data.get('name')
password = token_data.get('password')
if name is None or password is None:
return jsonify(error="Missing token name/password")
user = User.query.filter_by(name=name).first()
if user and user.verify_password(password):
return jsonify(name=name)
return jsonify(error="Invalid token credentials")
@users.route("/user/<string:name>", methods=['GET'])
def user_posts(name):
user = User.query.filter_by(name=name).first()
if not user:
return jsonify(error="Invalid user")
posts = Post.query.filter_by(author=user).order_by(Post.date_posted.desc()).all()
f = lambda post: {
"id":post.id,
"title":post.title,
"melody_data":post.melody_data,
"author":post.author.name,
"date":post.date_posted.strftime('%m-%d-%Y'),
}
return jsonify(posts=list(map(f, posts)))
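# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, assuming PyJWT 1.x as implied by the
# token.decode('utf-8') calls above): the session token is a plain HS256-signed
# payload, so the round trip reduces to the following. SECRET stands in for
# current_app.config['SECRET_KEY'].
# ---------------------------------------------------------------------------
#     SECRET = 'change-me'
#     token = jwt.encode({'name': 'alice',
#                         'exp': datetime.datetime.utcnow() + datetime.timedelta(days=session_days)},
#                        SECRET)
#     claims = jwt.decode(token, SECRET, algorithms=['HS256'])  # -> {'name': 'alice', 'exp': ...}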
|
StarcoderdataPython
|
245995
|
# src/outpost/django/research/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-07 14:08
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
class Migration(migrations.Migration):
initial = True
dependencies = [("base", "0001_initial")]
ops = [
(
"""
CREATE SCHEMA IF NOT EXISTS research;
""",
"""
DROP SCHEMA IF EXISTS "research";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."forschung_programm" (
FORSCHUNG_PROGRAMM_ID numeric,
FORSCHUNG_PROGRAMM_NAME varchar,
AKTIV_JN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'FORSCHUNG_PROGRAMM_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."forschung_programm";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."forschung_art" (
FORSCHUNG_ART_ID numeric,
FORSCHUNG_ART_DE varchar,
FORSCHUNG_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'FORSCHUNG_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."forschung_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."geldgeber" (
GELDGEBER_ID numeric,
GELDGEBER_DE varchar,
GELDGEBER_EN varchar,
STRASSE varchar,
ORT varchar,
POSTLEITZAHL varchar,
LAND_ID numeric,
URL varchar,
GELDGEBER_TYP_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'GELDGEBER',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."geldgeber";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."geldgeber_typ" (
GELDGEBER_TYP_ID numeric,
GELDGEBER_TYP_DE varchar,
GELDGEBER_TYP_EN varchar,
GELDGEBER_TYP_KURZ varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'GELDGEBER_TYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."geldgeber_typ";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."land" (
LAND_ID numeric,
LAND_DE varchar,
LAND_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'LAND_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."land";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."org_partner_projektfunktion" (
ORG_PARTNER_PROJEKTFUNKTION_ID numeric,
ORG_PARTNER_PROJEKTFUNKTION_DE varchar,
ORG_PARTNER_PROJEKTFUNKTION_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'ORG_PARTNER_PROJEKTFUNKTION_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."org_partner_projektfunktion";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt_typ" (
PROJEKT_TYP_ID numeric,
PROJEKT_TYP_DE varchar,
PROJEKT_TYP_EN varchar,
PROJEKT_TYP_KURZ_DE varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT_TYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt_typ";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt" (
PROJEKT_ID numeric,
ORGEINHEIT_ID numeric,
PROJEKT_TYP_ID numeric,
KURZBEZEICHNUNG varchar,
PROJEKTTITEL_DE varchar,
PROJEKTTITEL_EN varchar,
ORG_PARTNER_PROJEKTFUNKTION_ID numeric,
PROJEKTLEITER_ID numeric,
KONTAKTPERSON_ID numeric,
PROJEKT_STATUS_ID numeric,
PROJEKT_URL varchar,
ABSTRACT_DE varchar,
ABSTRACT_EN varchar,
PROJEKTBEGINN_GEPLANT timestamp,
PROJEKTBEGINN_EFFEKTIV timestamp,
PROJEKTENDE_GEPLANT timestamp,
PROJEKTENDE_EFFEKTIV timestamp,
VERGABE_ART_ID numeric,
FORSCHUNG_ART_ID numeric,
VERANSTALTUNG_ART_ID numeric,
STUDIE_ART_ID numeric,
SPRACHE_ID numeric,
STAMMDATEN_UEBERTRAGUNG timestamp,
FORSCHUNG_PROGRAMM_ID numeric,
FORSCHUNG_SUBPROGRAMM varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt_geldgeber" (
PROJEKT_ID numeric,
GELDGEBER_ID numeric,
HAUPTGELDGEBER_JA_NEIN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT_GELDGEBER',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt_geldgeber";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."projekt_status" (
PROJEKT_STATUS_ID numeric,
PROJEKT_STATUS varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PROJEKT_STATUS_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."projekt_status";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."sprache" (
SPRACHE_ID numeric,
SPRACHE_DE varchar,
SPRACHE_EN varchar,
SPRACHE_EN_KURZ varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'SPRACHE_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."sprache";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."studie_art" (
STUDIE_ART_ID numeric,
STUDIE_ART_DE varchar,
STUDIE_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'STUDIE_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."studie_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."veranstaltung_art" (
VERANSTALTUNG_ART_ID numeric,
VERANSTALTUNG_ART_DE varchar,
VERANSTALTUNG_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'VERANSTALTUNG_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."veranstaltung_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."vergabe_art" (
VERGABE_ART_ID numeric,
VERGABE_ART_DE varchar,
VERGABE_ART_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'VERGABE_ART_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."vergabe_art";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_publikation" (
MEDONLINE_PERSON_ID numeric,
PUBLIKATION_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_PUBLIKATION',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_publikation";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."orgeinheit_publikation" (
PUBLIKATION_ID numeric,
MEDONLINE_ID numeric,
PUBLIKATION_AUTORENSCHAFT_ID numeric,
ZUORDNUNGSDATUM timestamp
)
SERVER sqlalchemy OPTIONS (
tablename 'ORGEINHEIT_PUBLIKATION',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."orgeinheit_publikation";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation_typ" (
PUBLIKATION_TYP_ID numeric,
PUBLIKATION_TYP_DE varchar,
PUBLIKATION_TYP_EN varchar,
SORTIERUNG_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION_TYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation_typ";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation_dokumenttyp" (
PUBLIKATION_DOKUMENTTYP_ID numeric,
PUBLIKATION_DOKUMENTTYP_DE varchar,
PUBLIKATION_DOKUMENTTYP_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION_DOKUMENTTYP_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation_dokumenttyp";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation" (
PUBLIKATION_ID varchar,
TITEL varchar,
AUTOR varchar,
JAHR numeric,
QUELLE varchar,
PUBLIKATION_TYP_ID numeric,
PUBLIKATION_DOKUMENTTYP_ID numeric,
SCI_ID varchar,
PUBMED_ID varchar,
DOI varchar,
PMC_ID varchar,
ABSTRACT bytea,
IMPACT_FAKTOR_NORM_MAX numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."klassifikation_oestat_2012" (
KLASSIFIKATION_OESTAT_ID numeric,
KLASSIFIKATION_OESTAT_DE varchar,
KLASSIFIKATION_OESTAT_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'KLASSIFIKATION_OESTAT_2012_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."klassifikation_oestat_2012";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_fachkenntnis" (
PERSON_FACHKENNTNIS_ID numeric,
MEDONLINE_PERSON_ID numeric,
FACHKENNTNIS_DE varchar,
FACHKENNTNIS_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_FACHKENNTNIS',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_fachkenntnis";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_kenntnis" (
PERSON_KENNTNIS_ID numeric,
MEDONLINE_PERSON_ID numeric,
KENNTNIS_DE varchar,
KENNTNIS_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_KENNTNIS',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_kenntnis";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_klass_oestat_2012" (
KLASSIFIKATION_OESTAT_ID numeric,
MEDONLINE_PERSON_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_KLASS_OESTAT_2012',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_klass_oestat_2012";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."person_weiterbildung" (
PERSON_WEITERBILDUNG_ID numeric,
MEDONLINE_PERSON_ID numeric,
PERSON_WEITERBILDUNG_DE varchar,
PERSON_WEITERBILDUNG_EN varchar,
JAHR varchar,
JAHR_BIS varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PERSON_WEITERBILDUNG',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."person_weiterbildung";
""",
),
(
"""
CREATE FOREIGN TABLE "research"."publikation_autorenschaft" (
PUBLIKATION_AUTORENSCHAFT_ID numeric,
PUBLIKATION_AUTORENSCHAFT_DE varchar,
PUBLIKATION_AUTORENSCHAFT_EN varchar
)
SERVER sqlalchemy OPTIONS (
tablename 'PUBLIKATION_AUTORENSCHAFT_L',
db_url '{}'
);
""".format(
settings.MULTICORN.get("research")
),
"""
DROP FOREIGN TABLE IF EXISTS "research"."publikation_autorenschaft";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_classification" AS SELECT
KLASSIFIKATION_OESTAT_ID::integer AS id,
hstore(
ARRAY['de', 'en'],
ARRAY[KLASSIFIKATION_OESTAT_DE, KLASSIFIKATION_OESTAT_EN]
) AS name
FROM
"research"."klassifikation_oestat_2012"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_classification";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_classification_person" AS SELECT
KLASSIFIKATION_OESTAT_ID::integer AS classification_id,
MEDONLINE_PERSON_ID::integer AS person_id
FROM
"research"."person_klass_oestat_2012"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_classification_person";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_expertise" AS SELECT
PERSON_FACHKENNTNIS_ID::integer AS id,
MEDONLINE_PERSON_ID::integer AS person_id,
hstore(
ARRAY['de', 'en'],
ARRAY[FACHKENNTNIS_DE, FACHKENNTNIS_EN]
) AS name
FROM
"research"."person_fachkenntnis"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_expertise";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_knowledge" AS SELECT
PERSON_KENNTNIS_ID::integer AS id,
MEDONLINE_PERSON_ID::integer AS person_id,
hstore(
ARRAY['de', 'en'],
ARRAY[KENNTNIS_DE, KENNTNIS_EN]
) AS name
FROM
"research"."person_kenntnis"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_knowledge";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_education" AS SELECT
PERSON_WEITERBILDUNG_ID::integer AS id,
MEDONLINE_PERSON_ID::integer AS person_id,
hstore(
ARRAY['de', 'en'],
ARRAY[PERSON_WEITERBILDUNG_DE, PERSON_WEITERBILDUNG_EN]
) AS name,
JAHR AS from,
JAHR_BIS AS to
FROM
"research"."person_weiterbildung"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_education";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_publicationauthorship" AS SELECT
PUBLIKATION_AUTORENSCHAFT_ID::integer AS id,
hstore(
ARRAY['de', 'en'],
ARRAY[PUBLIKATION_AUTORENSCHAFT_DE, PUBLIKATION_AUTORENSCHAFT_EN]
) AS name
FROM
"research"."publikation_autorenschaft"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_publicationauthorship";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_program" AS SELECT
FORSCHUNG_PROGRAMM_ID::integer AS id,
FORSCHUNG_PROGRAMM_NAME AS name,
COALESCE((LOWER(AKTIV_JN) = 'n'), FALSE)::boolean AS active
FROM
"research"."forschung_programm"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_program";
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_projectresearch" AS SELECT
FORSCHUNG_ART_ID::integer AS id,
FORSCHUNG_ART_DE AS name
FROM
"research"."forschung_art"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_projectresearch";
""",
),
(
"""
CREATE UNIQUE INDEX research_projectresearch_id_idx ON "public"."research_projectresearch" ("id");
""",
"""
DROP INDEX IF EXISTS research_projectresearch_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_funder" AS SELECT
GELDGEBER_ID::integer AS id,
GELDGEBER_DE AS name,
STRASSE AS street,
ORT AS city,
POSTLEITZAHL AS zipcode,
LAND_ID::integer AS country_id,
URL,
GELDGEBER_TYP_ID::integer AS category_id
FROM
"research"."geldgeber"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_funder";
""",
),
(
"""
CREATE UNIQUE INDEX research_funder_id_idx ON "public"."research_funder" ("id");
""",
"""
DROP INDEX IF EXISTS research_funder_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_fundercategory" AS SELECT
GELDGEBER_TYP_ID::integer AS id,
GELDGEBER_TYP_DE AS name,
GELDGEBER_TYP_KURZ AS short
FROM
"research"."geldgeber_typ"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_fundercategory";
""",
),
(
"""
CREATE UNIQUE INDEX research_fundercategory_id_idx ON "public"."research_fundercategory" ("id");
""",
"""
DROP INDEX IF EXISTS research_fundercategory_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_country" AS SELECT
LAND_ID::integer AS id,
LAND_DE AS name
FROM
"research"."land"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_country";
""",
),
(
"""
CREATE UNIQUE INDEX research_country_id_idx ON "public"."research_country" ("id");
""",
"""
DROP INDEX IF EXISTS research_country_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_projectpartnerfunction" AS SELECT
ORG_PARTNER_PROJEKTFUNKTION_ID::integer AS id,
ORG_PARTNER_PROJEKTFUNKTION_DE AS name
FROM
"research"."org_partner_projektfunktion"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_projectpartnerfunction";
""",
),
(
"""
CREATE UNIQUE INDEX research_projectpartnerfunction_id_idx ON "public"."research_projectpartnerfunction" ("id");
""",
"""
DROP INDEX IF EXISTS research_projectpartnerfunction_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_project" AS SELECT
PROJEKT_ID::integer AS id,
ORGEINHEIT_ID::integer AS organization_id,
PROJEKT_TYP_ID::integer AS category_id,
KURZBEZEICHNUNG AS short,
hstore(
ARRAY['de', 'en'],
ARRAY[PROJEKTTITEL_DE, PROJEKTTITEL_EN]
) AS title,
ORG_PARTNER_PROJEKTFUNKTION_ID::integer AS partner_function_id,
PROJEKTLEITER_ID::integer AS manager_id,
KONTAKTPERSON_ID::integer AS contact_id,
PROJEKT_STATUS_ID::integer AS status_id,
PROJEKT_URL AS url,
hstore(
ARRAY['de', 'en'],
ARRAY[ABSTRACT_DE, ABSTRACT_EN]
) AS abstract,
PROJEKTBEGINN_GEPLANT::timestamptz AS begin_planned,
PROJEKTBEGINN_EFFEKTIV::timestamptz AS begin_effective,
PROJEKTENDE_GEPLANT::timestamptz AS end_planned,
PROJEKTENDE_EFFEKTIV::timestamptz AS end_effective,
VERGABE_ART_ID::integer AS grant_id,
FORSCHUNG_ART_ID::integer AS research_id,
VERANSTALTUNG_ART_ID::integer AS event_id,
STUDIE_ART_ID::integer AS study_id,
SPRACHE_ID::integer AS language_id,
STAMMDATEN_UEBERTRAGUNG::timestamptz AS assignment,
FORSCHUNG_PROGRAMM_ID::integer AS program_id,
FORSCHUNG_SUBPROGRAMM AS subprogram
FROM
research.projekt
INNER JOIN
campusonline.personen AS co_p_m
ON projekt.projektleiter_id::integer = co_p_m.pers_nr::integer
INNER JOIN
campusonline.personen AS co_p_c
ON projekt.kontaktperson_id::integer = co_p_c.pers_nr::integer
INNER JOIN
campusonline.organisationen AS co_o
ON projekt.orgeinheit_id::integer = co_o.nr::integer
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_project";
""",
),
(
"""
CREATE UNIQUE INDEX research_project_id_idx ON "public"."research_project" ("id");
""",
"""
DROP INDEX IF EXISTS research_project_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_project_funder" AS SELECT
PROJEKT_ID::integer AS project_id,
GELDGEBER_ID::integer AS funder_id,
            CASE lower(HAUPTGELDGEBER_JA_NEIN) WHEN 'ja' THEN TRUE ELSE FALSE END AS "primary"
FROM
"research"."projekt_geldgeber"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_project_funder";
""",
),
(
"""
CREATE UNIQUE INDEX research_project_funder_idx ON "public"."research_project_funder" ("project_id", "funder_id");
""",
"""
DROP INDEX IF EXISTS research_project_funder_idx;
""",
),
(
"""
CREATE INDEX research_project_funder_project_id_idx ON "public"."research_project_funder" ("project_id");
""",
"""
DROP INDEX IF EXISTS research_project_funder_project_id_idx;
""",
),
(
"""
CREATE INDEX research_project_funder_funder_id_idx ON "public"."research_project_funder" ("funder_id");
""",
"""
DROP INDEX IF EXISTS research_project_funder_funder_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_language" AS SELECT
SPRACHE_ID::integer AS id,
SPRACHE_DE AS name,
SPRACHE_EN_KURZ AS iso
FROM
"research"."sprache";
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_language";
""",
),
(
"""
CREATE UNIQUE INDEX research_language_id_idx ON "public"."research_language" ("id");
""",
"""
DROP INDEX IF EXISTS research_language_id_idx;
""",
),
(
"""
CREATE INDEX research_language_iso_idx ON "public"."research_language" ("iso");
""",
"""
DROP INDEX IF EXISTS research_language_iso_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_projectstudy" AS SELECT
STUDIE_ART_ID::integer AS id,
STUDIE_ART_DE AS name
FROM
"research"."studie_art";
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_projectstudy";
""",
),
(
"""
CREATE UNIQUE INDEX research_projectstudy_id_idx ON "public"."research_projectstudy" ("id");
""",
"""
DROP INDEX IF EXISTS research_projectstudy_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_projectevent" AS SELECT
VERANSTALTUNG_ART_ID::integer AS id,
VERANSTALTUNG_ART_DE AS name
FROM
"research"."veranstaltung_art";
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_projectevent";
""",
),
(
"""
CREATE UNIQUE INDEX research_projectevent_id_idx ON "public"."research_projectevent" ("id");
""",
"""
DROP INDEX IF EXISTS research_projectevent_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_projectgrant" AS SELECT
VERGABE_ART_ID::integer AS id,
VERGABE_ART_DE AS name
FROM
"research"."vergabe_art"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_projectgrant";
""",
),
(
"""
CREATE UNIQUE INDEX research_projectgrant_id_idx ON "public"."research_projectgrant" ("id");
""",
"""
DROP INDEX IF EXISTS research_projectgrant_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_publication_person" AS SELECT DISTINCT
person_publikation.publikation_id::integer AS publication_id,
person_publikation.medonline_person_id::integer AS person_id
FROM
research.person_publikation
INNER JOIN
research.publikation r_p
ON person_publikation.publikation_id::integer = r_p.publikation_id::integer
INNER JOIN
campusonline.personen co_p
ON person_publikation.medonline_person_id::integer = co_p.pers_nr::integer
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_publication_person";
""",
),
(
"""
CREATE UNIQUE INDEX research_publication_person_idx ON "public"."research_publication_person" ("publication_id", "person_id");
""",
"""
DROP INDEX IF EXISTS research_publication_person_idx;
""",
),
(
"""
CREATE INDEX research_publication_person_publication_id_idx ON "public"."research_publication_person" ("publication_id");
""",
"""
DROP INDEX IF EXISTS research_publication_person_publication_id_idx;
""",
),
(
"""
CREATE INDEX research_publication_person_person_id_idx ON "public"."research_publication_person" ("person_id");
""",
"""
DROP INDEX IF EXISTS research_publication_person_person_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_publicationorganization" AS SELECT DISTINCT
FORMAT(
'%s-%s',
r_op.PUBLIKATION_ID::integer,
r_op.MEDONLINE_ID::integer
) AS id,
r_op.PUBLIKATION_ID::integer AS publication_id,
r_op.MEDONLINE_ID::integer AS organization_id,
            r_op.PUBLIKATION_AUTORENSCHAFT_ID::integer AS authorship_id,
r_op.ZUORDNUNGSDATUM::timestamptz AS assigned
FROM
research.orgeinheit_publikation r_op
INNER JOIN
research.publikation r_p
ON r_op.PUBLIKATION_ID::integer = r_p.PUBLIKATION_ID::integer
INNER JOIN
campusonline.organisationen co_o
ON r_op.MEDONLINE_ID::integer = co_o.NR::integer
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_publicationorganization";
""",
),
(
"""
CREATE UNIQUE INDEX research_publicationorganization_idx ON "public"."research_publicationorganization" ("publication_id", "organization_id");
""",
"""
DROP INDEX IF EXISTS research_publicationorganization_idx;
""",
),
(
"""
CREATE INDEX research_publicationorganization_publication_id_idx ON "public"."research_publicationorganization" ("publication_id");
""",
"""
DROP INDEX IF EXISTS research_publicationorganization_publication_id_idx;
""",
),
(
"""
CREATE INDEX research_publicationorganization_organization_id_idx ON "public"."research_publicationorganization" ("organization_id");
""",
"""
DROP INDEX IF EXISTS research_publicationorganization_organization_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_publicationcategory" AS SELECT
PUBLIKATION_TYP_ID::integer AS id,
hstore(
ARRAY['de', 'en'],
ARRAY[PUBLIKATION_TYP_DE, PUBLIKATION_TYP_EN]
) AS name
FROM
"research"."publikation_typ";
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_publicationcategory";
""",
),
(
"""
CREATE UNIQUE INDEX research_publicationcategory_id_idx ON "public"."research_publicationcategory" ("id");
""",
"""
DROP INDEX IF EXISTS research_publicationcategory_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_publicationdocument" AS SELECT
PUBLIKATION_DOKUMENTTYP_ID::integer AS id,
hstore(
ARRAY['de', 'en'],
ARRAY[PUBLIKATION_DOKUMENTTYP_DE, PUBLIKATION_DOKUMENTTYP_EN]
) AS name
FROM
"research"."publikation_dokumenttyp";
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_publicationdocument";
""",
),
(
"""
CREATE UNIQUE INDEX research_publicationdocument_id_idx ON "public"."research_publicationdocument" ("id");
""",
"""
DROP INDEX IF EXISTS research_publicationdocument_id_idx;
""",
),
(
"""
CREATE MATERIALIZED VIEW "public"."research_publication" AS SELECT DISTINCT
PUBLIKATION_ID::integer AS id,
TITEL AS title,
regexp_split_to_array(trim(both ' ' from AUTOR), ';\s*') AS authors,
JAHR::integer AS year,
QUELLE AS source,
PUBLIKATION_TYP_ID::integer AS category_id,
PUBLIKATION_DOKUMENTTYP_ID::integer AS document_id,
SCI_ID AS sci,
PUBMED_ID AS pubmed,
DOI AS doi,
PMC_ID AS pmc,
ABSTRACT AS abstract_bytes
FROM
"research"."publikation"
""",
"""
DROP MATERIALIZED VIEW IF EXISTS "public"."research_publication";
""",
),
(
"""
CREATE UNIQUE INDEX research_publication_id_idx ON "public"."research_publication" ("id");
""",
"""
DROP INDEX IF EXISTS research_publication_id_idx;
""",
),
(
"""
CREATE INDEX research_publication_year_idx ON "public"."research_publication" ("year");
""",
"""
DROP INDEX IF EXISTS research_publication_year_idx;
""",
),
(
"""
CREATE INDEX research_publication_category_id_idx ON "public"."research_publication" ("category_id");
""",
"""
DROP INDEX IF EXISTS research_publication_category_id_idx;
""",
),
(
"""
CREATE INDEX research_publication_document_id_idx ON "public"."research_publication" ("document_id");
""",
"""
DROP INDEX IF EXISTS research_publication_document_id_idx;
""",
),
]
operations = [
migrations.RunSQL(
[forward for forward, reverse in ops],
[reverse for forward, reverse in reversed(ops)],
)
]
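
# The forward/reverse SQL pairs above run through Django's RunSQL in order
# (and are undone in reverse order on rollback). Below is a hedged,
# illustrative helper for refreshing a few of the materialized views after
# the source data changes; it is not part of the original migration, and it
# only lists views that have the unique indexes required by
# REFRESH ... CONCURRENTLY.
def refresh_research_views():
    from django.db import connection
    views = (
        "research_project",
        "research_publication",
        "research_funder",
        "research_language",
    )
    with connection.cursor() as cursor:
        for view in views:
            cursor.execute(
                'REFRESH MATERIALIZED VIEW CONCURRENTLY "public"."{}";'.format(view)
            )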
# ---- StarcoderdataPython entry 9725520 ----
# OpenMC z-mesh
#
# Some code to slice the Serpent assemblies
from copy import deepcopy
import openmc
import openmc.mgxs as mgxs
from .meshes import MeshGroup
from . import cuts
def build_tallies(lat_id, geometry, export_file="tallies.xml"):
"""Create the 'tallies.xml' file
Parameters
----------
lat_id: int; the id of the lattice in the geometry
(c.ROOT_LATTICE should exist)
geometry: openmc.Geometry; the model's geometry. May be created through the API
(probably what's done here), or loaded through a statepoint or summary.
export_file: str; path to the file to export the Tallies() to.
[Default: "tallies.xml"]
"""
# Extract the geometry from an existing summary
# This is the main lattice. I will have to do something different for a lone element.
lattice = geometry.get_all_lattices()[lat_id]
nx = lattice.shape[0]
ny = lattice.shape[1]
tallies_xml = openmc.Tallies()
# Exploit the pwr.mesh module
low_left = [None]*3
low_left[0:2] = deepcopy(lattice.lower_left)[0:2]
low_left[2] = cuts.Z0
axial_mesh = MeshGroup(lattice.pitch, nx, ny, low_left)
n = len(cuts.dzs)
for i in range(n):
dz = cuts.dzs[i]
nz = cuts.n_cuts[i]
axial_mesh.add_mesh(nz=nz, dz=dz)
# Build the mesh library
mesh_lib = mgxs.Library(geometry)
mesh_lib.domain_type = "mesh"
mesh_lib.domains = axial_mesh.meshes
mesh_lib.build_library()
mesh_lib.add_to_tallies_file(tallies_xml)
# Wrap it up in a nice XML file
tallies_xml.extend(axial_mesh.tallies)
tallies_xml.export_to_xml(export_file)
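
# Hedged usage sketch (kept as comments): the geometry is normally loaded
# from an existing summary file, and the lattice id and file names below
# are placeholders rather than values from this repository.
# import openmc
# summary = openmc.Summary("summary.h5")
# build_tallies(lat_id=1, geometry=summary.geometry, export_file="tallies.xml")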
# ---- StarcoderdataPython entry 392696 ----
# file: src/python/grapl-common/grapl_common/env_helpers.py
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, TypeVar
from botocore.client import Config
from typing_extensions import Protocol
if TYPE_CHECKING:
from mypy_boto3_cloudwatch import CloudWatchClient
from mypy_boto3_dynamodb import DynamoDBClient, DynamoDBServiceResource
from mypy_boto3_ec2 import EC2ServiceResource
from mypy_boto3_route53 import Route53Client
from mypy_boto3_s3 import S3Client, S3ServiceResource
from mypy_boto3_secretsmanager import SecretsManagerClient
from mypy_boto3_sns import SNSClient
from mypy_boto3_sqs import SQSClient
from mypy_boto3_ssm import SSMClient
T = TypeVar("T", covariant=True)
class FromEnv(Protocol[T]):
def from_env(self, config: Optional[Config] = None) -> T:
pass
class FromEnvException(Exception):
pass
ClientGetParams = NamedTuple(
"ClientGetParams",
(("boto3_client_name", str),), # e.g. "s3" or "sqs"
)
def _client_get(
client_create_fn: Callable[..., Any],
params: ClientGetParams,
config: Optional[Config] = None,
) -> Any:
"""
:param client_create_fn: the `boto3.client` or `boto3.resource` function
"""
which_service = params.boto3_client_name
endpoint_url = os.getenv("GRAPL_AWS_ENDPOINT")
access_key_id = os.getenv("GRAPL_AWS_ACCESS_KEY_ID")
access_key_secret = os.getenv("GRAPL_AWS_ACCESS_KEY_SECRET")
access_session_token = os.getenv("GRAPL_AWS_ACCESS_SESSION_TOKEN")
# determine the aws region
if config is not None and config.region_name is not None:
# prefer config's region if set
region = config.region_name
else:
region = os.getenv("AWS_DEFAULT_REGION") or os.getenv("AWS_REGION")
if not region:
raise FromEnvException(
"Please set AWS_REGION, AWS_DEFAULT_REGION, or config.region_name"
)
if all((endpoint_url, access_key_id, access_key_secret)):
# Local, all are passed in from docker-compose.yml
logging.info(f"Creating a local client for {which_service}")
return client_create_fn(
params.boto3_client_name,
endpoint_url=endpoint_url,
aws_access_key_id=access_key_id,
aws_secret_access_key=access_key_secret,
aws_session_token=access_session_token,
region_name=region,
config=config,
)
elif endpoint_url and not any((access_key_id, access_key_secret)):
# Local or AWS doing cross-region stuff
return client_create_fn(
params.boto3_client_name,
endpoint_url=endpoint_url,
region_name=region,
config=config,
)
elif not any((endpoint_url, access_key_id, access_key_secret)):
# AWS
logging.info(f"Creating a prod client for {which_service}")
return client_create_fn(
params.boto3_client_name,
region_name=region,
config=config,
)
else:
raise FromEnvException(
f"You specified access key but not endpoint for {params.boto3_client_name}?"
)
_SQSParams = ClientGetParams(
boto3_client_name="sqs",
)
class SQSClientFactory(FromEnv["SQSClient"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> SQSClient:
client: SQSClient = _client_get(
self.client_create_fn, _SQSParams, config=config
)
return client
_SNSParams = ClientGetParams(
boto3_client_name="sns",
)
class SNSClientFactory(FromEnv["SNSClient"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> SNSClient:
client: SNSClient = _client_get(
self.client_create_fn, _SNSParams, config=config
)
return client
_EC2Params = ClientGetParams(
boto3_client_name="ec2",
)
class EC2ResourceFactory(FromEnv["EC2ServiceResource"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.resource
def from_env(self, config: Optional[Config] = None) -> EC2ServiceResource:
client: EC2ServiceResource = _client_get(
self.client_create_fn, _EC2Params, config=config
)
return client
_SSMParams = ClientGetParams(
boto3_client_name="ssm",
)
class SSMClientFactory(FromEnv["SSMClient"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> SSMClient:
client: SSMClient = _client_get(
self.client_create_fn, _SSMParams, config=config
)
return client
_CloudWatchParams = ClientGetParams(
boto3_client_name="cloudwatch",
)
class CloudWatchClientFactory(FromEnv["CloudWatchClient"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> CloudWatchClient:
client: CloudWatchClient = _client_get(
self.client_create_fn, _CloudWatchParams, config=config
)
return client
_Route53Params = ClientGetParams(
boto3_client_name="route53",
)
class Route53ClientFactory(FromEnv["Route53Client"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> Route53Client:
client: Route53Client = _client_get(
self.client_create_fn, _Route53Params, config=config
)
return client
_S3Params = ClientGetParams(
boto3_client_name="s3",
)
class S3ClientFactory(FromEnv["S3Client"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> S3Client:
client: S3Client = _client_get(self.client_create_fn, _S3Params, config=config)
return client
class S3ResourceFactory(FromEnv["S3ServiceResource"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.resource
def from_env(self, config: Optional[Config] = None) -> S3ServiceResource:
client: S3ServiceResource = _client_get(
self.client_create_fn, _S3Params, config=config
)
return client
_DynamoDBParams = ClientGetParams(
boto3_client_name="dynamodb",
)
class DynamoDBResourceFactory(FromEnv["DynamoDBServiceResource"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.resource
def from_env(self, config: Optional[Config] = None) -> DynamoDBServiceResource:
client: DynamoDBServiceResource = _client_get(
self.client_create_fn, _DynamoDBParams, config=config
)
return client
class DynamoDBClientFactory(FromEnv["DynamoDBClient"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> DynamoDBClient:
client: DynamoDBClient = _client_get(
self.client_create_fn, _DynamoDBParams, config=config
)
return client
_SecretsManagerParams = ClientGetParams(
boto3_client_name="secretsmanager",
)
class SecretsManagerClientFactory(FromEnv["SecretsManagerClient"]):
def __init__(self, boto3_module: Any):
self.client_create_fn = boto3_module.client
def from_env(self, config: Optional[Config] = None) -> SecretsManagerClient:
client: SecretsManagerClient = _client_get(
self.client_create_fn, _SecretsManagerParams, config=config
)
return client
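
# Hedged usage sketch: each factory is handed the boto3 module and builds a
# client or resource whose endpoint, credentials and region are resolved from
# the GRAPL_* / AWS_* environment variables in _client_get above.
# import boto3
# sqs_client = SQSClientFactory(boto3).from_env()
# s3_resource = S3ResourceFactory(boto3).from_env()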
# ---- StarcoderdataPython entry 3466667 ----
# repo: hizardapp/Hizard, file: hyrodactil/openings/models.py
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_countries import CountryField
from model_utils import Choices
from model_utils.models import TimeStampedModel
from companies.models import Company
from companysettings.models import InterviewStage
class Opening(TimeStampedModel):
EMPLOYMENT_TYPES = Choices(
('part_time', _('Part Time')),
('full_time', _('Full Time')),
('internship', _('Internship')),
)
title = models.CharField(max_length=770)
description = models.TextField()
is_private = models.BooleanField(default=False)
department = models.CharField(_("Department"), max_length=128, blank=True)
country = CountryField(_("Country"), blank=True)
city = models.CharField(_("City"), max_length=128, blank=True)
published_date = models.DateTimeField(blank=True, null=True)
employment_type = models.CharField(
choices=EMPLOYMENT_TYPES,
default=EMPLOYMENT_TYPES.full_time,
max_length=20
)
company = models.ForeignKey(Company)
def stage_counts(self):
for stage in InterviewStage.objects.filter(company=self.company):
yield self.application_set.filter(current_stage=stage).count()
def get_apply_url(self):
company_prefix = (
settings.COMPANY_URL_PREFIX % self.company.subdomain
)
return "%s%s" % (
company_prefix, reverse('public:apply', args=(self.id,))
)
def get_location_string(self):
location = ''
if self.city:
location += '%s' % self.city
if self.country:
location += ', '
if self.country:
            location += '%s' % self.country.name  # unicode() is Python 2 only; %s formatting covers both versions
return location
def get_status(self):
if self.is_private:
return _('Private')
if self.published_date:
return _('Published')
return _('Created')
def __str__(self):
return '%s' % self.title
class OpeningQuestion(TimeStampedModel):
title = models.CharField(max_length=770)
opening = models.ForeignKey(Opening, related_name='questions')
position = models.IntegerField()
class Meta:
ordering = ['position']
def __str__(self):
return '%s' % self.title
# ---- StarcoderdataPython entry 8168143 ----
# repo: corner4world/cubeai
from app.global_data.global_data import g
from app.domain.artifact import Artifact
def create_artifact(artifact):
sql = '''
INSERT INTO artifact (
solution_uuid,
name,
jhi_type,
url,
file_size,
created_date,
modified_date
) VALUES ("{}", "{}", "{}", "{}", "{}", "{}", "{}")
'''.format(
artifact.solutionUuid,
artifact.name,
artifact.type,
artifact.url,
artifact.fileSize,
artifact.createdDate,
artifact.modifiedDate,
)
conn = g.db.pool.connection()
with conn.cursor() as cursor:
cursor.execute(sql)
conn.commit()
cursor.execute('SELECT last_insert_id() FROM artifact limit 1')
id = cursor.fetchone()[0]
conn.close()
return id
def get_artifacts(where):
sql = 'SELECT * FROM artifact {}'.format(where)
conn = g.db.pool.connection()
with conn.cursor() as cursor:
cursor.execute(sql)
records = cursor.fetchall()
conn.close()
artifact_list = []
for record in records:
artifact = Artifact()
artifact.from_record(record)
artifact_list.append(artifact.__dict__)
return artifact_list
def delete_artifact(id):
sql = 'DELETE FROM artifact WHERE id = "{}"'.format(id)
conn = g.db.pool.connection()
with conn.cursor() as cursor:
cursor.execute(sql)
conn.commit()
conn.close()
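
# Hedged sketch, not part of the original module: the same INSERT written with
# DB-API parameter binding instead of str.format, which avoids quoting and
# injection problems. The %s placeholder style assumes a MySQL-flavoured
# driver behind the DBUtils pool; adjust the placeholders if the driver differs.
# sql = ('INSERT INTO artifact (solution_uuid, name, jhi_type, url, '
#        'file_size, created_date, modified_date) '
#        'VALUES (%s, %s, %s, %s, %s, %s, %s)')
# cursor.execute(sql, (artifact.solutionUuid, artifact.name, artifact.type,
#                      artifact.url, artifact.fileSize, artifact.createdDate,
#                      artifact.modifiedDate))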
# ---- StarcoderdataPython entry 6676004 ----
# Given an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i.
#
# For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output would be [2, 3, 6].
#
# Follow-up: what if you can't use division?
def prod(inp):
# Because neither side can be included!
if len(inp) < 3:
return inp
s = False
res = []
for i in inp:
if not s:
s = i
else:
s *= i
for i in inp:
res.append(s / i)
return res
# Now let's do it without division. One way to look at
# at the problem is that we are tracking partial products
# for all elements of the array. The result array will
# always be as long as the input array
def prod_without_division(inp):
res = {}
for i in range(len(inp)):
if not i in res:
res[i] = 1
for j in range(len(inp)):
if not i == j:
res[i] *= inp[j]
return list(res.values())
# Hrrm, can we do it in near linear time, i mean N*N is not
# really nice
def prod_fast(inp):
res = []
left = []
right = []
lt = 1
rt = 1
rindex = len(inp) - 1
for i in range(len(inp)):
lt *= inp[i]
left.append(lt)
rt *= inp[rindex]
right.insert(0,rt)
rindex -= 1
res.append(right[1])
i = 1
while i < len(inp) - 1:
# notice we skip 1 on the right side!
res.append(left[i - 1] * right[i + 1])
i += 1
res.append(left[i - 1])
return res
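
# Worked example for prod_fast([1, 2, 3, 4]):
#   left  = [1, 2, 6, 24]    (running prefix products)
#   right = [24, 24, 12, 4]  (running suffix products)
#   res   = [right[1], left[0]*right[2], left[1]*right[3], left[2]]
#         = [24, 12, 8, 6]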
print(prod([1, 2, 3, 4, 5]) == [120, 60, 40, 30, 24])
print(prod([3, 2, 1]) == [2, 3, 6])
print(prod_without_division([1, 2, 3, 4, 5]) == [120, 60, 40, 30, 24])
print(prod_without_division([3, 2, 1]) == [2, 3, 6])
print(prod_fast([1, 2, 3, 4, 5]) == [120, 60, 40, 30, 24])
print(prod_fast([3, 2, 1]) == [2, 3, 6])
# ---- StarcoderdataPython entry 3311981 ----
# file: tools/videoinfer_opticalflow.py
import cv2, torch, argparse
from time import time
import numpy as np
from torch.nn import functional as F
from models import UNet
from models import DeepLabV3Plus
from utils import utils
from utils.postprocess import postprocess, threshold_mask
def parse_args():
parser = argparse.ArgumentParser(description="Arguments for the script")
parser.add_argument('--use_cuda', action='store_true', default=False, help='Use GPU acceleration')
parser.add_argument('--bg', type=str, default=None, help='Path to the background image file')
parser.add_argument('--watch', action='store_true', default=False, help='Indicate show result live')
parser.add_argument('--input_sz', type=int, default=320, help='Input size')
parser.add_argument('--model', type=str, default='unet', help='model name')
parser.add_argument('--net', type=str, default='resnet18', help='Path to the background image file')
parser.add_argument('--checkpoint', type=str, default="", help='Path to the trained model file')
parser.add_argument('--video', type=str, default="", help='Path to the input video')
parser.add_argument('--output', type=str, default="", help='Path to the output video')
return parser.parse_args()
def video_infer(args):
cap = cv2.VideoCapture(args.video)
_, frame = cap.read()
H, W = frame.shape[:2]
fps = cap.get(cv2.CAP_PROP_FPS)
out = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (W, H))
# Background
if args.bg is not None:
BACKGROUND = cv2.imread(args.bg)[...,::-1]
BACKGROUND = cv2.resize(BACKGROUND, (W,H), interpolation=cv2.INTER_LINEAR)
KERNEL_SZ = 25
SIGMA = 0
# Alpha transperency
else:
COLOR1 = [255, 0, 0]
COLOR2 = [0, 0, 255]
if args.model=='unet':
model = UNet(backbone=args.net, num_classes=2, pretrained_backbone=None)
elif args.model=='deeplabv3_plus':
model = DeepLabV3Plus(backbone=args.net, num_classes=2,pretrained_backbone=None)
if args.use_cuda:
model = model.cuda()
trained_dict = torch.load(args.checkpoint, map_location="cpu")['state_dict']
model.load_state_dict(trained_dict, strict=False)
model.eval()
if W > H:
w_new = int(args.input_sz)
h_new = int(H * w_new / W)
else:
h_new = int(args.input_sz)
w_new = int(W * h_new / H)
disflow = cv2.DISOpticalFlow_create(cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
prev_gray = np.zeros((h_new, w_new), np.uint8)
prev_cfd = np.zeros((h_new, w_new), np.float32)
is_init = True
while(cap.isOpened()):
start_time = time()
ret, frame = cap.read()
if ret:
image = frame[...,::-1]
h, w = image.shape[:2]
read_cam_time = time()
# Predict mask
X, pad_up, pad_left, h_new, w_new = utils.preprocessing(image, expected_size=args.input_sz, pad_value=0)
preproc_time = time()
with torch.no_grad():
if args.use_cuda:
mask = model(X.cuda())
mask = mask[..., pad_up: pad_up+h_new, pad_left: pad_left+w_new]
#mask = F.interpolate(mask, size=(h,w), mode='bilinear', align_corners=True)
mask = F.softmax(mask, dim=1)
mask = mask[0,1,...].cpu().numpy() #(213, 320)
else:
mask = model(X)
mask = mask[..., pad_up: pad_up+h_new, pad_left: pad_left+w_new]
#mask = F.interpolate(mask, size=(h,w), mode='bilinear', align_corners=True)
mask = F.softmax(mask, dim=1)
mask = mask[0,1,...].numpy()
predict_time = time()
# optical tracking
cur_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
cur_gray = cv2.resize(cur_gray, (w_new, h_new))
scoremap = 255 * mask
optflow_map = postprocess(cur_gray, scoremap, prev_gray, prev_cfd, disflow, is_init)
optical_flow_track_time = time()
prev_gray = cur_gray.copy()
prev_cfd = optflow_map.copy()
is_init = False
optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)
optflow_map = threshold_mask(optflow_map, thresh_bg=0.2, thresh_fg=0.8)
img_matting = np.repeat(optflow_map[:, :, np.newaxis], 3, axis=2)
bg_im = np.ones_like(img_matting) * 255
re_image = cv2.resize(image, (w_new, h_new))
comb = (img_matting * re_image + (1 - img_matting) * bg_im).astype(np.uint8)
comb = cv2.resize(comb, (W, H))
comb = comb[...,::-1]
# Print runtime
read = read_cam_time-start_time
preproc = preproc_time-read_cam_time
pred = predict_time-preproc_time
optical = optical_flow_track_time-predict_time
total = read + preproc + pred + optical
print("read: %.3f [s]; preproc: %.3f [s]; pred: %.3f [s]; optical: %.3f [s]; total: %.3f [s]; fps: %.2f [Hz]" %
(read, preproc, pred, optical, total, 1/pred))
out.write(comb)
if args.watch:
cv2.imshow('webcam', comb[..., ::-1])
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
if __name__ == '__main__':
args = parse_args()
video_infer(args)
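
# Hedged invocation sketch (paths and checkpoint names are placeholders):
#   python tools/videoinfer_opticalflow.py --model unet --net resnet18 \
#       --checkpoint checkpoints/model.pth --video input.mp4 \
#       --output output.avi --watch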
# ---- StarcoderdataPython entry 5014070 ----
import os
import subprocess
from contextlib import contextmanager
from os.path import isdir, isfile
@contextmanager
def inside_dir(path):
"""
Execute code from inside the given directory
:param path: String, path of the directory the command is being run.
"""
if not isinstance(path, str):
path = str(path)
old_path = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_path)
def test_project_tree(cookies):
result = cookies.bake(
extra_context={
'project_name': 'test-project',
'project_slug': 'test_project',
}
)
assert result.exit_code == 0
assert result.exception is None
assert result.project.basename == 'test-project'
files = [
'.gitignore',
'.pylintrc',
'CHANGELOG.md',
'LICENSE',
'README.md',
'setup.cfg',
'setup.py',
]
dirs = [
'src',
'src/test_project',
'tests',
]
with inside_dir(result.project):
for path in files:
assert isfile(path)
for path in dirs:
assert isdir(path)
def test_run_flake8(cookies):
result = cookies.bake(extra_context={'project_slug': 'flake8_compat'})
with inside_dir(result.project):
subprocess.check_call(['flake8'])
def test_run_pytest(cookies):
result = cookies.bake(extra_context={'project_name': 'run-pytest'})
with inside_dir(result.project):
subprocess.check_call(['git', 'init'])
subprocess.check_call(['python', '-m', 'venv', '.env'])
subprocess.check_call(
'. .env/bin/activate && pip install .[dev]',
shell=True,
)
subprocess.check_call('. .env/bin/activate && pytest', shell=True)
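
# Hedged usage note: the `cookies` fixture used in these tests comes from the
# pytest-cookies plugin, so a typical local run is roughly:
#   pip install pytest-cookies
#   pytest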
# ---- StarcoderdataPython entry 3225246 ----
# repo: saisankargochhayat/doot
from tornado import websocket, web, ioloop
import os
path=os.getcwd()
# str.strip() removes a set of characters, not a substring; replace the trailing directory explicitly
path = os.path.join(os.path.dirname(path), 'ML')
import sys
sys.path.append(path)
import tornado.escape
from tornado import gen
import tornado.httpserver
import tornado.options
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.linear_model import SGDClassifier
from sklearn import svm
import collections
# from data_loader import data_loader
import json
import pprint
import pandas
from sklearn import svm
import numpy as np
from tornado.escape import json_decode
from tornado.escape import json_encode
from feature_extracter_live import *
from sklearn import preprocessing
from helper import svm,knn,dtree,sgd,lda,qda
from textblob import TextBlob
# define("port", default=8080, help="run on the given port", type=int)
data = []
labels = []
dataFrame = pandas.read_csv('../CSV_Data/server_dataset.csv')
svm_model , svm_scaler = svm.get_model(dataFrame)
knn_model , knn_scaler = knn.get_model(dataFrame)
sgd_model , sgd_scaler = sgd.get_model(dataFrame)
dtree_model , dtree_scaler = dtree.get_model(dataFrame)
lda_model , lda_scaler = lda.get_model(dataFrame)
qda_model , qda_scaler = qda.get_model(dataFrame)
print("Trained")
class HomeHandler(web.RequestHandler):
def get(self):
self.render("static/index.html")
class Words(web.RequestHandler):
def get(self):
self.render("static/words.html")
class Letter(web.RequestHandler):
def get(self):
self.render("static/letter.html")
class Visualizer1(web.RequestHandler):
def get(self):
self.render("static/visualizer1.html")
class Predictor(web.RequestHandler):
def get(self):
self.render("static/predictor.html")
class Visualizer(web.RequestHandler):
def get(self):
self.render("static/visualizer.html")
class Predict(websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
print("WebSocket opened")
def on_message(self, message):
msg = json.loads(message)
test=extract_array(msg)
test = np.array(test)
test = test.reshape(1,-1)
predictions = {}
vote = {}
predictions['svm'] = str(svm_model.predict(svm_scaler.transform(test))[0])
if predictions['svm'] in vote:
vote[predictions['svm']] = vote[predictions['svm']]+1
else:
vote[predictions['svm']] = 1
predictions['knn'] = str(knn_model.predict(knn_scaler.transform(test))[0])
if predictions['knn'] in vote:
vote[predictions['knn']] = vote[predictions['knn']]+1
else:
vote[predictions['knn']] = 1
predictions['lda'] = str(lda_model.predict(lda_scaler.transform(test))[0])
if predictions['lda'] in vote:
vote[predictions['lda']] = vote[predictions['lda']]+1
else:
vote[predictions['lda']] = 1
predictions['qda'] = str(qda_model.predict(qda_scaler.transform(test))[0])
if predictions['qda'] in vote:
vote[predictions['qda']] = vote[predictions['qda']]+1
else:
vote[predictions['qda']] = 1
predictions['sgd'] = str(sgd_model.predict(sgd_scaler.transform(test))[0])
if predictions['sgd'] in vote:
vote[predictions['sgd']] = vote[predictions['sgd']]+1
else:
vote[predictions['sgd']] = 1
predictions['dtree'] = str(dtree_model.predict(dtree_scaler.transform(test))[0])
if predictions['dtree'] in vote:
vote[predictions['dtree']] = vote[predictions['dtree']]+1
else:
vote[predictions['dtree']] = 1
count = collections.Counter(vote)
predictions['max_vote'] = count.most_common(1)[0][0]
letter = predictions['max_vote']
self.write_message(letter)
def on_close(self):
print("WebSocket closed")
# class normal_user(websocket.WebSocketHandler):
# def check_origin(self, origin):
# return True
app = web.Application([
(r'/assets/(.*)', web.StaticFileHandler, {'path': 'static/assets/'}),
(r'/static/(.*)', web.StaticFileHandler, {'path': 'static/'}),
(r"/",HomeHandler),
(r"/predictor",Predictor),
(r"/visualizer",Visualizer),
(r"/visualizer1",Visualizer1),
(r"/words",Words),
(r"/letter",Letter),
(r"/ws",Predict)
])
if __name__ == '__main__':
app.listen(3000)
print("Listening at 127.0.0.1:3000")
ioloop.IOLoop.instance().start()
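
# Hedged client sketch: /ws expects a JSON payload that extract_array() can
# consume and replies with the predicted letter as plain text. The
# websocket-client package and the sample payload are assumptions, not part
# of this repository.
# import json, websocket
# ws = websocket.create_connection("ws://127.0.0.1:3000/ws")
# ws.send(json.dumps(sample_frame))  # sample_frame: placeholder feature dict
# print(ws.recv())
# ws.close()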
# ---- StarcoderdataPython entry 161858 ----
#!/usr/bin/env python
# Gemini Flat light controller (National Control Devices Pulsar series)
# RLM + DL 19 Jan 2016
import socket
import struct
import binascii
import sys
def dimmer(Intensity):
# TCP port of light dimmer
TCP_IP = '192.168.1.22'
TCP_PORT = 2101
BUFFER_SIZE = 1024
# Get desired intensity level from command line or ask user
#if len(sys.argv) == 2:
# Intensity = int(sys.argv[1])
#else:
# Intensity = int(raw_input('Enter intensity level (0-254): '))
if not 0 <= Intensity < 255:
        sys.exit('Intensity (%i) must be 0->254, exiting' % Intensity)
# Open socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
# Create 4 byte array
my_bytes = bytearray()
my_bytes.append(253); my_bytes.append(0)
my_bytes.append(Intensity); my_bytes.append(1)
    try:
        s.send(my_bytes)
    except socket.error as exc:
        print('Failed to send dimmer command: %s' % exc)
    finally:
        data = s.recv(BUFFER_SIZE)
        # The controller acknowledges with a single 'U' byte; recv returns bytes.
        if data != b'U':
            print('Failed')
s.close()
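
# Hedged usage sketch: with the controller reachable at the hard-coded
# IP/port above, the lamp can be driven directly, e.g.
#   dimmer(128)  # roughly half intensity
#   dimmer(0)    # lamp off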
# ---- StarcoderdataPython entry 162343 ----
# repo: shannonrstewart/FLARE, file: refactoredforgithub.py
import pandas as pd
import numpy as np
from pandas import DataFrame
import statistics, dedupe, json, os, csv, re, unidecode, urllib.parse, requests
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import imblearn
from imblearn import under_sampling
from collections import Counter
from sklearn.manifold import TSNE
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
plt.style.use('ggplot')
# readData comes from dedupe tutorial
def readData(filename):
"""
Read in data from a CSV file and create a dictionary of records,
where the key is a unique record ID.
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for i, row in enumerate(reader):
clean_row = dict([(k, preProcess(v)) for (k, v) in row.items()])
data_d[i] = dict(clean_row)
return data_d
def preProcess(column):
"""
Do a little bit of data cleaning with the help of Unidecode and Regex.
Things like casing, extra spaces, quotes and new lines can be ignored.
"""
column = unidecode.unidecode(column)
column = re.sub('\n', ' ', column)
column = re.sub('/', ' ', column)
column = re.sub("'", '', column)
column = re.sub(":", ' ', column)
column = re.sub(' +', ' ', column)
column = column.strip().strip('"').strip("'").upper().strip()
if not column:
column = None
return column
# clusterMerge adapted from dedupe tutorial
def clusterMerge(datasource, key, values):
clusters = datasource[datasource[values].notnull()]
to_cluster = {}
for i,row in clusters.iterrows():
dictKey = row[key]
dictValues = row[values]
if dictKey not in to_cluster:
to_cluster[dictKey] = [dictValues]
else:
to_cluster[dictKey].append(dictValues)
return to_cluster
def load_data(path_to_spreadsheet, columns, label, rename_columns=None, column_edits=None):
'''
This function loads in the data that needs to be processed.
path_to_spreadsheet: string pointing to file
columns: list of columns to retain
label: label high or unknown risk (1 for high risk, 0 for unknown)
rename_columns: dictionary in form of {'current name': 'new name'}
column_edits: list of lists. [[column, find value, replace value]]
'''
data = pd.read_csv(path_to_spreadsheet)
    data_subset = data[columns].copy()  # copy so the label column below can be added without SettingWithCopyWarning
data_subset['labels'] = label
if rename_columns:
data_subset.rename(columns=rename_columns, inplace=True)
if column_edits:
for column_edit in column_edits:
data_subset[column_edit[0]] = data_subset[column_edit[0]].str.replace(column_edit[1], column_edit[2])
return data_subset
# information to import
files_to_import = [
('zaubacorp_garments_Initial_NIC_Set.csv', ['company_name', 'cin', 'address', 'authorised_capital', 'paid_up_capital', 'email'], 0),
('zaubacorp_textile_Initial_NIC_Set.csv', ['company_name', 'cin', 'address', 'authorised_capital', 'paid_up_capital', 'email'], 0),
('all_imports__garments_shipments_results_1_to_10_000_of_25_507.csv', ['Consignee', 'Consignee Full Address'], 0),
('all_imports__garments_shipments_results_1_to_10_000_of_25_507.csv', ['company_name', 'address', 'Shipper Email 1'], 0, {'Shipper Email 1':'email'}),
('Apparel_Industry_ Directly_connected_companies.csv', ['company_name', 'cin', 'address'], 1),
('Apparel_Industry_Companies_connected_to_violators.csv', ['company_name', 'cin', 'address'], 1),
('Apparel_Industry_Companies_connected_to_violators.csv', ['suspected violator'], 1, {'suspected violator':'company_name'}),
('badlist.csv', ['company_name', 'address'], 1),
('textile_Sector_Violators_Directly_sourced_matches.csv', ['company'], 1, {'company':'company_name'}),
('textile_Sector_Violators_Directly_sourced_matches.csv', ['group'], 1, {'group':'company_name'}),
('Marks and Spencers Data Match and Close.csv',['company_name', 'address', 'Total workers', r'% Female', 'Trade Union', 'Workers Commitee'], 0),
('facilities.csv', ['company_name', 'address', 'lat', 'lng','contributors'], 0, {'contributors':'Consignee'},[['Consignee', r'\([^)]*\)', '']])
]
# load the data
companies = pd.concat([load_data(*import_file) for import_file in files_to_import])
#Clean up the strings, put them in upper case (except emails), remove punctuation from names, remove low-info parts of strings
upper_columns = ['company_name', 'address', 'Consignee']
for upper_column in upper_columns:
companies[upper_column] = companies[upper_column].str.upper()
strip_columns = ['company_name']
for strip_column in strip_columns:
companies[strip_column] = companies[strip_column].str.strip()
replace_columns = [
['company_name', '.', ''],
['Consignee', r'\s*(LTD PVT|LIMITED PRIVATE|LTD|PVT|INC|LIMITED|PRIVATE|LTD PRIVATE|LIMITED PVT|PVT LTD|PRIVATE LIMITED|PRIVATE LTD|PVT LIMITED)\s*$',''],
['authorised_capital','₹',''],
['authorised_capital',',',''],
['paid_up_capital','₹',''],
['paid_up_capital',',','']
]
for replace_column in replace_columns:
companies[replace_column[0]] = companies[replace_column[0]].str.replace(replace_column[1], replace_column[2])
# save interim state to file for future reference
companies.to_csv("alldata.csv")
#The settings and training files will be retained once they have been run once, if you want to make changes to the entity model, you must delete them
input_file = "alldata.csv"
output_file = 'canonical_businesses.csv'
settings_file = 'business_dedupe_learned_settings_new'
training_file = 'csv_dedupe_training_new.json'
# the following code is based on the dedupe tutorial
print('importing data ...')
data_d = readData(input_file)
if os.path.exists(settings_file):
print('reading from', settings_file)
with open(settings_file, 'rb') as f:
deduper = dedupe.StaticDedupe(f)
else:
fields = [
{'field': 'company_name', 'type': 'String', 'has missing': True},
{'field': 'address', 'type': 'String', 'has missing': True},
{'field': 'cin', 'type':'String', 'has missing': True},
{'field': 'email', 'type': 'String', 'has missing': True}
]
deduper = dedupe.Dedupe(fields)
if os.path.exists(training_file):
print('reading labeled examples from ', training_file)
with open(training_file, 'rb') as f:
deduper.prepare_training(data_d, f)
else:
deduper.prepare_training(data_d)
#Active labeling
print('starting active labeling...')
dedupe.console_label(deduper)
deduper.train()
with open(training_file, 'w') as tf:
deduper.write_training(tf)
with open(settings_file, 'wb') as sf:
deduper.write_settings(sf)
print('clustering...')
clustered_dupes = deduper.partition(data_d, 0.3)
print('# duplicate sets', len(clustered_dupes))
#write to file with cluster IDs
companies['cluster'] = 0
companies['confidence'] = 0
cluster_membership = {}
for cluster_id, (records, scores) in enumerate(clustered_dupes):
for record_id, score in zip(records, scores):
record_id = int(record_id)
companies['cluster'].iloc[record_id] = cluster_id
companies['confidence'].iloc[record_id] = score
# cluster_membership[record_id] = {
# "Cluster ID": cluster_id,
# "confidence_score": score
# }
# assign a unique id to each company
companies['uid'] = range(len(companies))
# save dataframe with cluster info included
companies.to_csv("clustereddata.csv")
# #companies['labels'] = companies['labels'].fillna(0)
# #Start building clusters
# cluster_to_name = clusterMerge(companies,'cluster','company_name')
# cluster_to_address = clusterMerge(companies, 'cluster', 'address')
# name_to_cluster = clusterMerge(companies,'company_name','cluster')
# address_to_cluster = clusterMerge(companies, 'address', 'cluster')
# I took this out, but I'm not sure about that decision.
# One useful characteristic is how many trade partners an entity has
tradePartners = {}
companies['trade_partner_count'] = 0
for i,row in companies.iterrows():
current_company = row['cluster'] # grab the cluster id to refer to deduped company
if type(row['Consignee']) != float: # sometimes Consignees are floats (messy data)
        partners = row['Consignee'].split("|")  # partners are divided by pipes
        if current_company not in tradePartners:  # if we have not seen the company before
            tradePartners[current_company] = set(partners)  # save a set of the partners
        else:
            tradePartners[current_company].update(partners)  # add them in place; set.union would return a new set and discard it
# prep a dictionary to store the new information (we will turn this into a dataframe later),
# based on cluster information
refined_data = {}
column_names = list(companies.columns)
for i,row in companies.iterrows():
# if we've seen this cluster, count how many trade partners it has
if row['cluster'] in tradePartners:
row['trade_partner_count'] = len(tradePartners[row['cluster']])
# if we have not encountered the cluster yet transfer the information from the row to the dictionary
if row['cluster'] not in refined_data:
refined_data[row['cluster']] = dict(row)
else:
for column in column_names:
stored_val = refined_data[row['cluster']][column]
if stored_val == "":
refined_data[row['cluster']][column] = row[column]
if column == "labels":
# if the current value is 1 replace stored value
if row[column] == 1:
refined_data[row['cluster']][column] = row[column]
if column == "address":
# grab the longest address
if type(refined_data[row['cluster']][column]) == str and type(row[column]) == str:
if len(row[column]) > len(refined_data[row['cluster']][column]):
refined_data[row['cluster']][column] = row[column]
# Create a data frame out of the refined data
dict_list = [data_dict for data_dict in refined_data.values()]
data_add_geo = pd.DataFrame(dict_list,columns=column_names)
# Save interim data to file
data_add_geo.to_csv("deduped_data.csv")
# Get location information. If nothing has been saved, run google api. Otherwise grab files from address folder
# google maps api
key = 'redacted'
apiURL = "https://maps.googleapis.com/maps/api/geocode/json?address="
have_address = data_add_geo.dropna(subset=['address'])
address_directory = "addressresults"
if not os.path.isdir(address_directory):
os.mkdir(address_directory)
# get lat/lon for addresses where the info is not available yet
for i,row in have_address.iterrows():
address = row['address']
lat = row['lat']
lon = row['lng']
if np.isnan(lat) and np.isnan(lon):
row_id = str(row['uid'])
fname = row_id +".json"
if not os.path.isfile(os.path.join(address_directory,fname)):
# use google maps api to fetch lat/lon
address = urllib.parse.quote(address, safe='')
apiCall = f'{apiURL}{address}&key={key}'
print(f'Calling api for {fname}')
results = requests.get(apiCall)
results_json = results.json()
if "results" in results_json and len(results_json["results"]) > 0:
result = results_json["results"][0]
if "geometry" in result:
lat = result["geometry"]["location"]["lat"]
lon = result["geometry"]["location"]["lng"]
with open(os.path.join(address_directory,fname), 'w', encoding='utf8') as wf:
json.dump(results_json, wf)
else:
# load lat long from file
with open(os.path.join(address_directory,fname), 'r', encoding='utf8') as rf:
results_json = json.load(rf)
if "results" in results_json and len(results_json["results"]) > 0:
                result = results_json["results"][0]  # first geocoding result, mirroring the API branch above
if "geometry" in result:
lat = result["geometry"]["location"]["lat"]
lon = result["geometry"]["location"]["lng"]
# save the info back to the dataframe
data_add_geo.loc[i, "lat"] = lat
data_add_geo.loc[i, "lng"] = lon
#Note: at one point I had implemented a K-means clustering algorithm here to try to capture city-level clusters in lat/lng pairs, but it performed worse than using the two features independently.
training_frame = data_add_geo.dropna(subset=['company_name','address','authorised_capital','paid_up_capital','trade_partner_count','labels','lat','lng'])
# columns to use in training/test, can be adjusted as you please:
training_columns = ["authorised_capital", "paid_up_capital", "trade_partner_count", "lat", "lng"]
#Downsample
training_frame_majority = training_frame[training_frame.labels == 0]
training_frame_minority = training_frame[training_frame.labels == 1]
# training_frame_minority.to_csv("highrisk.csv")
training_frame_majority_downsampled = training_frame_majority.sample(n=100, random_state=42)
# training_frame_majority_downsampled.to_csv("controlgroup.csv")
training_frame_downsampled = pd.concat([training_frame_majority_downsampled, training_frame_minority])
#Create numpy arrays for features and target
plotdata = training_frame_downsampled[training_columns]
X = training_frame_downsampled[training_columns].values
y = training_frame_downsampled['labels'].values
#importing train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.4,random_state=42, stratify=y)
#Rescaling the features
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#Import the classifier
#I have tested KNN, Random Forest, and sklearn's Multi-Layer Perceptron, and in general I get better results with the RF.
classifier = KNeighborsClassifier(n_neighbors=1, weights = 'distance')
classifier.fit(X_train, y_train)
#pickle the model
import pickle
with open('rfendstate.p', 'wb') as model_file:
    pickle.dump((classifier, scaler), model_file)
#Make the prediction
y_pred = classifier.predict(X_test)
#Plot the results using t_SNE
np.random.seed(42)
rndperm = np.random.permutation(plotdata.shape[0])
N = 80
df_merge = pd.DataFrame(data=X_train, columns=training_columns)
df_merge = df_merge[:N]
# tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=250)
# tsne_results = tsne.fit_transform(df_merge.values)
# df_merge["label"] = y_train[:N]
# df_merge['tsne-2d-one'] = tsne_results[:,0]
# df_merge['tsne-2d-two'] = tsne_results[:,1]
# plt.figure(figsize=(16,10))
# sns.set_style("white")
# sns.scatterplot(
# x="tsne-2d-one", y="tsne-2d-two",
# hue="label",
# palette=sns.color_palette("dark", 2),
# data=df_merge,
# #legend='full',
# alpha=0.3
# )
# legend = plt.legend()
# legend.get_texts()[1].set_text('Low Risk')
# legend.get_texts()[2].set_text('High Risk')
# sns.despine()
# plt.savefig("RFPlot.png")
#plt.show()
#Print the output if you want. Otherwise write it to file.
#print(y_pred)
# feature_importances_ only exists on tree-based models (e.g. RandomForest), so guard it for the KNN classifier chosen above
if hasattr(classifier, 'feature_importances_'):
    feature_imp = pd.Series(classifier.feature_importances_, index=training_frame[training_columns].columns).sort_values(ascending=False)
    print(feature_imp)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# ---- StarcoderdataPython entry 6680936 ----
import json
import logging
class Akeneo_Exception(Exception):
pass
class Akeneo_RequestException(Akeneo_Exception):
response = None
def __init__(self, response):
self.response = response
request_body = response.request.body
status_code = response.status_code
if response.headers["content-type"] == "application/json":
response_body = response.json()
else:
response_body = response.text
super().__init__(
f"ERROR {status_code} {response.request.method} {response.url}\nData sent : {request_body}\nData recieved : {response_body}"
)
class Akeneo_NotFound(Akeneo_RequestException):
pass
class Akeneo_UnexpectedResponse(Akeneo_Exception):
expect = None
got = None
def __init__(self, expect, got):
self.expect = expect
self.got = got
super().__init__("Key %s not found in %s" % (expect, got))
# ---- StarcoderdataPython entry 6670395 ----
from base64 import b64encode
from hashlib import blake2b
import random
import re
import sqlite3 as sql
from datetime import date
import json
from src.constants import MAX_DAY_LIMIT, DIGEST_SIZE, SHORT_URL_SPECIFIER
from flask import Flask, jsonify, redirect, request
app = Flask(__name__)
def url_valid(url):
"""Validates a url by parsing it with a regular expression.
and checks if url letters are less than 250 characters
Parameters:
url - string representing a url to be validated.
Return values:
Boolean, indicating the validity of the url.
"""
return re.match(regex, url) is not None and len(url) <= 250
def shorten(url):
"""Shortens a url by generating a 9 byte hash, and then
converting it to a 12 character long base 64 url friendly string.
Parameters:
url - the url to be shortened.
Return values:
String, the unique shortened url, acting as a key for the entered long url.
"""
    url_hash = blake2b(str.encode(url), digest_size=DIGEST_SIZE)
    b64 = b64encode(url_hash.digest(), altchars=b'-_').decode('utf-8')
    # Keys stored in `shortened` are the base64 aliases, so the collision
    # check must compare the encoded alias, not the hash object itself.
    while b64 in shortened:
        url += str(random.randint(0, 9))
        url_hash = blake2b(str.encode(url), digest_size=DIGEST_SIZE)
        b64 = b64encode(url_hash.digest(), altchars=b'-_').decode('utf-8')
    return b64
def bad_request(message):
"""Takes a supplied message and attaches it to a HttpResponse with code 400.
Parameters:
message - string containing the error message.
Return values:
An object with a message string and a status_code set to 400.
"""
response = jsonify({'message': message})
response.status_code = 400
return response
@app.route('/shorten_url', methods=['POST', 'GET'])
def shorten_url():
"""endpoint that looks for a supplied string called "url",
contained inside a json or HTTP request object. Then validates this url and
either returns an error response as appropriate, or generates a
shortened url, stores the shortened url, and then returns it - if valid.
Parameters:
None. However, the global request object should contain the aforementioned json.
Return values:
A response signifying success or error.
Successes contain the shortened url, errors contain an appropriate message.
"""
if request.method == "GET":
url = request.args.get('url')
premium = request.args.get('premium')
custom_url = request.args.get('custom_url')
elif request.method == "POST":
url = request.json.get('url')
premium = request.json.get('premium')
custom_url = request.json.get('custom_url')
# finding the server's base url address
base_url_index = request.base_url
base_url = base_url_index[:base_url_index.rfind('/')+1]
if url[:4] != 'http':
url = 'http://' + url
if not url_valid(url):
return bad_request('Provided url is not valid.')
if premium:
if not custom_url:
return jsonify({"Error":"custom URL is not provided"}), 400
shortened_url = custom_url
elif custom_url:
return jsonify({"Error":"To use custom url, premium mebership is required!"}), 400
else:
# For redirection purposes, we want to append http at some point.
shortened_url = shorten(url)
shortened[shortened_url] = url
conn = sql.connect('urls.db')
cursor = conn.cursor()
today = date.today()
current_date = today.strftime("%Y/%m/%d").split('/')
cursor.execute('SELECT * FROM urls')
url_table_data = [[str(item) for item in results] for results in cursor.fetchall()][-1]
short_url = shortened_url
short_url_from_db = SHORT_URL_SPECIFIER+url_table_data[2]
original_url = url_table_data[1]
if short_url == short_url_from_db:
return (
jsonify(
{
'success': True,
'original_url': original_url,
'short_url': base_url+short_url_from_db
}
),
200,
)
else:
cursor.execute("INSERT INTO urls(original_url,short_url, url_creation_time) VALUES (?,?,?);",
(str(url),str(shortened_url), str(current_date)))
cursor.execute('SELECT * FROM urls WHERE short_url=?;', (str(shortened_url),))
conn.commit()
return_this = [[str(item) for item in results] for results in cursor.fetchall()][0]
shortened_url = "url/"+return_this[2]
return jsonify(
{'success': True,
'original_url': url,
'short_url': base_url+shortened_url
}), 201
@app.route('/'+SHORT_URL_SPECIFIER+'<alias>', methods=['GET'])
def get_shortened(alias):
"""GET endpoint that takes an alias (shortened url) and redirects if successfull.
Otherwise returns a bad request.
Arguments:
alias, the string representing a shortened url.
Return values:
A Flask redirect, with code 302.
"""
if alias not in shortened:
return bad_request('Unknown alias.')
url = shortened[alias]
con = sql.connect('urls.db')
cur = con.cursor()
try:
cur.execute("UPDATE urls SET visitors=visitors+1 WHERE short_url = ?", (str(alias),))
con.commit()
except Exception as e:
print(e)
return jsonify({"error": "Please check the short_url."}), 400
cur.close()
con.close()
return redirect(url, code=302)
@app.route('/stats/', methods=['GET', 'POST'])
def stats():
'''
Provides statistical data
----
parameters: short_url
string to be used to perform actions with the database
----
return values
        json data, consisting of statistical information (visitor count and days left)
'''
if request.method == "GET":
short_url = request.args.get('url')
elif request.method == "POST":
short_url = request.json.get('stats')
# Support for the case if user enters a full URL
if '/' in short_url:
short_url = short_url[short_url.rfind('/')+1:]
con = sql.connect('urls.db')
cur = con.cursor()
cur.execute("SELECT visitors FROM urls WHERE short_url = ?", (str(short_url),))
try:
visitors = int(cur.fetchone()[0])
except TypeError:
return jsonify({"Error":"Provided URL could not be found in database"}), 404
diffrence = remaning_days_validation(short_url, cur)
return jsonify({"visitors":visitors,"days_left":diffrence})
def remaning_days_validation(short_url, cur):
    '''
    provides information about the remaining days before URL deletion
    (see the worked example after this function)
    ----
    parameters: short_url, cur
        the first parameter is used to fetch the corresponding row from the database,
        the second one is a cursor (cur) needed to communicate with the database
    ----
    returned data:
        returns the number of days left, i.e. MAX_DAY_LIMIT minus the age of the url in days
    '''
# Fetching url creation time data from the database
cur.execute("SELECT url_creation_time FROM urls WHERE short_url = ?", (str(short_url),))
date_lst_in_str_format = cur.fetchone()[0].replace("'",'"')
date_of_url_creation = list(map(int,json.loads(date_lst_in_str_format))) # used json.loads and not ast.literal_eval() for code security purposes
# getting the current date
today = date.today()
current_date_in_str = today.strftime("%Y/%m/%d").split('/')
current_date = list(map(int,current_date_in_str))
# Checking the difference between url creation and current date
date1 = date(date_of_url_creation[0], date_of_url_creation[1], date_of_url_creation[2])
date2 = date(current_date[0], current_date[1], current_date[2])
diffrence_in = date2 - date1
diffrence = MAX_DAY_LIMIT - diffrence_in.days
    if diffrence <= 0:
        # The url has reached (or passed) the retention limit, so drop it.
        cur.execute("DELETE from urls WHERE short_url= ?", (str(short_url),))
return diffrence
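# Worked example for the calculation above (hypothetical values): if the url was
# created on 2024/01/01 and today is 2024/01/10, date2 - date1 is 9 days, so the
# endpoint reports days_left = MAX_DAY_LIMIT - 9. Once that value reaches zero
# (or goes negative), the row is deleted from the urls table.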
# Django url validator RegEx https://github.com/django/django/blob/stable/1.3.x/django/core/validators.py#L45
# Slightly modified to not allow ftp
regex = re.compile(
r'^(?:http)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
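# Rough illustration of what this validator accepts (added note, not part of the
# original code): regex.match('https://example.com/path') and
# regex.match('http://127.0.0.1:5000') both succeed, while regex.match('example.com')
# fails because a scheme is required -- which is why the shorten endpoint above
# prepends 'http://' before validating.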
shortened = {}
if __name__ == '__main__':
app.run(host='0.0.0.0', port = 5000)
|
StarcoderdataPython
|
9734572
|
<gh_stars>0
from rest_framework import serializers
from listings.models import Listing, HotelRoom, HotelRoomType, BookingInfo
class ListingSerializer(serializers.ModelSerializer):
class Meta:
model = Listing
fields = "__all__"
class HotelRoomSerializer(serializers.ModelSerializer):
class Meta:
model = HotelRoom
fields = "__all__"
class HotelRoomTypeSerializer(serializers.ModelSerializer):
class Meta:
model = HotelRoomType
fields = "__all__"
class BookingInfoSerializer(serializers.ModelSerializer):
class Meta:
model = BookingInfo
fields = "__all__"
|
StarcoderdataPython
|
11235589
|
a = input("Enter = ")
for i in range(len(a)):
    for j in range(len(a)):
        if i == j or j == len(a) - 1 - i:
            print(a[j], end="")
        else:
            print(" ", end="")
    print()
|
StarcoderdataPython
|
273839
|
<gh_stars>1-10
from django.test import TestCase
from friends.models import Follow
from users.models import User
from django.db.models import Q
from django.db import connection
#Tests if follow outputs the correct users for "followed" and "following"
class FollowTestCase(TestCase):
person1 = None
person2 = None
person3 = None
person4 = None
person5 = None
person6 = None
person7 = None
def setUp(self):
#create user objects
self.person1 = User.objects.create(username="person1",password="<PASSWORD>")
self.person2 = User.objects.create(username="person2",password="<PASSWORD>")
self.person3 = User.objects.create(username="person3",password="<PASSWORD>")
self.person4 = User.objects.create(username="person4",password="<PASSWORD>")
self.person5 = User.objects.create(username="person5",password="<PASSWORD>")
self.person6 = User.objects.create(username="person6",password="<PASSWORD>")
self.person7 = User.objects.create(username="person7",password="<PASSWORD>")
#create Follows
#p1 follows: 2,3,4
#p2 follows: 3
#p3 follows: 2,4
#p4 follows: 3,7
#p5 follows: no one
#p6 follows: 7
#p7 follows: 6,4
Follow.objects.create( user1 = self.person1,user2=self.person2)
Follow.objects.create(user1=self.person1,user2=self.person3)
Follow.objects.create(user1= self.person3,user2=self.person2)
Follow.objects.create(user1=self.person2,user2=self.person3)
Follow.objects.create(user1=self.person3,user2=self.person4)
Follow.objects.create(user1=self.person4,user2=self.person3)
Follow.objects.create(user1=self.person6,user2=self.person7)
Follow.objects.create(user1=self.person4,user2=self.person7)
Follow.objects.create(user1=self.person7,user2=self.person6)
Follow.objects.create(user1=self.person7,user2=self.person4)
Follow.objects.create(user1=self.person1,user2=self.person4)
def test_modelCreation(self):
follow_obj = Follow.objects.filter(user1=self.person1,user2=self.person2).get()
self.assertTrue(isinstance(follow_obj,Follow))
def test_followeesCorrect(self):
#tests if the follower has the correct followee's
queryset = Follow.objects.filter(user1=self.person1)
expected1= "person2"
expected2 = "person3"
expected3 = "person4"
notexpected = "person5"
for person in queryset:
follower = person.user2.username
self.assertNotEqual(follower,notexpected)
correct = False
if follower == expected1 or follower == expected2 or follower==expected3:
correct = True
self.assertTrue(correct, "Follower does not have correct followees")
def test_followersCorrect(self):
#tests if the followees have the correct follower's
follower_objs = Follow.objects.filter(user2=self.person3)
expected1 = 'person1'
expected2 = 'person2'
expected3 = 'person4'
        notexpected = 'person6'
        for person in follower_objs:
            follower = person.user1.username
            self.assertNotEqual(follower, notexpected)
            correct = False
            if follower == expected1 or follower == expected2 or follower == expected3:
                correct = True
            self.assertTrue(correct, "Followee does not have correct followers")
def test_correctImmediateFriends(self):
#tests for immediate friends, aka user x and user y follow each other
friends = self.queryFriends(self.person3)
expected = {self.person2,self.person4}
self.assertEqual(friends,expected,"Immediate friends are not correct")
def test_falsePositiveImmediateFriends(self):
#tests to make sure that no false positives appear
friends = self.queryFriends(self.person5)
expected = set() #person 5 has no friends
self.assertEqual(friends,expected,"False positive for immediate friends")
def test_correctMutuals(self):
#tests if the user has the correct set of mutual friends
        #Assumes that the users are already following each other
#does not include immediate friends
#Testing for person4
#Expected outcome: person2, person6
mutuals = set()
target = self.person4
friends = self.queryFriends(target)
for friend in friends:
fof = self.queryFriends(friend)
for person in fof:
mutuals.add(person)
mutuals.remove(target)
expected = {self.person2,self.person6}
self.assertEqual(mutuals,expected, "Incorrect mutual friends")
def test_falsePositiveMutuals(self):
#tests for false positives for mutual friends
mutuals = set()
target = self.person1
friends = self.queryFriends(target)
for friend in friends:
fof = self.queryFriends(friend)
for person in fof:
mutuals.add(person)
expected = set() #person1 only follows, so empty set
self.assertEqual(mutuals,expected, "False positive for mutual friends")
def queryFriends(self,target):
friendsOfTarget = set()
timesAppeared = []
person_query = Follow.objects.filter(Q(user1=target) | Q(user2=target))
for followobj in person_query:
if followobj.user1 == target :
if followobj.user2 in timesAppeared:
friendsOfTarget.add(followobj.user2)
else:
timesAppeared.append(followobj.user2)
elif followobj.user2 == target:
if followobj.user1 in timesAppeared:
friendsOfTarget.add(followobj.user1)
else:
timesAppeared.append(followobj.user1)
return friendsOfTarget
|
StarcoderdataPython
|
3508621
|
<reponame>zatcsc/capreolus
# @Collection.register
# class MSMarco(Collection):
# module_name = "msmarco"
# config_keys_not_in_path = ["path"]
# collection_type = "TrecCollection"
# generator_type = "DefaultLuceneDocumentGenerator"
# config_spec = [ConfigOption("path", "/GW/NeuralIR/nobackup/msmarco/trec_format", "path to corpus")]
|
StarcoderdataPython
|
325665
|
# coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utilities for image preprocessing."""
import tensorflow as tf
def stack_images_channelwise(obs, batch_size):
# Use static shapes for hist, width, height, and channels since TPUs prefer
# static shapes for some image ops. The batch size passed in may still be
# dynamic.
nhist = obs.get_shape()[1]
nw = obs.get_shape()[2]
nh = obs.get_shape()[3]
nc = obs.get_shape()[4]
obs = tf.reshape(obs, tf.concat([[batch_size], [nw, nh, nc * nhist]], axis=0))
return obs
def preprocess(images, target_height, target_width):
"""Converts to [0,1], stacks, resizes."""
# Scale to [0, 1].
images = tf.image.convert_image_dtype(images, dtype=tf.float32)
# Stack images channel-wise.
batch_size = tf.shape(images)[0]
images = stack_images_channelwise(images, batch_size)
# Resize to target height and width.
images = tf.image.resize(images, [target_height, target_width])
return images
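# Minimal usage sketch (not part of the original module; shapes are illustrative):
# a batch of 2 observations, each a history of 3 RGB frames of size 64x64, is
# stacked channel-wise and resized, yielding a [2, 96, 96, 9] float32 tensor.
if __name__ == "__main__":
    dummy_obs = tf.zeros([2, 3, 64, 64, 3], dtype=tf.uint8)  # [B, hist, H, W, C]
    processed = preprocess(dummy_obs, target_height=96, target_width=96)
    print(processed.shape)  # expected: (2, 96, 96, 9)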
|
StarcoderdataPython
|
1756289
|
'''
Module functions to properly interact with Database
'''
import sqlite3
def add_data(atr, connection):
with connection:
c = connection.cursor()
c.execute("INSERT INTO tempos_atr_2019 (IDAtracacao,"
"TEsperaAtracacao,"
"TEsperaInicioOp,"
"TOperacao,"
"TEsperaDesatracacao,"
"TAtracado,"
"TEstadia) VALUES (:idatr, :tespatr, :tespin, :top, :tespout, :ttot, :testad)",
{'idatr': atr['idatr'],
'tespatr': atr['tespatr'],
'tespin': atr['tespin'],
'top': atr['top'],
'tespout': atr['tespout'],
'ttot': atr['ttot'],
'testad': atr['testad']})
return {'Status': 'ok'}
def find_atr_exact(atrid, connection):
c = connection.cursor()
c.execute("SELECT * FROM atrstats WHERE IDAtracacao=:AtrID", {'AtrID': atrid})
result = c.fetchall()
if len(result) == 0:
        message = f'Atracacao {atrid} not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_imo_exact(imo, connection):
c = connection.cursor()
c.execute("SELECT * FROM atrstats WHERE [NdoIMO]=:NIMO", {'NIMO': imo})
result = c.fetchall()
if len(result) == 0:
message = f'Ship {imo} not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_trips_mmyyyy(month, year, connection):
c = connection.cursor()
datestring = f'___{month}/{year}'
c.execute(f"SELECT * FROM atrstats WHERE [Data Chegada] LIKE '{datestring}%' ORDER BY [DataChegada]")
result = c.fetchall()
# No ship found
if len(result) == 0:
message = f'Ship not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_ships_by_trips(imolist, connection):
c = connection.cursor()
atrid = ", ".join(map(str, imolist))
query = f"SELECT * FROM atrstats WHERE IDAtracacao IN ({atrid})"
c.execute(query)
result = c.fetchall()
if len(result) == 0:
message = f'Load ID {atrid} not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_imolist_exact(imolist, connection):
    '''
    :param imolist: comma-separated string of IMO numbers, interpolated into the IN (...) clause
    :param connection: sqlite3 connection used to query the atrstats table
    :return: dict with Status and Message (matching rows, or a not-found message)
    '''
#imolist = ", ".join(map(str, imolist))
c = connection.cursor()
c.execute(f"SELECT * FROM atrstats WHERE [NdoIMO] IN ({imolist})")
result = c.fetchall()
# No ship found
if len(result) == 0:
message = f'Ship not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_load_exact(loadid, connection):
c = connection.cursor()
loadid = ", ".join(map(str, loadid))
query = f"SELECT * FROM loadsinfo WHERE IDAtracacao IN ({loadid})"
c.execute(query)
result = c.fetchall()
if len(result) == 0:
message = f'Load ID {loadid} not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_port_atr(atrid, connection):
c = connection.cursor()
    atrid = ", ".join(map(str, atrid))
    query = f"SELECT * FROM loadsinfo WHERE IDAtracacao IN ({atrid})"
    c.execute(query)
    result = c.fetchall()
    if len(result) == 0:
        message = f'Load ID {atrid} not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_port_loads(portid, connection):
c = connection.cursor()
'''
"SELECT * FROM atrstats WHERE IDAtracacao=:AtrID", {'AtrID': atrid}
Destino=:PortID
'''
query = f"SELECT * FROM loadsinfo WHERE Origem=:PortID " \
f"UNION " \
f"SELECT * FROM loadsinfo WHERE dESTINO=:PortID"
c.execute(query, {'PortID': portid})
result = c.fetchall()
if len(result) == 0:
message = f'Load ID {portid} not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
#print(find_load_exact([900859, 941672], connection=sqlite3.connect('./Main/database/data/atr_info.db')))
def find_imo_blank(connection):
c = connection.cursor()
c.execute("SELECT * FROM atrstats WHERE [NdoIMO] IS NULL "
"OR [NdoIMO] = 0 "
"OR [NdoIMO] = ' ' "
"OR TRIM([NdoIMO]) = ''")
#c.execute("SELECT * FROM atrstats")
result = c.fetchall()
if len(result) == 0:
        message = 'Ships not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def count(connection):
c = connection.cursor()
#c.execute("SELECT * FROM atrstats WHERE [NdoIMO] IS NULL OR [NdoIMO] = 0 OR [NdoIMO] = ' ' ")
c.execute("SELECT COUNT(*) FROM atrstats")
result = c.fetchall()
if len(result) == 0:
        message = 'Ships not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_employee_close(namelike, connection):
c = connection.cursor()
likename = f'%{namelike}%'
c.execute("SELECT * FROM employees_list WHERE name LIKE :name", {'name': likename})
result = c.fetchall()
if len(result) == 0:
message = f'Employee name similar to {namelike} not found.'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_employee_roles(rolelike, connection):
c = connection.cursor()
likename = f'%{rolelike}%'
c.execute("SELECT * FROM employees_list WHERE role LIKE :role", {'role': likename})
result = c.fetchall()
if len(result) == 0:
message = f'Employee role similar to {rolelike} not found.'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def find_employee_exactid(empid, connection):
c = connection.cursor()
c.execute("SELECT * FROM employees_list WHERE id=:id", {'id': empid})
result = c.fetchall()
if len(result) == 0:
message = f'Employee {empid} not found'
result = {'Status': 'ok', 'Message': message}
return result
return {'Status': 'ok', 'Message': result}
def update_role(employeeid, newrole, connection):
with connection:
c = connection.cursor()
c.execute("SELECT * FROM employees_list WHERE id=:id", {'id': employeeid})
result = c.fetchall()
if len(result) > 0:
c.execute("""UPDATE employees_list SET role = :role
WHERE id = :id""",
{'id': employeeid, 'role': newrole})
message = f'Employee {employeeid}, changed role to {newrole}.'
return {'Status': 'ok', 'Message': message}
message = f'There is no occurrence of id {employeeid}'
return {'Status': 'error', 'Message': message}
def remove_employee(employeeid, connection):
with connection:
c = connection.cursor()
c.execute("SELECT * FROM employees_list WHERE id=:id", {'id': employeeid})
result = c.fetchall()
if len(result) > 0:
c.execute("DELETE from employees_list WHERE id = :id", {'id': employeeid})
message = f'Removed employee ID: {employeeid} from database.'
return {'Status': 'ok', 'Message': message}
message = f'There is no occurrence of id {employeeid}'
return {'Status': 'error', 'Message': message}
#conn = sqlite3.connect('./data/atr_info.db')
#print(find_trips_mmyyyy(month='01', year='2010', connection=conn))
|
StarcoderdataPython
|
1649123
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME>
#
import sys
import os
import pandas as pd
def main():
# Args
in_ot = '../../output/nearest_gene/180830/nearest_gene.tsv.gz'
in_vep = 'output/vcf_nearest_gene.txt'
# Load
ot = pd.read_csv(in_ot, sep='\t', header=0, nrows=1000000)
vep = pd.read_csv(in_vep, sep='\t', comment='#', header=None).iloc[:, [1, 13]]
vep.columns = ['loc', 'info']
# Parse VEP fields
vep['vep_chrom'], vep['vep_pos'] = vep['loc'].str.split(':').str
vep['vep_nearest'] = vep['info'].apply(lambda x: x.split('NEAREST=')[-1])
vep = vep.loc[:, ['vep_chrom', 'vep_pos', 'vep_nearest']]
# Parse ot fields
ot['ot_chrom'], ot['ot_pos'], ot['ot_ref'], ot['ot_alt'] = (
ot.varid.str.split('_').str
)
# Merge ot and vep
merged = pd.merge(ot, vep, left_on=['ot_chrom', 'ot_pos'],
right_on=['vep_chrom', 'vep_pos'], how='inner')
merged = merged.drop_duplicates(subset=['varid'])
# Find none matching rows
non_match = (merged.ensemblid_protein_coding != merged.vep_nearest)
print("There are {0} of {1} variants that don't match the VEP output".format(
non_match.sum(), merged.shape[0]))
return 0
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
5115185
|
"""
Longest Word: Given a list of words, write a program to find the longest word made of other words
in the list.
Assume
- A word could be formed by any number of other words.
- A composed word contains only given words, with no gap in between.
(17.15, p583)
SOLUTION: DP with memoization to cache the results.
O(W lg W + WC) time, where W is number of words and C is number of characters.
O(W + C) space: memo table and depth of call stack
"""
def _is_composed(word, is_original, memo):
"""
:param word:
:param is_original: If it is the original word, do not retrieve result from memo table.
:param memo: memo table to cache the results of previous recurses
:return: True if this word is composed of other words, False otherwise.
"""
if not is_original and word in memo:
return memo[word]
for i in range(1, len(word)): # O(C) time
left = word[:i]
right = word[i:]
if left in memo and memo[left] is True and _is_composed(right, False, memo):
return True
memo[word] = False
return False
def longest_word(words):
memo = {}
for word in words:
memo[word] = True
# start with longest word first, so sort in descending length
words.sort(key=len, reverse=True) # O(W lg W) time, sort in-place
for word in words: # O(WC) time
if _is_composed(word, is_original=True, memo=memo):
return word
return None
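# Minimal demo of the function above (hypothetical word list): "ratcatdogcat" can be
# split into "rat" + "cat" + "dog" + "cat", all of which appear in the list, so it is
# the longest composable word here.
if __name__ == "__main__":
    words = ["cat", "cats", "dog", "rat", "catsdogcats", "dogcatsdog",
             "hippopotamuses", "ratcatdogcat"]
    print(longest_word(words))  # expected: "ratcatdogcat"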
|
StarcoderdataPython
|
4973901
|
# ============================================================================
# FILE: output.py
# AUTHOR: momotaro <<EMAIL>>
# License: MIT license
# ============================================================================
from .base import Base
import re
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'output'
self.kind = 'word'
self.syntax_name = 'deniteSource_output'
def on_init(self, context):
command = ' '.join(context['args'])
if not command:
command = self.vim.call('input',
'Please input Vim command: ', context['input'])
context['__command'] = command
def gather_candidates(self, context):
message = self.vim.call('denite#util#redir', context['__command'])
message = re.sub('^(\r\n|\n)', '', message)
return list(map(lambda x: { 'word': x, 'action__text': x },
re.split('\r\n|\n', message)))
def highlight_syntax(self):
self.vim.command('syntax include @Vim syntax/vim.vim')
self.vim.command('syntax region ' + self.syntax_name + 'Vim'
' start=// end=/$/ contains=@Vim containedin=' + self.syntax_name)
|
StarcoderdataPython
|
229559
|
import argparse
import time
import json
import csv
import re
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
from requests_html import HTMLSession, HTML
from lxml.etree import ParserError
from credential import username, password, email
#list of elements to scrape
post_links=[]
post_ids=[]
shares=[]
dates=[]
times=[]
likes=[]
comments=[]
texts=[]
#function to scroll from 0 position to end position
def scroll_to_bottom(driver):
old_position = 0
new_position = None
while new_position != old_position:
time.sleep(.5)
# Get old scroll position
old_position = driver.execute_script(
("return (window.pageYOffset !== undefined) ?"
" window.pageYOffset : (document.documentElement ||"
" document.body.parentNode || document.body);"))
# Sleep and Scroll
time.sleep(1)
driver.execute_script((
"var scrollingElement = (document.scrollingElement ||"
" document.body);scrollingElement.scrollTop ="
" scrollingElement.scrollHeight;"))
# Get new position
new_position = driver.execute_script(
("return (window.pageYOffset !== undefined) ?"
" window.pageYOffset : (document.documentElement ||"
" document.body.parentNode || document.body);"))
#calling chrome driver to login
driver = webdriver.Chrome()
driver.get(f"https://m.facebook.com/{username}/")
driver.find_element_by_css_selector("a._4n43").click()
time.sleep(2)
driver.find_element_by_name("email").send_keys(email)
driver.find_element_by_name("pass").send_keys(password)
driver.find_element_by_name("login").click()
time.sleep(1)
scroll_to_bottom(driver)
page_source = driver.page_source
#scraping html page data
soup = BeautifulSoup(page_source, 'lxml')
section=soup.findAll('div',{'class':'_3drp'})
for a in section:
#for scraping post link and id
link=a.find('a',attrs={'class':'_5msj'})
post_link=link['href']
part = post_link.split('&')[0]
post_id=part.split('=')[1]
post_links.append(post_link)
post_ids.append(post_id)
    #for scraping date and time of post
    post_date = a.find('abbr')
    post_data = post_date.get_text(strip=True).split('at')
    post_day = post_data[0]
    post_time = post_data[1]  # local names avoid shadowing the imported time module
    dates.append(post_day)
    times.append(post_time)
#for scraping like of post
    like = a.find('div', attrs={'class': '_1g06'})
    if like is None or not like.get_text(strip=True):
        likes.append("0 likes")
    else:
        likes.append(like.get_text(strip=True))
#for scraping text of post
    text = a.find('div', {'class': '_5rgt _5nk5 _5msi'})
    post_text = text.find('span') if text is not None else None
    if post_text is None or not post_text.get_text(strip=True):
        texts.append(" ")
    else:
        texts.append(post_text.get_text(strip=True))
#for scraping comment and share of post
comm_shar=a.findAll('span', attrs={'class':'_1j-c'})
comments.append(comm_shar[0].get_text(strip=True))
shares.append(comm_shar[1].get_text(strip=True))
#Appending all the list data to a pd dataframe
df = pd.DataFrame({'dates':dates,'Time':times,'Post Links':post_links,'Post Ids':post_ids,'Text':texts,'like':likes,'Comment':comments ,'Shares':shares})
#converting pd raw data to csv file
df.to_csv('facebook_scraped_post.csv', index=False, encoding='utf-16')
|
StarcoderdataPython
|
3215856
|
<reponame>mosout/oneflow
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList, type_name_to_flow_type
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_argsort(test_case, data_shape, axis, descending, data_type, device):
input = flow.tensor(
np.random.randn(*data_shape),
dtype=type_name_to_flow_type[data_type],
device=flow.device(device),
)
of_out = flow.argsort(input, dim=axis, descending=descending)
np_input = -input.numpy() if descending else input.numpy()
np_out = np.argsort(np_input, axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_tensor_argsort(test_case, data_shape, axis, descending, data_type, device):
input = flow.tensor(
np.random.randn(*data_shape),
dtype=type_name_to_flow_type[data_type],
device=flow.device(device),
)
of_out = input.argsort(dim=axis, descending=descending)
np_input = -input.numpy() if descending else input.numpy()
np_out = np.argsort(np_input, axis=axis)
test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
@flow.unittest.skip_unless_1n1d()
class TestArgsort(flow.unittest.TestCase):
def test_argsort(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [_test_argsort, _test_tensor_argsort]
arg_dict["data_shape"] = [(2, 6, 5, 4), (3, 4, 8)]
arg_dict["axis"] = [-1, 0, 2]
arg_dict["descending"] = [True, False]
arg_dict["data_type"] = ["double", "float32", "int32"]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(auto_backward=False, check_graph=False)
def test_argsort_with_random_data(test_case):
device = random_device()
x = random_pytorch_tensor(ndim=4).to(device)
y = torch.argsort(
x, dim=random(low=-4, high=4).to(int), descending=random_bool()
)
return y
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3461732
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InvoiceOrderInfo(object):
def __init__(self):
self._article_code = None
self._article_fee = None
self._article_id = None
self._article_name = None
self._buy_date = None
self._end_date = None
self._ext_json = None
self._fact_total_fee = None
self._invoice_kind = None
self._item_code = None
self._item_id = None
self._item_name = None
self._order_id = None
self._order_type = None
self._provider_key = None
self._start_date = None
self._tax_feature = None
@property
def article_code(self):
return self._article_code
@article_code.setter
def article_code(self, value):
self._article_code = value
@property
def article_fee(self):
return self._article_fee
@article_fee.setter
def article_fee(self, value):
self._article_fee = value
@property
def article_id(self):
return self._article_id
@article_id.setter
def article_id(self, value):
self._article_id = value
@property
def article_name(self):
return self._article_name
@article_name.setter
def article_name(self, value):
self._article_name = value
@property
def buy_date(self):
return self._buy_date
@buy_date.setter
def buy_date(self, value):
self._buy_date = value
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, value):
self._end_date = value
@property
def ext_json(self):
return self._ext_json
@ext_json.setter
def ext_json(self, value):
self._ext_json = value
@property
def fact_total_fee(self):
return self._fact_total_fee
@fact_total_fee.setter
def fact_total_fee(self, value):
self._fact_total_fee = value
@property
def invoice_kind(self):
return self._invoice_kind
@invoice_kind.setter
def invoice_kind(self, value):
self._invoice_kind = value
@property
def item_code(self):
return self._item_code
@item_code.setter
def item_code(self, value):
self._item_code = value
@property
def item_id(self):
return self._item_id
@item_id.setter
def item_id(self, value):
self._item_id = value
@property
def item_name(self):
return self._item_name
@item_name.setter
def item_name(self, value):
self._item_name = value
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, value):
self._order_type = value
@property
def provider_key(self):
return self._provider_key
@provider_key.setter
def provider_key(self, value):
self._provider_key = value
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, value):
self._start_date = value
@property
def tax_feature(self):
return self._tax_feature
@tax_feature.setter
def tax_feature(self, value):
self._tax_feature = value
def to_alipay_dict(self):
params = dict()
if self.article_code:
if hasattr(self.article_code, 'to_alipay_dict'):
params['article_code'] = self.article_code.to_alipay_dict()
else:
params['article_code'] = self.article_code
if self.article_fee:
if hasattr(self.article_fee, 'to_alipay_dict'):
params['article_fee'] = self.article_fee.to_alipay_dict()
else:
params['article_fee'] = self.article_fee
if self.article_id:
if hasattr(self.article_id, 'to_alipay_dict'):
params['article_id'] = self.article_id.to_alipay_dict()
else:
params['article_id'] = self.article_id
if self.article_name:
if hasattr(self.article_name, 'to_alipay_dict'):
params['article_name'] = self.article_name.to_alipay_dict()
else:
params['article_name'] = self.article_name
if self.buy_date:
if hasattr(self.buy_date, 'to_alipay_dict'):
params['buy_date'] = self.buy_date.to_alipay_dict()
else:
params['buy_date'] = self.buy_date
if self.end_date:
if hasattr(self.end_date, 'to_alipay_dict'):
params['end_date'] = self.end_date.to_alipay_dict()
else:
params['end_date'] = self.end_date
if self.ext_json:
if hasattr(self.ext_json, 'to_alipay_dict'):
params['ext_json'] = self.ext_json.to_alipay_dict()
else:
params['ext_json'] = self.ext_json
if self.fact_total_fee:
if hasattr(self.fact_total_fee, 'to_alipay_dict'):
params['fact_total_fee'] = self.fact_total_fee.to_alipay_dict()
else:
params['fact_total_fee'] = self.fact_total_fee
if self.invoice_kind:
if hasattr(self.invoice_kind, 'to_alipay_dict'):
params['invoice_kind'] = self.invoice_kind.to_alipay_dict()
else:
params['invoice_kind'] = self.invoice_kind
if self.item_code:
if hasattr(self.item_code, 'to_alipay_dict'):
params['item_code'] = self.item_code.to_alipay_dict()
else:
params['item_code'] = self.item_code
if self.item_id:
if hasattr(self.item_id, 'to_alipay_dict'):
params['item_id'] = self.item_id.to_alipay_dict()
else:
params['item_id'] = self.item_id
if self.item_name:
if hasattr(self.item_name, 'to_alipay_dict'):
params['item_name'] = self.item_name.to_alipay_dict()
else:
params['item_name'] = self.item_name
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.order_type:
if hasattr(self.order_type, 'to_alipay_dict'):
params['order_type'] = self.order_type.to_alipay_dict()
else:
params['order_type'] = self.order_type
if self.provider_key:
if hasattr(self.provider_key, 'to_alipay_dict'):
params['provider_key'] = self.provider_key.to_alipay_dict()
else:
params['provider_key'] = self.provider_key
if self.start_date:
if hasattr(self.start_date, 'to_alipay_dict'):
params['start_date'] = self.start_date.to_alipay_dict()
else:
params['start_date'] = self.start_date
if self.tax_feature:
if hasattr(self.tax_feature, 'to_alipay_dict'):
params['tax_feature'] = self.tax_feature.to_alipay_dict()
else:
params['tax_feature'] = self.tax_feature
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InvoiceOrderInfo()
if 'article_code' in d:
o.article_code = d['article_code']
if 'article_fee' in d:
o.article_fee = d['article_fee']
if 'article_id' in d:
o.article_id = d['article_id']
if 'article_name' in d:
o.article_name = d['article_name']
if 'buy_date' in d:
o.buy_date = d['buy_date']
if 'end_date' in d:
o.end_date = d['end_date']
if 'ext_json' in d:
o.ext_json = d['ext_json']
if 'fact_total_fee' in d:
o.fact_total_fee = d['fact_total_fee']
if 'invoice_kind' in d:
o.invoice_kind = d['invoice_kind']
if 'item_code' in d:
o.item_code = d['item_code']
if 'item_id' in d:
o.item_id = d['item_id']
if 'item_name' in d:
o.item_name = d['item_name']
if 'order_id' in d:
o.order_id = d['order_id']
if 'order_type' in d:
o.order_type = d['order_type']
if 'provider_key' in d:
o.provider_key = d['provider_key']
if 'start_date' in d:
o.start_date = d['start_date']
if 'tax_feature' in d:
o.tax_feature = d['tax_feature']
return o
|
StarcoderdataPython
|
6539495
|
file = open('./input')
canvas = {}
position = (0, 0)
direction = 0
input = 1
paint = True
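# Context (inferred from the code below, Advent of Code 2019 day 11 style): the
# Intcode program drives a painting robot. Each pair of outputs is interpreted as
# (paint colour, turn direction): the first value paints the current panel
# (0 = black, 1 = white), the second turns the robot (0 = left, 1 = right), after
# which it moves forward one panel. The colour of the panel the robot now stands
# on is fed back to the program via `input`.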
def output(x):
global input
global position
global direction
global paint
if paint:
canvas[position] = x
else:
if x == 0:
direction -= 1
if x == 1:
direction += 1
if direction < 0:
direction = 3
elif direction > 3:
direction = 0
if direction == 0: # up
position = (position[0] - 1, position[1])
if direction == 1: # right
position = (position[0], position[1] + 1)
if direction == 2: # down
position = (position[0] + 1, position[1])
if direction == 3: # left
position = (position[0], position[1] - 1)
input = canvas[position] if position in canvas else 0
paint = not paint
def getValue(mode, raw, mem):
if mode == 0:
# position
if (raw >= len(mem)):
mem.extend([0 for _ in range(raw - len(mem) + 1)])
return mem[raw]
if mode == 1:
# value
return raw
if mode == 2:
# relative
if (raw + relativeBase >= len(mem)):
mem.extend([0 for _ in range(raw + relativeBase - len(mem) + 1)])
return mem[raw + relativeBase]
def assign(target, mem, value, mode=0):
if mode == 2:
target += relativeBase
if target >= len(mem):
mem.extend([0 for _ in range(target - len(mem) + 1)])
mem[target] = value
nums = [int(s) for s in file.readline().split(',')]
cur = 0
relativeBase = 0
while (nums[cur] % 100 != 99):
inst = nums[cur]
opcode = inst % 100
c = (inst // 100) % 10
b = (inst // 1000) % 10
a = (inst // 10000) % 10
if opcode == 1:
target = nums[cur + 3]
p1 = nums[cur + 1]
p2 = nums[cur + 2]
n1 = getValue(c, p1, nums)
n2 = getValue(b, p2, nums)
assign(target, nums, n1 + n2, a)
cur += 4
elif opcode == 2:
target = nums[cur + 3]
p1 = nums[cur + 1]
p2 = nums[cur + 2]
n1 = getValue(c, p1, nums)
n2 = getValue(b, p2, nums)
assign(target, nums, n1 * n2, a)
cur += 4
elif opcode == 3:
target = nums[cur + 1]
assign(target, nums, input, c)
cur += 2
elif opcode == 4:
p1 = nums[cur + 1]
output(getValue(c, p1, nums))
cur += 2
elif opcode == 5:
p1 = nums[cur + 1]
p2 = nums[cur + 2]
if (getValue(c, p1, nums)):
cur = getValue(b, p2, nums)
else:
cur += 3
elif opcode == 6:
p1 = nums[cur + 1]
p2 = nums[cur + 2]
if not (getValue(c, p1, nums)):
cur = getValue(b, p2, nums)
else:
cur += 3
elif opcode == 7:
p1 = nums[cur + 1]
p2 = nums[cur + 2]
target = nums[cur + 3]
n1 = getValue(c, p1, nums)
n2 = getValue(b, p2, nums)
assign(target, nums, 1 if n1 < n2 else 0, a)
cur += 4
elif opcode == 8:
p1 = nums[cur + 1]
p2 = nums[cur + 2]
target = nums[cur + 3]
n1 = getValue(c, p1, nums)
n2 = getValue(b, p2, nums)
assign(target, nums, 1 if n1 == n2 else 0, a)
cur += 4
elif opcode == 9:
p = nums[cur + 1]
n = getValue(c, p, nums)
relativeBase += n
cur += 2
else:
break
whiteLocs = [loc for loc in canvas if canvas[loc] == 1]
minY = min(whiteLocs, key=lambda loc: loc[0])[0]
maxY = max(whiteLocs, key=lambda loc: loc[0])[0]
minX = min(whiteLocs, key=lambda loc: loc[1])[1]
maxX = max(whiteLocs, key=lambda loc: loc[1])[1]
for row in range(minY, maxY + 1):
s = ''
for col in range(minX, maxX + 1):
if (row, col) in whiteLocs:
s += '#'
else:
s += ' '
print(s)
|
StarcoderdataPython
|
6401658
|
<gh_stars>100-1000
import numpy as np
import cv2
import tensorflow as tf
def postprocess_flow(flow):
"""
Function to visualize the flow.
Args:
flow : [H,W,2] optical flow
    Returns:
        grayscale (3-channel BGR) image to visualize the flow
    """
    flow = flow[:, :, 0]  # do it dirty, only the first channel
    min_flow = np.min(flow)
    rescaled = flow - min_flow            # shift so the minimum becomes zero
    max_rescaled = np.max(rescaled)
    normalized = rescaled / max_rescaled  # scale to [0, 1]
    normalized = np.asarray(normalized * 255, np.uint8)
normalized = cv2.cvtColor(normalized, cv2.COLOR_GRAY2BGR)
return normalized
def postprocess_image(image):
"""
    Function to un-normalize images.
    Args:
        image : [H,W,3] image (assumed normalized to [-0.5, 0.5])
    Returns:
        BGR uint8 image ready for display
"""
p_image = image + 0.5
un_normalized = np.asarray(p_image * 255, np.uint8)
un_normalized = cv2.cvtColor(un_normalized, cv2.COLOR_RGB2BGR)
return un_normalized
def postprocess_mask(mask):
"""
    Function to colorize a binary mask for visualization.
    Args:
        mask : [H,W,1] mask with values in [0, 1]
    Returns:
        uint8 image with the mask placed in the middle (green) channel
"""
    # Place the mask in the middle (green) channel of an otherwise black image.
un_normalized = np.asarray(mask * 255.0, np.uint8)
tile = np.zeros_like(un_normalized, dtype=np.uint8)
un_normalized = np.concatenate((tile, un_normalized, tile), axis=-1)
#un_normalized = cv2.cvtColor(un_normalized, cv2.COLOR_RGB2BGR)
return un_normalized
def generate_error_map(image, losses, box_lenght):
"""
Function to overlap an error map to an image
Args:
image: input image
losses: list of losses, one for each masked part of the flow.
    Returns:
error_map: overlapped error_heatmap and image.
"""
box_lenght = int(box_lenght)
# Assert that everything is correct
num_boxes = int(image.shape[0] / box_lenght) * int(image.shape[1] / box_lenght)
assert(num_boxes ==len(losses))
img_width = int(np.floor(image.shape[1] / box_lenght) * box_lenght)
img_height = int(np.floor(image.shape[0] / box_lenght) * box_lenght)
image = image[:img_height, :img_width]
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
heatmap = np.ones_like(image[:,:,0])
res_heatmap = np.reshape(heatmap, (box_lenght, box_lenght, num_boxes))
res_heatmap = res_heatmap * np.array(losses)
heatmap = np.zeros((img_height, img_width))
# ugly for loop, unable to solve atm
i = 0
for y in np.arange(0, img_height, step=box_lenght):
for x in np.arange(0, img_width, step=box_lenght):
# convert to x,y coordinates
heatmap[y: y+box_lenght, x: x+box_lenght] = res_heatmap[:,:,i]
i+=1
heatmap = np.asarray(heatmap / np.max(heatmap) * 255, dtype=np.uint8)
heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
final = cv2.addWeighted(heatmap_img, 0.5, postprocess_image(image), 0.5, 0)
return final
def tf_iou_computation(gt_masks, pred_masks):
epsilon = tf.constant(1e-8) # To avoid division by zero
pred_masks = tf.cast(pred_masks, tf.bool)
union=tf.reduce_sum(tf.cast(tf.logical_or(gt_masks, pred_masks),
dtype=tf.float32), axis=[1,2,3]) + epsilon
IoU = tf.reduce_sum(tf.cast(tf.logical_and(gt_masks, pred_masks),
dtype=tf.float32), axis=[1,2,3]) / union
return IoU
def disambiguate_forw_back(pred_masks, threshold=0.1):
border_th = tf.constant(0.6)
# Might be redundant but makes no assumption
pred_masks = tf.cast(pred_masks > threshold, tf.float32)
pred_masks_compl = 1.0 - pred_masks
scores = compute_boundary_score_tf(pred_masks)
scores = tf.reshape(scores, [-1,1,1,1]) < border_th
scores = tf.cast(scores, tf.float32)
forward_masks = scores * pred_masks + (1.0 - scores) * pred_masks_compl
return forward_masks
def compute_all_IoU(pred_masks, gt_masks, threshold=0.1):
gt_masks= gt_masks > 0.01
object_masks = disambiguate_forw_back(pred_masks, threshold)
IoU = tf_iou_computation(gt_masks=gt_masks, pred_masks=object_masks)
return IoU
def compute_boundary_score(segmentation):
"""
This score indicates how many image borders the segmentation
mask occupies. If lower than a threshold, then it indicates foreground,
else background. The threshold is generally set to 0.6, which means
that to be background, the mask has to (approx.) occupy more than two borders.
"""
H = segmentation.shape[0]
W = segmentation.shape[1]
up_bord = segmentation[0:2, :]
bottom_bord = segmentation[H-2:H, :]
left_bord = segmentation[:, 0:2]
right_bord = segmentation[:, W-2:W]
border_occ = np.sum(up_bord)+np.sum(bottom_bord)+np.sum(left_bord)+np.sum(right_bord)
border_occ /= 1.0*(up_bord.size+bottom_bord.size+left_bord.size+right_bord.size)
return border_occ
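# Worked example for the score above (illustrative numbers, not from the original
# code): for a 100x100 mask that covers only the top two rows, the border sums are
# 200 (top) + 0 (bottom) + 4 (left corner overlap) + 4 (right corner overlap) = 208
# out of 800 sampled border pixels, giving a score of 0.26. That is below the usual
# 0.6 threshold, so the mask is treated as foreground rather than background.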
def compute_boundary_score_tf(segmentation):
"""
    Same as above, but in TensorFlow.
"""
height, width = segmentation.get_shape().as_list()[1:3]
up_bord = segmentation[:,0:2, :, :]
bottom_bord = segmentation[:,height-2:height,:,:]
width_bord_size = 2.0 * width
left_bord = segmentation[:, :, 0:2, :]
right_bord = segmentation[:, :, width-2:width, :]
height_bord_size = 2.0 * height
border_occ = tf.reduce_sum(up_bord, axis=[1,2,3]) + \
tf.reduce_sum(bottom_bord, axis=[1,2,3]) + \
tf.reduce_sum(left_bord, axis=[1,2,3]) + \
tf.reduce_sum(right_bord, axis=[1,2,3]) # [B]
border_occ /= (2*width_bord_size + 2*height_bord_size)
return border_occ
|
StarcoderdataPython
|
3336865
|
<filename>python/guided/seeds/seeders/Seed.py
from typing import List, Tuple
from guided.model.Guide import TransportType, Guide
from guided.model.Location import Label, location
from guided.model.Row import Row
from guided.seeds.seeders.SeedUser import SeedUser
class Seed:
def __init__(self):
self.generated = False
self._users: List[SeedUser] = []
def user(self, username: str) -> SeedUser:
user = SeedUser(username)
self._users.append(user)
return user
def guide_ids(self) -> List[str]:
ids: List[str] = []
for user in self._users:
ids.extend(user.guide_ids())
return ids
def rows(self) -> List[Row]:
if self.generated:
raise Exception("Already generated")
self.generated = True
rows: List[Row] = []
for user in self._users:
rows.extend(user.rows())
return rows
if __name__ == '__main__':
seed = Seed()
theo = seed.user('theo')
theo.guide(
'Some guide',
TransportType.CAR
).spots([
('Home', Label.Worthing, 0),
('Horsham', 'asd', 'st'),
])
rows = seed.rows()
for row in rows:
print(row)
print(len(rows), 'rows')
|
StarcoderdataPython
|
88920
|
import os  # needed for os.path.basename below
import apache_beam as beam
import tensorflow as tf
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from regnety.utils.image_utils import *
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def create_collection(list1, list2, list3):
final_list = [(list1[i], list2[i], list3[i]) for i in range(len(list1))]
with beam.Pipeline() as pipeline:
coll = pipeline | beam.Create(final_list)
return coll
class MakeImageDoFn(beam.DoFn):
def process(self, batch):
ret_examples = []
for info_tuple in batch:
filepath, label, synset = info_tuple
image_str = tf.io.read_file(filepath)
if is_png(filepath):
image_str = png_to_jpeg(image_str)
if is_cmyk(filepath):
image_str = cmyk_to_rgb(image_str)
image_tensor = tf.io.decode_jpeg(image_str)
height, width = 512, 512
if not is_rgb(image_tensor):
image_tensor = tf.image.grayscale_to_rgb(image_tensor)
image_tensor = tf.cast(tf.image.resize(image_tensor, (512, 512)), tf.uint8)
image_str = tf.io.encode_jpeg(image_tensor)
assert len(image_tensor.shape) == 3
            # Build the tf.train.Example directly; filename and synset are encoded
            # to UTF-8 bytes as required by _bytes_feature.
            ret_examples.append(
                tf.train.Example(
                    features=tf.train.Features(
                        feature={
                            "image": _bytes_feature(image_str),
                            "height": _int64_feature(height),
                            "width": _int64_feature(width),
                            "filename": _bytes_feature(
                                os.path.basename(filepath).encode("utf8")
                            ),
                            "label": _int64_feature(label),
                            "synset": _bytes_feature(synset.encode("utf8")),
                        }
                    )
                ).SerializeToString()
            )
        return ret_examples
def __call__(self, *args):
self.process(*args)
class MakeExampleDoFn(beam.DoFn):
def process(self, batch):
examples = []
for example in batch:
            examples.append(example.SerializeToString())
return examples
def __call__(self, *args):
self.process(*args)
|
StarcoderdataPython
|
4987129
|
"""
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import serial
import struct
import configuration
maxangle = 45 # TODO: make this generally configurable
def bytetohex(bytearray):
"""
Returns hexadecimal string representation of byte array
Copied from StackOverflow
https://stackoverflow.com/questions/19210414/byte-array-to-hex-string
"""
return ''.join('{:02x}'.format(x) for x in bytearray)
class dmfe_wrapper:
"""
Class that implements the rover motor control methods for David M Flynn
Enterprises motor control boards.
"""
def __init__(self):
self.sp = None
def check_sp(self):
""" Raises error if we haven't opened serial port yet. """
if self.sp == None:
raise ValueError("DMFE serial communication is not available.")
def connect(self):
"""
Read serial port connection parameters from JSON configuration file
and open the port.
"""
# Read parameter file
config = configuration.configuration("dmfe")
connectparams = config.load()['connect']
# Open serial port with parameters
s = serial.Serial()
s.baudrate = connectparams['baudrate']
s.port = connectparams['port']
s.timeout = connectparams['timeout']
s.open()
if s.is_open:
self.sp = s
def close(self):
"""
Closes down the serial port
"""
if self.sp.is_open:
self.sp.close()
self.sp = None
def send(self, device_id, command, data=b'\x00\x00\x00'):
"""
Send a command to a DMFE bus device, taking care of the header and
checksum calculation for a command packet.
"""
self.check_sp()
packet = bytearray([0xDD, 0xDD])
# Sender is master with ID of 1
packet.append(1)
# Append receiver ID
if device_id < 2 or device_id > 0xFE:
raise ValueError("Device ID {} is out of valid range 0x02 to 0xFE".format(device_id))
packet.append(device_id)
# Append command
packet.append(command) # TBD: need to validate command?
# Append data
packet = packet + data # TBD: need to validate data is bytearray of size 3?
# Calculate & append checksum
checksum = 0
for i in range (2,8):
checksum = checksum ^ packet[i]
packet.append(checksum)
self.sp.write(packet)
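    # Illustrative packet layout (computed from the logic above, not from a device
    # datasheet): send(2, 0x82, data2byte(2048)) writes
    #   DD DD 01 02 82 00 08 00 89
    # i.e. two 0xDD header bytes, sender 0x01, receiver 0x02, command 0x82, three
    # little-endian data bytes 00 08 00 (position 2048), and the XOR checksum 0x89
    # taken over bytes 2..7.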
def read_raw(self, length=100):
"""
Reads a stream of bytes from serial device and returns it without any
attempts at parsing or validation
"""
self.check_sp()
return bytearray(self.sp.read(length))
def read_ack(self):
"""
We expect to receive a single byte 0xFF as acknowledgement
"""
self.check_sp()
r = bytearray(self.sp.read(1))
if len(r) == 0:
raise ValueError("Expected single byte 0xFF in response but received no data.")
if r[0] != 255:
raise ValueError("Expected 0xFF in response but got {}".format(r[0]))
def read_dmfeserialservo(self):
"""
We expect a device identifier string
"""
self.check_sp()
r = self.sp.read(18).decode('utf-8')
if len(r) == 0:
raise ValueError("Expected DMFE identification string but received no data.")
if r == 'DMFE Serial Brushe':
raise ValueError("Expected device to be serial servo but is serial brushed controller.")
if r != 'DMFE Serial Servo\n':
raise ValueError("Expected 'DMFE Serial Servo' but received {}".format(r))
def read_dmfeserialbrushed(self):
"""
We expect a device identifier string
"""
self.check_sp()
r = self.sp.read(20).decode('utf-8')
if len(r) == 0:
raise ValueError("Expected DMFE identification string but received no data.")
if r == 'DMFE Serial Servo\n':
raise ValueError("Expected device to be serial brushed but is serial servo.")
if r != 'DMFE Serial Brushed\n':
raise ValueError("Expected 'DMFE Serial Brushed' but received {}".format(r))
def read_datapacket(self, expectedid):
"""
We expect a data packet originating from device ID 'expectedid'
Returns the 4-byte data array
"""
self.check_sp()
r = self.sp.read(7)
if len(r) != 7:
raise ValueError("Expected data packet of 7 bytes but received only {}".format(len(r)))
if self.bytes_to_int(r[0]) != expectedid:
raise ValueError("Expected data packet from device {} but received from {}".format(expectedid, r[0]))
if self.bytes_to_int(r[1]) != 1:
raise ValueError("Expected data packet for master id 1 but received ID {}".format(r[1]))
checksum = 0
for i in range (0,6):
checksum = checksum ^ self.bytes_to_int(r[i])
if checksum != self.bytes_to_int(r[6]):
raise ValueError("Calculated checksum of {} does not match transmitted checksum {}".format(checksum,r[6]))
return r[2:6]
def version(self, id):
""" Identifier string for this motor controller """
return "DMFE"
@staticmethod
def check_id(id):
""" Verifies servo ID is within range and inverted status is boolean"""
if not isinstance(id, (tuple,list)):
raise ValueError("DMFE identifier must be a tuple")
if not isinstance(id[0], int):
raise ValueError("DMFE device address must be an integer")
if id[0] < 2 or id[0] > 253:
raise ValueError("DMFE device address {} outside of valid range 2-253".format(id[0]))
if not isinstance(id[1], int):
raise ValueError("DMFE device center position must be an integer")
if not isinstance(id[2], bool):
raise ValueError("Inverted status must be a boolean")
return tuple(id)
    @staticmethod
    def bytes_to_int(value):
        # Accept either a single int (Python 3 byte indexing) or a bytes object.
        if isinstance(value, int):
            return value
        return int.from_bytes(value, "big")
@staticmethod
def data1byte(data):
"""
Given parameter, pack it into a single byte. Pad remainder with zero
and return three element byte array
data1byte(2) returns b'\x02\x00\x00'
"""
return struct.pack("b",data) + b'\x00\x00'
@staticmethod
def data2byte(data):
"""
Given parameter, pack it into two bytes. Pad remainder with zero
and return three element byte array
data2byte(1024) returns b'\x00\x04\x00'
"""
return struct.pack("H",data) + b'\x00'
def power_percent(self, id, percentage):
""" Send brushed motor speed command to device 'id' at specified +/- 'percentage' """
did, center, inverted = self.check_id(id)
self.check_sp()
if inverted:
percentage = percentage * -1
pct = int(percentage)
if abs(pct) > 100:
raise ValueError("Motor power percentage {0} outside valid range from 0 to 100.".format(pct))
# 50 is wheel power maximum of Mr. Blue rover. TBD: Make this general and configurable
        power = int((percentage * 50) / 100)  # struct.pack("b", ...) needs an integer
self.send(did, 0x87, self.data1byte(power))
self.read_ack()
def set_max_current(self, id, current):
""" Set maximum current allowed before tripping protection """
did, center, inverted = self.check_id(id)
self.check_sp()
# Not yet implemented
def init_velocity(self, id):
""" Initialize device at 'id' into velocity mode """
did, center, inverted = self.check_id(id)
self.check_sp()
# Not applicable to DMFE devices
def velocity(self,id,pct_velocity):
"""
Runs the device in motor mode at specified velocity
For DMFE brush motor devices, directly translates to power_percent.
"""
self.power_percent(id,pct_velocity)
def init_angle(self, id):
"""
Sets the device at 'id' into servo position mode
"""
did, center, inverted = self.check_id(id)
self.check_sp()
# Not applicable to DMFE devices
def maxangle(self, id):
"""
Notifies maximum servo angle allowed in angle()
"""
did, center, inverted = self.check_id(id)
self.check_sp()
return maxangle
def angle(self, id, angle):
did, center, inverted = self.check_id(id)
self.check_sp()
if abs(angle) > maxangle:
            raise ValueError("Steering angle {} exceeded expected maximum of {}".format(angle, maxangle))
if inverted:
angle = angle * -1
position = 2048 + (angle * 4096/360) # 0 min, 2048 center, 4096 max at 360 degrees
self.send(did, 0x82, self.data2byte(position))
self.read_ack()
def steer_setzero(self, id):
did, center, inverted = self.check_id(id)
self.check_sp()
# TODO: Support live adjustment
def input_voltage(self, id):
"""
Query DMFE controller's internal voltage monitor
"""
did, center, inverted = self.check_id(id)
self.check_sp()
self.send(did, 0x96)
resp = self.read_datapacket(did)
return self.bytes_to_int(resp[0])/18.8
if __name__ == "__main__":
"""
Command line interface to work with DMFE serial bus devices.
Implements a subset of functionality as-needed
"""
import argparse
parser = argparse.ArgumentParser(description="DMFE Serial Bus Device Command Line Utility")
parser.add_argument("-id", "--id", help="Device identifier integer 2-253, default is 2.", type=int, default=2)
group = parser.add_mutually_exclusive_group()
group.add_argument("-m", "--move", help="Move servo to specified position 0-4096", type=int)
group.add_argument("-s", "--spin", help="Spin the motor at a specified speed from 0 to 50", type=int)
group.add_argument("-v", "--voltage", help="Read current input voltage", action="store_true")
args = parser.parse_args()
c = dmfe_wrapper()
c.connect()
if args.move != None: # Explicit check against None because zero is a valid value
if args.move < 0 or args.move > 4096:
print("Move destination {} is outside valid range of 0 to 4096 (4096 = 360 degrees)".format(args.move))
else:
c.send(args.id,0xaa)
c.read_dmfeserialservo()
print("Moving device {} to position {}".format(args.id, args.move))
c.send(args.id, 0x82, c.data2byte(args.move))
c.read_ack()
elif args.spin != None: # Zero is a valid parameter
if args.spin < -50 or args.spin > 50:
print("Spin speed {} is outside valid range of -50 to 50".format(args.spin))
else:
c.send(args.id,0xaa)
c.read_dmfeserialbrushed()
print("Spinning motor {} at speed {}".format(args.id, args.spin))
c.send(args.id, 0x87, c.data1byte(args.spin))
c.read_ack()
elif args.voltage:
c.send(args.id, 0x96)
resp = c.read_datapacket(args.id)
print("Device {} reports {} which translates to {} volts".format(args.id, c.bytes_to_int(resp[0]), c.bytes_to_int(resp[0])/18.8))
else:
# None of the actions were specified? Show help screen.
parser.print_help()
c.close()
|
StarcoderdataPython
|
1765714
|
#! /usr/bin/python
from twisted.spread import pb
from twisted.internet import reactor
def main():
rootobj_def = pb.getObjectAt("localhost", 8800, 30)
rootobj_def.addCallbacks(got_rootobj)
obj2_def = getSomeObjectAt("localhost", 8800, 30, "two")
obj2_def.addCallbacks(got_obj2)
obj3_def = getSomeObjectAt("localhost", 8800, 30, "three")
obj3_def.addCallbacks(got_obj3)
reactor.run()
def got_rootobj(rootobj):
print "got root object:", rootobj
print "telling root object to do foo(A)"
rootobj.callRemote("foo", "A")
def got_obj2(obj2):
print "got second object:", obj2
print "telling second object to do foo(B)"
obj2.callRemote("foo", "B")
def got_obj3(obj3):
print "got third object:", obj3
print "telling third object to do foo(C)"
obj3.callRemote("foo", "C")
class my_ObjectRetrieval(pb._ObjectRetrieval):
def __init__(self, broker, d, objname):
pb._ObjectRetrieval.__init__(self, broker, d)
self.objname = objname
def connectionMade(self):
assert not self.term, "How did this get called?"
x = self.broker.remoteForName(self.objname)
del self.broker
self.term = 1
self.deferred.callback(x)
def getSomeObjectAt(host, port, timeout=None, objname="root"):
from twisted.internet import defer
from twisted.spread.pb import Broker, BrokerClientFactory
d = defer.Deferred()
b = Broker(1)
bf = BrokerClientFactory(b)
my_ObjectRetrieval(b, d, objname)
if host == "unix":
# every time you use this, God kills a kitten
reactor.connectUNIX(port, bf, timeout)
else:
reactor.connectTCP(host, port, bf, timeout)
return d
main()
|
StarcoderdataPython
|
1805630
|
<filename>flare/polgrad/ppo.py
import torch
import torch.nn as nn
import torch.nn.functional as f
import numpy as np
import gym
import pybullet_envs
import time
import flare.kindling as fk
from flare.kindling import utils
from typing import Optional, Any, Union, Callable, Tuple, List
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
import sys
from flare.polgrad import BasePolicyGradient
class PPO(BasePolicyGradient):
def __init__(
self,
env,
ac = fk.FireActorCritic,
hidden_sizes = (64, 64),
steps_per_epoch = 4000,
minibatch_size = None,
gamma = 0.99,
lam = 0.97,
pol_lr = 3e-4,
val_lr = 1e-3,
train_iters = 80,
clipratio = 0.2,
maxkl = 0.01,
seed = 0,
hparams = None
):
super().__init__(
env,
ac,
hidden_sizes=hidden_sizes,
steps_per_epoch=steps_per_epoch,
minibatch_size=minibatch_size,
gamma=gamma,
lam=lam,
pol_lr=pol_lr,
val_lr=val_lr,
train_iters=train_iters,
seed = seed,
hparams= hparams
)
self.clipratio = clipratio
self.maxkl = maxkl
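    # Reference note (added for clarity, standard PPO-Clip formulation): calc_pol_loss
    # below computes the clipped surrogate objective
    #   L(theta) = -E[ min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t) ],
    # with r_t = exp(logp - logp_old) and eps = self.clipratio, plus an approximate
    # KL divergence (mean of logp_old - logp) used for early stopping in training_step.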
def calc_pol_loss(self, logps, logps_old, advs):
ratio = torch.exp(logps - logps_old)
clipped_adv = torch.clamp(ratio, 1 - self.clipratio, 1 + self.clipratio) * advs
pol_loss = -(torch.min(ratio * advs, clipped_adv)).mean()
kl = (logps_old - logps).mean().item()
return pol_loss, kl
def backward(self, trainer, loss, optimizer, optimizer_idx):
pass
def training_step(self, batch, batch_idx, optimizer_idx):
states, actions, advs, rets, logps_old = batch
if optimizer_idx == 0:
stops = 0
stopslst = []
policy, logps = self.ac.policy(states, actions)
pol_loss_old, kl = self.calc_pol_loss(logps, logps_old, advs)
for i in range(self.train_iters):
self.policy_optimizer.zero_grad()
policy, logps = self.ac.policy(states, actions)
pol_loss, kl = self.calc_pol_loss(logps, logps_old, advs)
if kl > 1.5 * self.maxkl:
stops += 1
stopslst.append(i)
break
pol_loss.backward()
self.policy_optimizer.step()
log = {
"PolicyLoss": pol_loss_old.item(),
"DeltaPolLoss": (pol_loss - pol_loss_old).item(),
"KL": kl,
"Entropy": policy.entropy().mean().item(),
"TimesEarlyStopped": stops,
"AvgEarlyStopStep": np.mean(stopslst) if len(stopslst) > 0 else 0
}
loss = pol_loss_old
elif optimizer_idx == 1:
values_old = self.ac.value_f(states)
val_loss_old = self.calc_val_loss(values_old, rets)
for i in range(self.train_iters):
self.value_optimizer.zero_grad()
values = self.ac.value_f(states)
val_loss = self.calc_val_loss(values, rets)
val_loss.backward()
self.value_optimizer.step()
delta_val_loss = (val_loss - val_loss_old).item()
log = {"ValueLoss": val_loss_old.item(), "DeltaValLoss": delta_val_loss}
loss = val_loss
self.tracker_dict.update(log)
return {"loss": loss, "log": log, "progress_bar": log}
def learn(
env_name,
epochs: Optional[int] = 100,
minibatch_size: Optional[int] = None,
steps_per_epoch: Optional[int] = 4000,
hidden_sizes: Optional[Union[Tuple, List]] = (64, 32),
gamma: Optional[float] = 0.99,
lam: Optional[float] = 0.97,
hparams = None,
seed = 0
):
from flare.polgrad.base import runner
minibatch_size = 4000 if minibatch_size is None else minibatch_size
runner(
env_name,
PPO,
epochs=epochs,
minibatch_size=minibatch_size,
        hidden_sizes=hidden_sizes,
gamma=gamma,
lam=lam,
hparams=hparams,
seed = seed
)
|
StarcoderdataPython
|
3220951
|
<filename>deep_architect/contrib/deep_learning_backend/pytorch_ops.py
from math import ceil
import torch
import torch.nn as nn
import torch.nn.functional as F
from deep_architect.helpers.pytorch_support import siso_pytorch_module
def calculate_same_padding(h_in, w_in, stride, filter_size):
h_out = ceil(float(h_in) / float(stride))
w_out = ceil(float(w_in) / float(stride))
if (h_in % stride == 0):
pad_along_height = max(filter_size - stride, 0)
else:
pad_along_height = max(filter_size - (h_in % stride), 0)
    if (w_in % stride == 0):
        pad_along_width = max(filter_size - stride, 0)
    else:
        pad_along_width = max(filter_size - (w_in % stride), 0)
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
return (pad_left, pad_right, pad_top, pad_bottom)
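
# Worked example: for a 32x32 input with stride 2 and a 3x3 filter, both
# spatial dimensions divide evenly by the stride, so pad_along_height and
# pad_along_width are max(3 - 2, 0) = 1, which yields the asymmetric "same"
# padding (pad_left, pad_right, pad_top, pad_bottom) = (0, 1, 0, 1) consumed
# by nn.ZeroPad2d below.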
def conv2d(h_num_filters,
h_filter_width,
h_stride=1,
h_dilation_rate=1,
h_use_bias=True):
def compile_fn(di, dh):
(_, channels, height, width) = di['in'].size()
padding = nn.ZeroPad2d(
calculate_same_padding(height, width, dh['stride'],
dh['filter_width']))
conv = nn.Conv2d(channels,
dh['num_filters'],
dh['filter_width'],
stride=dh['stride'],
dilation=dh['dilation_rate'],
bias=dh['use_bias'])
def fn(di):
x = padding(di['in'])
return {'out': conv(x)}
return fn, [conv, padding]
return siso_pytorch_module(
'Conv2D', compile_fn, {
'num_filters': h_num_filters,
'filter_width': h_filter_width,
'stride': h_stride,
'use_bias': h_use_bias,
'dilation_rate': h_dilation_rate,
})
def max_pool2d(h_kernel_size, h_stride=1):
def compile_fn(di, dh):
(_, _, height, width) = di['in'].size()
padding = nn.ZeroPad2d(
calculate_same_padding(height, width, dh['stride'],
dh['kernel_size']))
max_pool = nn.MaxPool2d(dh['kernel_size'], stride=dh['stride'])
def fn(di):
x = padding(di['in'])
return {'out': max_pool(x)}
return fn, [padding, max_pool]
return siso_pytorch_module('MaxPool2D', compile_fn, {
'kernel_size': h_kernel_size,
'stride': h_stride
})
def avg_pool2d(h_kernel_size, h_stride=1):
def compile_fn(di, dh):
(_, _, height, width) = di['in'].size()
padding = nn.ZeroPad2d(
calculate_same_padding(height, width, dh['stride'],
dh['kernel_size']))
avg_pool = nn.AvgPool2d(dh['kernel_size'], stride=dh['stride'])
def fn(di):
x = padding(di['in'])
return {'out': avg_pool(x)}
return fn, [padding, avg_pool]
return siso_pytorch_module('AvgPool2D', compile_fn, {
'kernel_size': h_kernel_size,
'stride': h_stride
})
def dropout(h_keep_prob):
def compile_fn(di, dh):
        # nn.Dropout expects the probability of zeroing an element, so convert
        # the keep-probability hyperparameter accordingly.
        dropout_layer = nn.Dropout(p=1.0 - dh['keep_prob'])
def fn(di):
return {'out': dropout_layer(di['in'])}
return fn, [dropout_layer]
return siso_pytorch_module('Dropout', compile_fn,
{'keep_prob': h_keep_prob})
def batch_normalization():
def compile_fn(di, dh):
(_, channels, _, _) = di['in'].size()
batch_norm = nn.BatchNorm2d(channels)
def fn(di):
return {'out': batch_norm(di['in'])}
return fn, [batch_norm]
return siso_pytorch_module('BatchNormalization', compile_fn, {})
def relu():
def compile_fn(di, dh):
def fn(di):
return {'out': F.relu(di['in'])}
return fn, []
return siso_pytorch_module('ReLU', compile_fn, {})
def global_pool2d():
def compile_fn(di, dh):
(_, _, height, width) = di['in'].size()
def fn(di):
x = F.avg_pool2d(di['in'], (height, width))
x = torch.squeeze(x, 2)
return {'out': torch.squeeze(x, 2)}
return fn, []
return siso_pytorch_module('GlobalAveragePool', compile_fn, {})
def fc_layer(h_num_units):
def compile_fn(di, dh):
(_, channels) = di['in'].size()
fc = nn.Linear(channels, dh['num_units'])
def fn(di):
return {'out': fc(di['in'])}
return fn, [fc]
return siso_pytorch_module('FCLayer', compile_fn,
{'num_units': h_num_units})
func_dict = {
'dropout': dropout,
'conv2d': conv2d,
'max_pool2d': max_pool2d,
'avg_pool2d': avg_pool2d,
'batch_normalization': batch_normalization,
'relu': relu,
'global_pool2d': global_pool2d,
'fc_layer': fc_layer
}
|
StarcoderdataPython
|
5170089
|
#!/usr/bin/env python
"""Tests the TMP007 sensor"""
import time
import logging
from nanpy import TMP007
from nanpy.serialmanager import SerialManager
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("TMP007")
connection = SerialManager(sleep_after_connect=2)
connection.open()
sensor = TMP007(connection=connection)
try:
while True:
object_temp = sensor.get_object_temperature()
die_temp = sensor.get_die_temperature()
logger.info("Object temperature: %.2f", object_temp)
logger.info("Die temperature: %.2f", die_temp)
time.sleep(4)
except KeyboardInterrupt:
pass
|
StarcoderdataPython
|
158914
|
<filename>AutoClean/AutoClean.py
import os
import sys
import pandas as pd
from loguru import logger
from AutoClean.Modules import *
class AutoClean:
def __init__(self, input_data, missing_num='auto', missing_categ='auto', encode_categ=['auto'], extract_datetime='s', outliers='winz', outlier_param=1.5, logfile=True, verbose=False):
'''
input_data (dataframe)..........Pandas dataframe
missing_num (str)...............define how NUMERICAL missing values are handled
'auto' = automated handling
'linreg' = uses Linear Regression for predicting missing values
'knn' = uses K-NN algorithm for imputation
                                        'mean','median' or 'most_frequent' = uses mean/median/mode imputation
'delete' = deletes observations with missing values
'False' = skips this step
missing_categ (str).............define how CATEGORICAL missing values are handled
'auto' = automated handling
'logreg' = uses Logistic Regression for predicting missing values
'knn' = uses K-NN algorithm for imputation
                                        'most_frequent' = uses mode imputation
'delete' = deletes observations with missing values
'False' = skips this step
encode_categ (list).............encode CATEGORICAL features, takes a list as input
['auto'] = automated encoding
['onehot'] = one-hot-encode all CATEGORICAL features
['label'] = label-encode all categ. features
to encode only specific features add the column name or index: ['onehot', ['col1', 2]]
'False' = skips this step
extract_datetime (str)..........define whether DATETIME type features should be extracted into separate features
to define granularity set to 'D'= day, 'M'= month, 'Y'= year, 'h'= hour, 'm'= minute or 's'= second
'False' = skips this step
outliers (str)..................define how outliers are handled
'winz' = replaces outliers through winsorization
'delete' = deletes observations containing outliers
                                        observations are considered outliers if they are outside the lower and upper bound [Q1-1.5*IQR, Q3+1.5*IQR], where IQR is the interquartile range
to set a custom multiplier use the 'outlier_param' parameter
'False' = skips this step
outlier_param (int, float)......! recommended not to change default value
define the multiplier for the outlier bounds
        logfile (bool)..................define whether to create a logfile during the AutoClean process
logfile will be saved in working directory as "autoclean.log"
verbose (bool)..................define whether AutoClean logs will be printed in console
        OUTPUT (dataframe)..............a cleaned Pandas dataframe, accessible through the 'output' attribute of the AutoClean instance
'''
self._initialize_logger(verbose, logfile)
output_data = input_data.copy()
self.missing_num = missing_num
self.missing_categ = missing_categ
self.outliers = outliers
self.encode_categ = encode_categ
self.extract_datetime = extract_datetime
self.outlier_param = outlier_param
# validate the input parameters
self._validate_params(output_data, verbose, logfile)
# initialize our class and start the autoclean process
self.output = self._clean_data(output_data, input_data)
print('Logfile saved to:', os.path.join(os.getcwd(), 'autoclean.log'))
def _initialize_logger(self, verbose, logfile):
# function for initializing the logging process
logger.remove()
if verbose == True:
logger.add(sys.stderr, format='{time:DD-MM-YYYY HH:mm:ss.SS} - {level} - {message}')
if logfile == True:
logger.add('autoclean.log', mode='w', format='{time:DD-MM-YYYY HH:mm:ss.SS} - {level} - {message}')
return
def _validate_params(self, df, verbose, logfile):
        # function for validating the input parameters of the autoclean process
logger.info('Started validation of input parameters...')
if type(df) != pd.core.frame.DataFrame:
raise ValueError('Invalid value for "df" parameter.')
if self.missing_num not in [False, 'auto', 'knn', 'mean', 'median', 'most_frequent', 'delete']:
raise ValueError('Invalid value for "missing_num" parameter.')
if self.missing_categ not in [False, 'auto', 'knn', 'most_frequent', 'delete']:
raise ValueError('Invalid value for "missing_categ" parameter.')
if self.outliers not in [False, 'winz', 'delete']:
raise ValueError('Invalid value for "outliers" parameter.')
if len(self.encode_categ) > 2 and not isinstance(self.encode_categ, list) and self.encode_categ[0] not in [False, 'auto', 'onehot', 'label']:
raise ValueError('Invalid value for "encode_categ" parameter.')
if len(self.encode_categ) == 2:
if not isinstance(self.encode_categ[1], list):
raise ValueError('Invalid value for "encode_categ" parameter.')
if not isinstance(self.outlier_param, int) and not isinstance(self.outlier_param, float):
raise ValueError('Invalid value for "outlier_param" parameter.')
if self.extract_datetime not in [False, 'D','M','Y','h','m','s']:
raise ValueError('Invalid value for "extract_datetime" parameter.')
if not isinstance(verbose, bool):
raise ValueError('Invalid value for "verbose" parameter.')
if not isinstance(logfile, bool):
raise ValueError('Invalid value for "logfile" parameter.')
logger.info('Completed validation of input parameters')
return
def _clean_data(self, df, input_data):
# function for starting the autoclean process
df = df.reset_index(drop=True)
df = MissingValues.handle(self, df)
df = Outliers.handle(self, df)
df = Adjust.convert_datetime(self, df)
df = EncodeCateg.handle(self, df)
df = Adjust.round_values(self, df, input_data)
logger.info('AutoClean completed successfully')
return df
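
if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): 'example.csv' is a placeholder
    # path and the parameter values simply mirror the options documented in the
    # __init__ docstring above; the cleaned frame is exposed as `.output`.
    df = pd.read_csv('example.csv')
    pipeline = AutoClean(df, missing_num='knn', missing_categ='most_frequent',
                         encode_categ=['label'], outliers='winz', verbose=True)
    print(pipeline.output.head())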
|
StarcoderdataPython
|
6685089
|
"""End to end tests for CLI v2"""
from functools import partial
try:
from unittest import mock
except ImportError:
import mock
from click.testing import CliRunner
import pytest
from pipcompilemulti.cli_v2 import cli, read_config
from .utils import temp_dir
@pytest.fixture(autouse=True)
def requirements_dir():
"""Create temporary requirements directory for test time."""
with temp_dir() as tmp_dir:
patch = partial(patched_config, tmp_dir)
with mock.patch('pipcompilemulti.cli_v2.read_config', patch):
yield
@pytest.mark.parametrize('command', ['lock', 'upgrade', 'verify'])
def test_command_exits_with_zero(command):
"""Run requirements command on self"""
# pylint: disable=redefined-outer-name
runner = CliRunner()
result = runner.invoke(cli, [command])
assert result.exit_code == 0
def patched_config(base_dir):
"""Override base_dir in each section of config."""
config_sections = read_config()
for _, section in config_sections:
section['base_dir'] = base_dir
return config_sections
|
StarcoderdataPython
|
3456126
|
# coding=utf-8
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import GINConv
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
'''
num_layers: number of layers in the neural networks (EXCLUDING the input layer). If num_layers=1, this reduces to linear model.
input_dim: dimensionality of input features
hidden_dim: dimensionality of hidden units at ALL layers
output_dim: number of classes for prediction
device: which device to use
'''
super(MLP, self).__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for layer in range(self.num_layers - 1):
h = F.relu(self.batch_norms[layer](self.linears[layer](h)))
return self.linears[self.num_layers - 1](h)
class GIN(nn.Module):
def __init__(self, input_dim, hidden_dim, out_dim, layers, gin_layer):
super().__init__()
self.encoder = nn.ModuleList()
for i in range(gin_layer):
if i == 0:
self.encoder.append(GINConv(MLP(input_dim, hidden_dim, hidden_dim, layers), 'sum', 0, True))
elif i == gin_layer - 1:
self.encoder.append(GINConv(MLP(hidden_dim, hidden_dim, out_dim, layers), 'sum', 0, True))
else:
self.encoder.append(GINConv(MLP(hidden_dim, hidden_dim, hidden_dim, layers), 'sum', 0, True))
def forward(self, g, h):
# x, edge_index = data.x, data.edge_index
for i, layer in enumerate(self.encoder):
h = layer(g, h)
return h
class AE(nn.Module):
def __init__(self, input_dim, hidden_dim, out_dim, layers):
super().__init__()
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
for i in range(layers):
if i == 0:
self.encoder.append(nn.Linear(input_dim, hidden_dim))
self.decoder.append(nn.Linear(out_dim, hidden_dim))
elif i == layers - 1:
self.encoder.append(nn.Linear(hidden_dim, out_dim))
self.decoder.append(nn.Linear(hidden_dim, input_dim))
else:
self.encoder.append(nn.Linear(hidden_dim, hidden_dim))
self.decoder.append(nn.Linear(hidden_dim, hidden_dim))
def encode(self, h):
for i, layer in enumerate(self.encoder):
h = layer(h)
h = F.relu(h)
return h
def forward(self, h):
h = self.encode(h)
out = h
for i, layer in enumerate(self.decoder):
out = layer(out)
out = F.relu(out)
return h, out
def get_embedding(self, h):
h = self.encode(h)
return h
class VAE(nn.Module):
def __init__(self, input_dim, hidden_dim, out_dim, layers):
super().__init__()
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
for i in range(layers - 1):
if i == 0:
self.encoder.append(nn.Linear(input_dim, hidden_dim))
self.decoder.append(nn.Linear(out_dim, hidden_dim))
else:
self.encoder.append(nn.Linear(hidden_dim, hidden_dim))
self.decoder.append(nn.Linear(hidden_dim, hidden_dim))
self.decoder.append(nn.Linear(hidden_dim, input_dim))
self.mu = nn.Linear(hidden_dim, out_dim)
self.sigma = nn.Linear(hidden_dim, out_dim)
for i, layer in enumerate(self.encoder):
nn.init.xavier_normal_(self.encoder[i].weight.data)
nn.init.normal_(self.encoder[i].bias.data)
for i, layer in enumerate(self.decoder):
nn.init.xavier_normal_(self.decoder[i].weight.data)
nn.init.normal_(self.decoder[i].bias.data)
nn.init.xavier_normal_(self.mu.weight.data)
nn.init.normal_(self.mu.bias.data)
nn.init.xavier_normal_(self.sigma.weight.data)
nn.init.normal_(self.sigma.bias.data)
def encode(self, h):
for i, layer in enumerate(self.encoder):
h = layer(h)
h = F.relu(h)
mu = self.mu(h)
sigma = self.sigma(h).exp()
epsilon = torch.from_numpy(np.random.normal(0, 1, sigma.size())).float().cuda()
z = mu + epsilon * sigma
return z
def forward(self, h):
h = self.encode(h)
out = h
for i, layer in enumerate(self.decoder):
out = layer(out)
out = F.relu(out)
return h, out
class Adversary(nn.Module):
def __init__(self, input_dim, hidden_dim, out_dim, layers):
super().__init__()
self.generator = nn.ModuleList()
for i in range(layers):
if i == 0:
self.generator.append(nn.Linear(input_dim, hidden_dim))
elif i == layers - 1:
self.generator.append(nn.Linear(hidden_dim, out_dim))
else:
self.generator.append(nn.Linear(hidden_dim, hidden_dim))
self.discriminator = nn.Sequential(
nn.Linear(out_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def encode(self, h):
for i, layer in enumerate(self.generator):
h = layer(h)
h = F.relu(h)
return h
def forward(self, pos, neg):
neg = self.encode(neg)
pos_lbl = self.discriminator(pos)
neg_lbl = self.discriminator(neg)
return pos_lbl, neg_lbl
class ARHOL(nn.Module):
def __init__(self, input_dim, hidden_dim, out_dim, pos_dim, layers):
super().__init__()
self.ae = AE(input_dim, hidden_dim, out_dim, layers)
self.generator = GIN(out_dim, hidden_dim, pos_dim, layers, layers)
self.discriminator = nn.Sequential(
nn.Linear(pos_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self, features, pos, g):
h, out = self.ae(features)
pos_lbl = self.discriminator(pos)
h = self.generator(g, h)
neg_lbl = self.discriminator(h.detach())
loss_ae = F.mse_loss(out, features)
loss_d = 0.5 * (F.binary_cross_entropy_with_logits(pos_lbl, torch.ones_like(pos_lbl)) +
F.binary_cross_entropy_with_logits(neg_lbl, torch.zeros_like(neg_lbl)))
loss_g = F.binary_cross_entropy_with_logits(self.discriminator(h), torch.ones_like(neg_lbl))
return loss_ae, loss_g, loss_d
def get_embedding(self, features):
h, _ = self.ae(features)
return h.cpu().data.numpy()
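
if __name__ == '__main__':
    # Minimal CPU smoke test (illustrative only). Only the plain-PyTorch
    # modules are exercised here; GIN/ARHOL additionally need a DGL graph and
    # VAE calls .cuda() in its reparameterisation step.
    x = torch.randn(8, 16)
    mlp = MLP(input_dim=16, hidden_dim=32, output_dim=4, num_layers=3)
    print('MLP output shape:', mlp(x).shape)
    ae = AE(input_dim=16, hidden_dim=32, out_dim=8, layers=3)
    latent, recon = ae(x)
    print('AE latent shape:', latent.shape, 'reconstruction shape:', recon.shape)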
|
StarcoderdataPython
|
6642373
|
<gh_stars>0
import numpy as np
import random
import torch
import cv2
from PIL import ImageGrab
import requests
import bs4
from lxml import html
from keyboard import mouse
import keyboard
import pyautogui
import time
import pynput.keyboard
import os
from NET import *
|
StarcoderdataPython
|
3204860
|
<reponame>wstong999/AliOS-Things
import utime  # the sleep/delay helpers used below live in the utime module
from driver import GPIO,I2C
import sht3x
from ssd1306 import SSD1306_I2C
hum_s = 0
oled = None
sht3xDev = None
humi_gpio = None
def sht3x_init():
global sht3xDev
i2cDev = I2C()
i2cDev.open("sht3x")
sht3xDev = sht3x.SHT3X(i2cDev)
def humi_ctrl_init():
global humi_gpio
humi_gpio = GPIO()
humi_gpio.open("hum_ctrl")
def start_hum():
humi_gpio.write(0)
def stop_hum():
humi_gpio.write(1)
def oled_init():
global oled
i2cObj = I2C()
i2cObj.open("ssd1306")
print("ssd1306 inited!")
oled = SSD1306_I2C(128, 64, i2cObj)
    oled.fill(0)  # clear the screen to a black background
oled.text('welcome haas', 30, 5)
oled.text('auto humi', 30, 22)
oled.text(str('----------------------'),3,32)
oled.text('', 30, 40)
oled.show()
def oled_data_show(status,humi,time_arr):
global oled
oled.fill(0)
oled.text(str('%d-%02d-%02d'%(time_arr[0],time_arr[1],time_arr[2])),30,5)
oled.text(str('%02d:%02d:%02d'%(time_arr[3],time_arr[4],time_arr[5])),30,22)
oled.text(str('----------------------'),3,32)
if status == 1:
oled.text('open', 25, 40)
oled.text(str('%02d'%(humi)+'%H'),75,40)
elif status == 0:
oled.text('close', 25, 40)
oled.text(str('%02d'%(humi)+'%H'),75,40)
oled.show()
if __name__ == '__main__':
sht3x_init()
humi_ctrl_init()
oled_init()
while True:
humidity = sht3xDev.getHumidity()
if humidity <= 60.0:
if hum_s == 0:
hum_s = 1
print("start")
start_hum()
else :
if hum_s == 1:
hum_s = 0
print("stop")
stop_hum()
timeArray = utime.localtime()
oled_data_show(hum_s,int(humidity),timeArray)
utime.sleep(1)
|
StarcoderdataPython
|
3564691
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-26 14:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0108_sanitize_namespace_names'),
]
operations = [
migrations.AlterModelOptions(
name='content',
options={'ordering': ['namespace', 'repository', 'name', 'content_type']},
),
migrations.AlterModelOptions(
name='repositoryversion',
options={},
),
migrations.AlterUniqueTogether(
name='content',
unique_together=set([('namespace', 'repository', 'name', 'content_type')]),
),
]
|
StarcoderdataPython
|
1626575
|
<reponame>Sawyer-Middeleer/divvy-dash<gh_stars>0
from django.db import models
import json
import requests
class Station(models.Model):
num_bikes_disabled = models.IntegerField()
station_id = models.CharField(max_length=50)
num_bikes_available = models.IntegerField()
num_docks_available = models.IntegerField()
last_reported = models.DateTimeField(auto_now_add=False)
def __str__(self):
return self.station_id
|
StarcoderdataPython
|
1978895
|
#!/usr/bin/env python
import sys
import os
sys.path.append(os.getcwd()+"/../modules")
import pprint
from netaddr import *
import pynetbox
import csv
import yaml
# Custom NB modules
import my_netbox as nb_tools
try:
assert all(os.environ[env] for env in ['NETBOX_TOKEN'])
except KeyError as exc:
print(f"ERROR: ENVAR {e} not found", file=sys.stderr)
sys.exit()
NETBOX_URL = "http://localhost:8000"
NETBOX_TOKEN = os.environ['NETBOX_TOKEN']
nb = pynetbox.api(url=NETBOX_URL, token=NETBOX_TOKEN)
### Read from CSV for NetBox device data
nb_source_file = "nb_devices.csv"
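# Expected CSV header (columns referenced below): name, site, rack,
# device_type, device_role, platform, face, position, serial, asset_tag,
# status, mgmt_intf, primary_ipv4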
# Stores info on created NB devices
nb_all_created_devices_count = 0
nb_all_created_devices = list()
# Stores all already created NetBox objects
nb_existing_devices_count = 0
nb_existing_devices = list()
# Stores non-existent NetBox objects
nb_non_existent_devices_count = 0
nb_non_existent_devices = list()
# Stores devices and attributes that will be created
nb_all_devices = list()
nb_all_devices_primaryIPs = dict()
nb_all_devices_mgmt_intf = dict()
# Stores IPs for duplicate checking
all_IPs = list()
unique_IPs = set()
duplicated_IPs = list()
try:
with open(nb_source_file) as f:
reader = csv.DictReader(f)
for row in reader:
nb_obj = None
nb_site = nb_tools.retrieve_nb_obj(nb,"dcim","sites",row['site'])
nb_rack = nb_tools.retrieve_nb_obj(nb,"dcim","racks",row['rack'])
nb_dtype = nb_tools.retrieve_nb_obj(nb,"dcim","device_types",row['device_type'])
nb_drole = nb_tools.retrieve_nb_obj(nb,"dcim","device_roles",row['device_role'])
nb_platform = nb_tools.retrieve_nb_obj(nb,"dcim","platforms",row['platform'])
# Verifies whether DCIM object exists
if (not (nb_site and nb_dtype and nb_drole and nb_rack and nb_platform) ):
nb_non_existent_devices_count += 1
nb_non_existent_devices.append(
[
row['name'],
row['site'],
row['rack'],
row['device_type'],
row['device_role'],
row['platform']
]
)
# Generates dict of values for PyNetbox to create object
if (nb_non_existent_devices_count == 0):
nb_obj = nb.dcim.devices.get(name=row['name'])
# Adds primary IPs to list for duplicate checking
all_IPs.append(row['primary_ipv4'])
if (not nb_obj):
nb_all_created_devices_count += 1
nb_all_devices.append(
dict(
name=row['name'],
site=nb_site.id,
platform=nb_platform.id,
device_type=nb_dtype.id,
device_role=nb_drole.id,
rack=nb_rack.id,
face=row['face'],
position=row['position'],
serial=row['serial'],
asset_tag=row['asset_tag'],
status=row['status'],
)
)
nb_all_created_devices.append(
[
row['name'],
row['device_type'],
row['site'],
row['rack'],
row['mgmt_intf'],
row['primary_ipv4']
]
)
nb_all_devices_primaryIPs[row['name']] = row['primary_ipv4']
nb_all_devices_mgmt_intf[row['name']] = row['mgmt_intf']
else:
nb_existing_devices_count += 1
nb_existing_devices.append(
[
nb_obj.name,
nb_obj.site.name,
nb_obj.rack.name,
nb_obj.serial,
nb_obj.asset_tag,
nb_obj.status.label
]
)
except FileNotFoundError as e:
print(f"ERROR: File {nb_source_file} not found", file=sys.stderr)
except pynetbox.core.query.RequestError as e:
print(f"ERROR: NetBox query request failed {e}", file=sys.stderr)
if (nb_existing_devices_count > 0):
title = "The following NetBox devices already exist"
headerValues = ["Name", "Site", "Rack", "Serial #", "Asset Tag", "Status"]
nb_tools.create_nb_log(title, headerValues, nb_existing_devices, 15, 36)
### Generates table of non-existent NetBox objects defined in CSV
if ( nb_non_existent_devices_count > 0 ):
title = "One or more of the following device attributes are invalid"
headerValues = ["Name", "Site", "Rack", "Device Type", "Device Role", "Platform"]
nb_tools.create_nb_log(title, headerValues, nb_non_existent_devices, 15, 30)
# Creates a set to remove duplicate IPs
# If length of set differs from list, indicates there are duplicate IPs
flag = len(set(all_IPs)) == len(all_IPs)
# Print results of verifying duplicated IPs
if(not flag):
for device_ip in all_IPs:
if (device_ip not in unique_IPs):
unique_IPs.add(device_ip)
else:
duplicated_IPs.append([device_ip,])
title = "The following IPs are duplicated"
headerValues = ["Duplicated IP Addresses"]
nb_tools.create_nb_log(title, headerValues, duplicated_IPs, 15, 12)
elif (nb_all_created_devices_count > 0):
try:
# Add devices to NetBox and store resulting object in "created_devs"
nb_created_devices = nb.dcim.devices.create(nb_all_devices)
for created_dev in nb_created_devices:
# Retrieve specific interface associated w/ created device
nb_primary_interface = nb.dcim.interfaces.filter(device=created_dev.name,name=nb_all_devices_mgmt_intf[created_dev.name])
# Create dict to store attributes for device's primary IP
primary_ip_addr_dict = dict(
address=nb_all_devices_primaryIPs[created_dev.name],
status=1,
description=f"Management IP for {created_dev.name}",
interface=nb_primary_interface[0].id,
)
# Create primary IP and assign to device's first interface
new_primary_ip = nb.ipam.ip_addresses.create(primary_ip_addr_dict)
# Retrieves created device, and sets the primary IP for the device
dev = nb.dcim.devices.get(created_dev.id)
dev.primary_ip4 = new_primary_ip.id
dev.save()
title = "The following NetBox objects were created"
headerValues = ["Device", "Type", "Site", "Rack", "Management Interface", "IP"]
nb_tools.create_nb_log(title, headerValues, nb_all_created_devices, 10, 36)
except pynetbox.core.query.RequestError as e:
print(f"ERROR: NetBox query request failed {e}", file=sys.stderr)
else:
print()
print(24*"*"," No NetBox devices were created ",24*"*")
print("\nAll defined devices already exist or there were errors for some of the objects")
|
StarcoderdataPython
|
1940493
|
<gh_stars>0
from collections import namedtuple
TaskCore = namedtuple('TaskCore', ['cached_data_loader', 'data_dir', 'target', 'pipeline', 'parser',
'classifier', 'cv_ratio', 'train', 'test'])
class Task(object):
"""
A Task computes some work and outputs a dictionary which will be cached on disk.
If the work has been computed before and is present in the cache, the data will
simply be loaded from disk and will not be pre-computed.
"""
def __init__(self, task_core):
self.task_core = task_core
def filename(self):
raise NotImplementedError("Implement this")
def run(self):
return self.task_core.cached_data_loader.load(self.filename(), self.load_data)
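
if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). The base class assumes concrete
    # tasks define `filename()` plus a `load_data()` callable, and that
    # `cached_data_loader.load(filename, load_data)` only calls `load_data`
    # when no cached file exists; the trivial loader below always recomputes.
    class AlwaysComputeLoader(object):
        def load(self, filename, data_loader):
            return data_loader()

    class ExampleTask(Task):
        def filename(self):
            return 'example_%s' % self.task_core.target

        def load_data(self):
            return {'target': self.task_core.target}

    core = TaskCore(cached_data_loader=AlwaysComputeLoader(), data_dir='.',
                    target='demo', pipeline=None, parser=None, classifier=None,
                    cv_ratio=0.5, train=None, test=None)
    print(ExampleTask(core).run())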
|
StarcoderdataPython
|
4849659
|
<reponame>dfm/celerite2
# -*- coding: utf-8 -*-
__all__ = ["terms", "GaussianProcess"]
def __set_compiler_flags():
import aesara
def add_flag(current, new):
if new in current:
return current
return f"{current} {new}"
current = aesara.config.gcc__cxxflags
current = add_flag(current, "-Wno-c++11-narrowing")
current = add_flag(current, "-fno-exceptions")
current = add_flag(current, "-fno-unwind-tables")
current = add_flag(current, "-fno-asynchronous-unwind-tables")
aesara.config.gcc__cxxflags = current
__set_compiler_flags()
from celerite2.pymc4 import terms
from celerite2.pymc4.celerite2 import GaussianProcess
try:
from celerite2.pymc4 import jax_support # noqa
except ImportError:
pass
|
StarcoderdataPython
|
8150389
|
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Redditor
from .forms import SearchForm
class DetailView(generic.DetailView):
"""
The detail view for Redditors.
This class provides the view for the results of the analysis
on a redditor. If the redditor is not present in the database,
it gives the option of performing the analysis.
"""
model = Redditor
def get(self, request, *args, **kwargs):
"""Handle 404 exception by redirecting to the prefilled search form."""
try:
response = super().get(request, *args, **kwargs)
except Http404:
form = SearchForm(initial={'username': kwargs['pk']})
response = render(request, 'user/notfound.html', {'form': form})
return response
class SearchView(generic.edit.FormView):
"""
The search page view, with form processing.
This class provides the view for the search page (which is
also the homepage). It also provides the search form processing.
"""
form_class = SearchForm
template_name = 'user/search.html'
def form_valid(self, form):
"""Handle form creating a new entry if user is not in database, redirecting otheriwse."""
username = form.cleaned_data['username']
# if the user is not already in the database, create a new entry
if not Redditor.objects.filter(pk=username).exists():
new_redditor = Redditor(username=username,
analysis_date=timezone.now(),
result=0.1)
new_redditor.save()
return HttpResponseRedirect(reverse('user:detail', args=(username,)))
|
StarcoderdataPython
|
214868
|
from common.make_tx import make_airdrop_tx
from terra import util_terra
from terra.make_tx import make_lp_unstake_tx
def handle_unstake_and_claim(exporter, elem, txinfo):
txid = txinfo.txid
from_contract = elem["logs"][0]["events_by_type"]["from_contract"]
actions = from_contract["action"]
contract_addresses = from_contract["contract_address"]
amounts = from_contract.get("amount", None)
for i in range(len(actions)):
action = actions[i]
contract_address = contract_addresses[i]
amount_string = _align_amounts_to_actions(actions, amounts)[i]
if action == "transfer":
# Extract amount/currency for transfer action
currency = util_terra._lookup_address(contract_address, txid)
amount = util_terra._float_amount(amount_string, currency)
if currency == "LOOP":
row = make_airdrop_tx(txinfo, amount, currency)
else:
row = make_lp_unstake_tx(txinfo, amount, currency)
exporter.ingest_row(row)
def _align_amounts_to_actions(actions, amounts):
new_amounts = []
i = 0
for action in actions:
if action in ["Unstake"]:
new_amounts.append('0')
else:
new_amounts.append(amounts[i])
i += 1
return new_amounts
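
# Worked example for the re-alignment above (illustrative): "Unstake" actions
# carry no amount, so they are padded with '0' while the remaining amounts keep
# their original order:
#   _align_amounts_to_actions(["Unstake", "transfer"], ["1000000"]) -> ['0', '1000000']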
|
StarcoderdataPython
|
11315769
|
<reponame>oracle/accelerated-data-science<filename>ads/jobs/utils.py<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import os
import json
from oci.data_flow.models import Application
from ads.opctl.config.utils import read_from_ini
from ads.opctl.constants import (
ADS_DATAFLOW_CONFIG_FILE_NAME,
DEFAULT_ADS_CONFIG_FOLDER,
)
from ads.jobs import logger
from ads.common.utils import oci_key_profile
def get_dataflow_config(path=None, oci_profile=None):
if path:
dataflow_config_file_path = os.path.abspath(os.path.expanduser(path))
else:
dataflow_config_file_path = os.path.expanduser(
os.path.join(DEFAULT_ADS_CONFIG_FOLDER, ADS_DATAFLOW_CONFIG_FILE_NAME)
)
config = {}
if os.path.exists(dataflow_config_file_path):
parser = read_from_ini(dataflow_config_file_path)
if not oci_profile:
oci_profile = oci_key_profile()
if oci_profile in parser:
config = dict(parser[oci_profile])
if len(config) == 0:
logger.error(
f"Dataflow configuration with profile {oci_profile} not found."
)
raise ValueError(
f"Dataflow configuration with profile {oci_profile} not found."
)
return config
else:
logger.error(f"{dataflow_config_file_path} not found.")
return {}
class DataFlowConfig(Application):
def __init__(self, path: str = None, oci_profile: str = None):
"""Create a DataFlowConfig object. If a path to config file is given it is loaded from the path.
Parameters
----------
path : str, optional
path to configuration file, by default None
oci_profile : str, optional
oci profile to use, by default None
"""
self.config = get_dataflow_config(path, oci_profile)
self._script_bucket = None
self._archive_bucket = None
if len(self.config) > 0:
self._script_bucket = self.config.pop("script_bucket")
self._archive_bucket = self.config.pop("archive_bucket", None)
super().__init__(**self.config)
def __repr__(self):
config = json.loads(super().__repr__())
config["script_bucket"] = self.script_bucket
if self.archive_bucket:
config["archive_bucket"] = self.archive_bucket
return f"'{json.dumps({k: v for k, v in config.items() if v is not None})}'"
@property
def script_bucket(self):
"""Bucket to save user script. Also accept a prefix in the format of oci://<bucket-name>@<namespace>/<prefix>.
Returns
-------
str
script bucket (path)
"""
return self._script_bucket
@script_bucket.setter
def script_bucket(self, v: str):
self._script_bucket = v
@property
def archive_bucket(self):
"""Bucket to save archive zip. Also accept a prefix in the format of oci://<bucket-name>@<namespace>/<prefix>.
Returns
-------
str :
archive bucket (path)
"""
return self._archive_bucket
@archive_bucket.setter
def archive_bucket(self, v: str):
self._archive_bucket = v
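
# Illustrative on-disk configuration consumed by get_dataflow_config above
# (written to DEFAULT_ADS_CONFIG_FOLDER/ADS_DATAFLOW_CONFIG_FILE_NAME, one
# section per OCI profile). The key names other than script_bucket and
# archive_bucket are assumptions: whatever remains in a section is passed
# straight to oci.data_flow.models.Application as keyword arguments, so the
# keys must match that model's attributes.
#
# [DEFAULT]
# compartment_id = ocid1.compartment.oc1..<unique_id>
# driver_shape = VM.Standard2.1
# executor_shape = VM.Standard2.1
# num_executors = 1
# spark_version = 3.2.1
# script_bucket = oci://<bucket>@<namespace>/dataflow/scripts
# archive_bucket = oci://<bucket>@<namespace>/dataflow/archives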
|
StarcoderdataPython
|
370346
|
import time
import torch
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
from custom_utils import get_tensorboard
from recbole.data import FullSortEvalDataLoader
from recbole.trainer import Trainer
from recbole.utils import set_color, get_gpu_usage, early_stopping, dict2str, EvaluatorType
class CustomTrainer(Trainer):
def __init__(self, config, model):
super(CustomTrainer, self).__init__(config, model)
if self.best_valid_result is None:
self.best_valid_result = {}
self.tensorboard = get_tensorboard(self.logger, model.__class__.__name__)
self.best_valid_result['avg_trn_time'] = self.best_valid_result.get('avg_trn_time', 0) * self.start_epoch
self.best_valid_result['avg_val_time'] = self.best_valid_result.get('avg_val_time', 0) * self.start_epoch
self.best_valid_result['avg_tst_time'] = self.best_valid_result.get('avg_tst_time', 0) * self.start_epoch
self.best_valid_result['max_gpu_usage'] = self.best_valid_result.get('max_gpu_usage', 0)
def _train_epoch(self, train_data, epoch_idx, loss_func=None, show_progress=False):
"""
This method now saves the average GPU usage
"""
self.model.train()
loss_func = loss_func or self.model.calculate_loss
total_loss = None
iter_data = (
tqdm(
train_data,
total=len(train_data),
ncols=100,
desc=set_color(f"Train {epoch_idx:>5}", 'pink'),
) if show_progress else train_data
)
for batch_idx, interaction in enumerate(iter_data):
interaction = interaction.to(self.device)
self.optimizer.zero_grad()
losses = loss_func(interaction)
if isinstance(losses, tuple):
loss = sum(losses)
loss_tuple = tuple(per_loss.item() for per_loss in losses)
total_loss = loss_tuple if total_loss is None else tuple(map(sum, zip(total_loss, loss_tuple)))
else:
loss = losses
total_loss = losses.item() if total_loss is None else total_loss + losses.item()
self._check_nan(loss)
loss.backward()
if self.clip_grad_norm:
clip_grad_norm_(self.model.parameters(), **self.clip_grad_norm)
self.optimizer.step()
if self.gpu_available and show_progress:
iter_data.set_postfix_str(set_color('GPU RAM: ' + get_gpu_usage(self.device), 'yellow'))
return total_loss
def fit(self, train_data, valid_data=None, verbose=True, saved=True, show_progress=False, callback_fn=None,
use_early_stopping=False):
"""
Unlike Trainer class, this custom trainer returns the time and memory consumption as well and makes the use
of Early Stopping optional
"""
if saved and self.start_epoch >= self.epochs:
self._save_checkpoint(-1)
self.eval_collector.data_collect(train_data)
for epoch_idx in range(self.start_epoch, self.epochs):
# train
training_start_time = time.time()
train_loss = self._train_epoch(train_data, epoch_idx, show_progress=show_progress)
self.train_loss_dict[epoch_idx] = sum(train_loss) if isinstance(train_loss, tuple) else train_loss
training_end_time = time.time()
self.best_valid_result['avg_trn_time'] += training_end_time - training_start_time
train_loss_output = \
self._generate_train_loss_output(epoch_idx, training_start_time, training_end_time, train_loss)
if verbose:
self.logger.info(train_loss_output)
self._add_train_loss_to_tensorboard(epoch_idx, train_loss)
# eval
if self.eval_step <= 0 or not valid_data:
if saved:
self._save_checkpoint(epoch_idx)
update_output = set_color('Saving current', 'blue') + ': %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
continue
if (epoch_idx + 1) % self.eval_step == 0:
valid_start_time = time.time()
valid_score, valid_result = self._valid_epoch(valid_data, show_progress=show_progress)
self.best_valid_score, self.cur_step, stop_flag, update_flag = early_stopping(
valid_score,
self.best_valid_score,
self.cur_step,
max_step=self.stopping_step,
bigger=self.valid_metric_bigger
)
valid_end_time = time.time()
self.best_valid_result['avg_val_time'] += valid_end_time - valid_start_time
valid_score_output = (set_color("epoch %d evaluating", 'green') + " [" + set_color("time", 'blue')
+ ": %.2fs, " + set_color("valid_score", 'blue') + ": %f]") % \
(epoch_idx, valid_end_time - valid_start_time, valid_score)
valid_result_output = set_color('valid result', 'blue') + ': \n' + dict2str(valid_result)
if verbose:
self.logger.info(valid_score_output)
self.logger.info(valid_result_output)
self.tensorboard.add_scalar('Valid_score', valid_score, epoch_idx)
if update_flag:
if saved:
self._save_checkpoint(epoch_idx)
update_output = set_color('Saving current best', 'blue') + ': %s' % self.saved_model_file
if verbose:
self.logger.info(update_output)
self.best_valid_result.update(valid_result)
if callback_fn:
callback_fn(epoch_idx, valid_score)
if stop_flag and use_early_stopping:
stop_output = 'Finished training, best eval result in epoch %d' % \
(epoch_idx - self.cur_step * self.eval_step)
if verbose:
self.logger.info(stop_output)
break
if self.gpu_available:
gpu_usage = torch.cuda.max_memory_reserved(self.device) / 1024 ** 3
self.best_valid_result['max_gpu_usage'] = max(self.best_valid_result['max_gpu_usage'], gpu_usage)
del train_loss, valid_score, valid_result
torch.cuda.empty_cache()
self.best_valid_result['avg_trn_time'] /= self.epochs
self.best_valid_result['avg_val_time'] /= self.epochs
self._add_hparam_to_tensorboard(self.best_valid_score)
return self.best_valid_score, self.best_valid_result
@torch.no_grad()
def evaluate(self, eval_data, load_best_model=True, model_file=None, show_progress=False):
if not eval_data:
return
if load_best_model:
if model_file:
checkpoint_file = model_file
else:
checkpoint_file = self.saved_model_file
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['state_dict'])
self.model.load_other_parameter(checkpoint.get('other_parameter'))
message_output = 'Loading model structure and parameters from {}'.format(checkpoint_file)
self.logger.info(message_output)
start_eval_time = time.time()
self.model.eval()
if isinstance(eval_data, FullSortEvalDataLoader):
eval_func = self._full_sort_batch_eval
if self.item_tensor is None:
self.item_tensor = eval_data.dataset.get_item_feature().to(self.device)
else:
eval_func = self._neg_sample_batch_eval
if self.config['eval_type'] == EvaluatorType.RANKING:
self.tot_item_num = eval_data.dataset.item_num
iter_data = (
tqdm(
eval_data,
total=len(eval_data),
ncols=100,
desc=set_color(f"Evaluate ", 'pink'),
) if show_progress else eval_data
)
for batch_idx, batched_data in enumerate(iter_data):
interaction, scores, positive_u, positive_i = eval_func(batched_data)
if self.gpu_available and show_progress:
iter_data.set_postfix_str(set_color('GPU RAM: ' + get_gpu_usage(self.device), 'yellow'))
self.eval_collector.eval_batch_collect(scores, interaction, positive_u, positive_i)
self.eval_collector.model_collect(self.model)
struct = self.eval_collector.get_data_struct()
result = self.evaluator.evaluate(struct)
if load_best_model:
end_eval_time = time.time()
self.best_valid_result['avg_tst_time'] += end_eval_time - start_eval_time
return result
|
StarcoderdataPython
|
6480659
|
<reponame>crashb/Cryptopals
# solution to http://cryptopals.com/sets/3/challenges/23
# clone a MT19937 PRNG from its output
import time
from Challenge21 import MT19937
# undoes the right-shift and XOR tempering operation on a value in MT19937.
# returns untempered value (int)
def unShiftRightXOR(value, shift):
result = 0
# part length is equal to shift length - iterate through parts of value
for i in range(0, 32, shift):
# get part of value - use a bit mask of length shift
partMask = ((2**shift - 1) << (32 - shift)) >> i
part = value & partMask
# xor the next part
value ^= part >> shift
# add part to result
result |= part
return result
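
# Why this inverts the tempering step: for y = x ^ (x >> shift), the top
# `shift` bits of y already equal the top `shift` bits of x, and each loop
# iteration above uses the bits recovered so far to peel off the next
# `shift`-bit chunk, so unShiftRightXOR(x ^ (x >> shift), shift) == x for any
# 32-bit x.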
# undoes the left-shift and XOR tempering operation on a value in MT19937.
# also takes a mask value to untemper the value
# returns untempered value (int)
def unShiftLeftXOR(value, shift, mask):
result = 0
# part length is equal to shift length - iterate through parts of value
for i in range(0, 32, shift):
# get part of value
partMask = (0xFFFFFFFF >> (32 - shift)) << i
part = partMask & value
# xor the next part
value ^= (part << shift) & mask
# add part to result
result |= part
return result
# fully untempers a given value from MT19937 to get the untempered matrix value
# returns untempered value (int)
def untemper(value):
result = value
result = unShiftRightXOR(result, 18)
result = unShiftLeftXOR(result, 15, 0xEFC60000)
result = unShiftLeftXOR(result, 7, 0x9D2C5680)
result = unShiftRightXOR(result, 11)
return result
# gets 624 random numbers from initialPRNG (MT19937) and untempers each of them.
# these untempered values are spliced into another PRNG - the state of the PRNG is duplicated.
# returns newPRNG (MT19937)
def cloneMT19937(initialPRNG):
newPRNG = MT19937(0)
for i in range(0, 624):
untempered = untemper(initialPRNG.extract_number())
newPRNG.mt[i] = untempered
return newPRNG
if __name__ == "__main__":
currentTime = int(time.time())
randomGen = MT19937(currentTime)
clonedGen = cloneMT19937(randomGen)
print("Cloned output: " + str(clonedGen.extract_number()))
print("Actual output: " + str(randomGen.extract_number()))
|
StarcoderdataPython
|
1984054
|
<gh_stars>10-100
import numpy as np
import torch
import pdb
import torch.nn as nn
from torch.autograd import Variable
# from pyquaternion import Quaternion
inds = np.array([0, -1, -2, -3, 1, 0, 3, -2, 2, -3, 0, 1, 3, 2, -1, 0]).reshape(4,4)
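
# `inds` encodes the Hamilton product: entry [i][j] names the (signed)
# component of q2 that multiplies q1[:, j] when building component i of q1*q2,
# with quaternions stored as (w, x, y, z):
#   (q1*q2).w = w1*w2 - x1*x2 - y1*y2 - z1*z2
#   (q1*q2).x = w1*x2 + x1*w2 + y1*z2 - z1*y2
#   (q1*q2).y = w1*y2 - x1*z2 + y1*w2 + z1*x2
#   (q1*q2).z = w1*z2 + x1*y2 - y1*x2 + z1*w2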
def hamilton_product(q1, q2):
q_size = q1.size()
# q1 = q1.view(-1, 4)
# q2 = q2.view(-1, 4)
q1_q2_prods = []
for i in range(4):
        # Hack to treat index 0 as having a positive sign: add 0.01 before taking the sign.
q2_permute_0 = q2[:, np.abs(inds[i][0])]
q2_permute_0 = q2_permute_0 * np.sign(inds[i][0] + 0.01)
q2_permute_1 = q2[:, np.abs(inds[i][1])]
q2_permute_1 = q2_permute_1 * np.sign(inds[i][1] + 0.01)
q2_permute_2 = q2[:, np.abs(inds[i][2])]
q2_permute_2 = q2_permute_2 * np.sign(inds[i][2] + 0.01)
q2_permute_3 = q2[:, np.abs(inds[i][3])]
q2_permute_3 = q2_permute_3 * np.sign(inds[i][3] + 0.01)
q2_permute = torch.stack([q2_permute_0, q2_permute_1, q2_permute_2, q2_permute_3], dim=1)
q1q2_v1 = torch.sum(q1 * q2_permute, dim=1).view(-1, 1)
q1_q2_prods.append(q1q2_v1)
q_ham = torch.cat(q1_q2_prods, dim=1)
# q_ham = q_ham.view(q_size)
return q_ham
def rotate_quat(q1, q2):
# q1 is N x 4
# q2 is N x 4
return hamilton_product(q1, q2)
def quat_conjugate(quat):
# quat = quat.view(-1, 4)
q0 = quat[:, 0]
q1 = -1 * quat[:, 1]
q2 = -1 * quat[:, 2]
q3 = -1 * quat[:, 3]
q_conj = torch.stack([q0, q1, q2, q3], dim=1)
return q_conj
def get_random_quat():
# q = Quaternion.random()
q = (np.random.rand(4)*2 -1)
q = q/np.linalg.norm(q)
# q_n = np.array(q.elements, dtype=np.float32)
quat = Variable(torch.from_numpy(q).float()).view(1, -1).view(1, -1)
return quat, q
# def convert_quat_to_euler(quat):
# q0 = Quaternion(quat.cpu().numpy())
# return q0.degrees, q0.axis
def test_hamilton_product():
conjugate_quat_module = quat_conjugate
quat1, q1 = get_random_quat()
quat1_c = conjugate_quat_module(quat1)
quat1 = quat1.repeat(10, 1)
quat1_c = quat1_c.repeat(10, 1)
quat_product = hamilton_product(quat1, quat1_c)
assert np.abs(1 - torch.mean(torch.sum(quat_product.view(-1, 4), 1)).item()) < 1E-4, 'Test1 error hamilton product'
quat1, q1 = get_random_quat()
quat2, q2 = get_random_quat()
quat_product = hamilton_product(quat1, quat2).data.numpy().squeeze()
import transformations
q_product = transformations.quaternion_multiply(q1, q2)
# q_product = np.array((q1 * q2).elements, dtype=np.float32)
assert np.mean(np.abs(quat_product - q_product)) < 1E-4, 'Error in hamilton test 2'
if __name__ == "__main__":
test_hamilton_product()
|
StarcoderdataPython
|
3525758
|
<reponame>Anthlis/My_100_Days_Of_Python
def my_function():
try:
1 / 0
except ZeroDivisionError:
pass
if __name__ == "__main__":
import timeit
setup = "from __main__ import my_function"
print(timeit.timeit("my_function()", setup=setup))
|
StarcoderdataPython
|
3394685
|
<gh_stars>1-10
from tfx.orchestration import data_types
from tfx import v1 as tfx
import os
import sys
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
)
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, "..")))
from utils import config, custom_components
def create_pipeline(
num_epochs: data_types.RuntimeParameter,
learning_rate: data_types.RuntimeParameter,
use_gpu: bool,
) -> tfx.dsl.Pipeline:
"""Implements the penguin pipeline with TFX."""
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = tfx.components.CsvExampleGen(input_base=config.DATA_ROOT)
# Generate hyperparameters.
hyperparams_gen = custom_components.hyperparameters_gen(
num_epochs=num_epochs,
learning_rate=learning_rate
).with_id("HyperparamsGen")
# NEW: Configuration for Vertex AI Training.
# This dictionary will be passed as `CustomJobSpec`.
vertex_job_spec = {
"project": config.GCP_PROJECT,
"worker_pool_specs": [
{
"machine_spec": {
"machine_type": "n1-standard-4",
},
"replica_count": 1,
"container_spec": {
"image_uri": "gcr.io/tfx-oss-public/tfx:{}".format(tfx.__version__),
},
}
],
}
if use_gpu:
# See https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#acceleratortype
# for available machine types.
vertex_job_spec["worker_pool_specs"][0]["machine_spec"].update(
{"accelerator_type": "NVIDIA_TESLA_K80", "accelerator_count": 1}
)
# Trains a model using Vertex AI Training.
# NEW: We need to specify a Trainer for GCP with related configs.
trainer = tfx.extensions.google_cloud_ai_platform.Trainer(
module_file=config.MODULE_FILE,
examples=example_gen.outputs["examples"],
train_args=tfx.proto.TrainArgs(num_steps=100),
eval_args=tfx.proto.EvalArgs(num_steps=5),
hyperparameters=hyperparams_gen.outputs["hyperparameters"],
custom_config={
tfx.extensions.google_cloud_ai_platform.ENABLE_UCAIP_KEY: True,
tfx.extensions.google_cloud_ai_platform.UCAIP_REGION_KEY: config.GCP_REGION,
tfx.extensions.google_cloud_ai_platform.TRAINING_ARGS_KEY: vertex_job_spec,
"use_gpu": use_gpu,
},
)
# Pushes the model to a filesystem destination.
pusher = tfx.components.Pusher(
model=trainer.outputs["model"],
push_destination=tfx.proto.PushDestination(
filesystem=tfx.proto.PushDestination.Filesystem(
base_directory=config.SERVING_MODEL_DIR
)
),
)
components = [
example_gen,
hyperparams_gen,
trainer,
pusher,
]
return tfx.dsl.Pipeline(
pipeline_name=config.PIPELINE_NAME, pipeline_root=config.PIPELINE_ROOT, components=components
)
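
# Minimal construction sketch (illustrative only; the RuntimeParameter values
# and the surrounding runner wiring are assumptions, not part of this module):
#
#   pipeline = create_pipeline(
#       num_epochs=data_types.RuntimeParameter(name="num_epochs", default=10, ptype=int),
#       learning_rate=data_types.RuntimeParameter(name="learning_rate", default=1e-3, ptype=float),
#       use_gpu=False,
#   )
#
# The resulting tfx.dsl.Pipeline is then handed to a TFX runner (for example
# the Kubeflow V2 dag runner) for compilation and submission.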
|
StarcoderdataPython
|
4951103
|
import re
def test_all_information_on_home_page(app):
contact_from_home_page =app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
assert contact_from_home_page.address == clear(contact_from_edit_page.address)
assert contact_from_home_page.all_emails == merge_contact_info_emails(contact_from_edit_page)
def test_compare_information_home_page_db(app,db):
contacts_from_home_page = app.contact.get_contact_list()
for contact in contacts_from_home_page:
assert contact.firstname == db.get_contact_by_id(contact.id).firstname
assert contact.lastname == db.get_contact_by_id(contact.id).lastname
assert clear(contact.address) == clear(db.get_contact_by_id(contact.id).address)
assert contact.all_emails == merge_contact_info_emails(db.get_contact_by_id(contact.id))
def clear(s):
return re.sub("[() -]","",s)
def merge_contact_info_emails(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.email,contact.email2,contact.email3]))))
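
# Worked example for the helpers above (illustrative): clear() strips spaces,
# dashes and parentheses, and merge_contact_info_emails() drops None/empty
# emails before joining with newlines, e.g.
#   clear("(123) 456-78") -> "12345678"
#   a contact with email="a@x.com", email2="", email3=None -> "a@x.com"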
|
StarcoderdataPython
|
4918955
|
_base_url = 'http://www.biomart.org/biomart/martservice'
def _attribute_xml( attribute ):
"Returns xml suitable for inclusion into query"
return '<Attribute name = "%s" />' % attribute
def _filter_xml( name, value ):
"Returns xml suitable for inclusion into query"
return '<Filter name = "%s" value = "%s"/>' % ( name, value )
def _meta_query( fields ):
import urllib
return urllib.urlopen(
_base_url,
urllib.urlencode( fields )
)
def _convert_tab_separated( handle ):
for l in handle:
l = l.strip()
if '' != l:
yield l.strip().split( '\t' )
def registry():
for f in _convert_tab_separated(
_meta_query(
{
'type' : 'registry',
}
)
): yield f
#print '\n'.join([str(x) for x in registry()])
#raise
def datasets( mart = 'ensembl' ):
for f in _convert_tab_separated(
_meta_query(
{
'type' : 'datasets',
'mart' : mart
}
)
): yield f
#print '\n'.join([str(x) for x in datasets( 'sequence' ) if -1 != x[1].find('mmus')])
#raise
def configuration( dataset = 'mmusculus_gene_ensembl' ):
for f in _convert_tab_separated(
_meta_query(
{
'type' : 'configuration',
'dataset' : dataset
}
)
): yield f
#'\n'.join([str(c) for c in configuration()])
#raise
def attributes( dataset = 'mmusculus_gene_ensembl' ):
for f in _convert_tab_separated(
_meta_query(
{
'type' : 'attributes',
'dataset' : dataset
}
)
): yield f
def filters( dataset = 'mmusculus_gene_ensembl' ):
for f in _convert_tab_separated(
_meta_query(
{
'type' : 'filters',
'dataset' : dataset
}
)
): yield f
#for f in filters():
# if f[0].find('go') != -1:
# print f
def _query( xml ):
"""Execute query and return result"""
import urllib
data = urllib.urlencode( [ ( 'query', xml ) ] )
return urllib.urlopen(
_base_url,
data
)
_query_xml_template = """
<!DOCTYPE Query>
<Query
virtualSchemaName = "default"
Header = "1"
count = "%s"
softwareVersion = "0.5" >
<Dataset name = "%s" interface = "default" >
%s
%s
</Dataset>
</Query>"""
class Query( object ):
"""A biomart query"""
def __init__(
self,
filters = [ ],
attributes = [ ],
dataset_name = 'mmusculus_gene_ensembl'
):
self.filters = filters
self.attributes = attributes
self.dataset_name = dataset_name
def _build_xml( self, count = False ):
"""Builds an xml query to send to biomart web service"""
if count: count_string = '1'
else: count_string = ''
return _query_xml_template % (
count_string,
self.dataset_name,
'\n'.join( [ _attribute_xml( a ) for a in self.attributes ] ),
'\n'.join( [ _filter_xml( n, v ) for n, v in self.filters ] )
)
def get_count( self ):
handle = _query( self._build_xml( count = True ) )
result = handle.read()
try:
return int( result )
except ValueError:
            raise RuntimeError( 'Did not get a count back from the server: %s' % result )
def __call__( self ):
return _query( self._build_xml( count = False ) )
if '__main__' == __name__:
Q = Query(
filters = [
#( 'ensembl_gene_id', 'ENSMUSG00000029754' ),
#( 'ensembl_gene_id', 'ENSMUSG00000020330' ),
( 'go', 'GO:0004872' ),
( 'go', 'GO:0005540' ),
],
attributes = [
'ensembl_gene_id',
'ensembl_transcript_id',
],
)
print Q.get_count()
|
StarcoderdataPython
|
12823051
|
<gh_stars>1-10
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('accounts.views',
url(r'^login/$', 'login', name='login'),
url(r'^logout/$', 'logout', name='logout'),
url(r'^register/$', 'register', name='register'),
url(r'^list/$', 'userlist', name='userlist'),
url(r'^profile/(?P<user_id>\d+)/$', 'profile', name='profile'),
)
|
StarcoderdataPython
|
1840575
|
""" Class description goes here. """
from collections import namedtuple
import logging
from dataclay.commonruntime.Runtime import getRuntime
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = '2016 Barcelona Supercomputing Center (BSC-CNS)'
logger = logging.getLogger(__name__)
DCLAY_PROPERTY_PREFIX = "_dataclay_property_"
DCLAY_GETTER_PREFIX = "$$get"
DCLAY_SETTER_PREFIX = "$$set"
DCLAY_REPLICATED_SETTER_PREFIX = "$$rset"
PreprocessedProperty = namedtuple('PreprocessedProperty', field_names=[
'name', 'position', 'type', 'beforeUpdate', 'afterUpdate', 'inMaster'])
class DynamicProperty(property):
"""DataClay implementation of the `property` Python mechanism.
This class is similar to property but is not expected to be used with
decorators. Instead, the initialization is done from the ExecutionGateway
metaclass, containing the required information about the property
"""
__slots__ = ("p_name",)
def __init__(self, property_name):
logger.debug("Initializing DynamicProperty %s", property_name)
"""Initialize the DynamicProperty with the name of its property.
Not calling super deliberately.
The semantics and behaviour changes quite a bit from the property
built-in, here we only store internally the name of the property and
use dataClay friendly setters and getters.
"""
self.p_name = property_name
def __get__(self, obj, type_=None):
"""Getter for the dataClay property
If the object is loaded, perform the getter to the local instance (this
is the scenario for local instances and Execution Environment fully
loaded instances).
If the object is not loaded, perform a remote execution (this is the
scenario for client remote instances and also Execution Environment
non-loaded instances, which may either "not-yet-loaded" or remote)
"""
is_exec_env = getRuntime().is_exec_env()
logger.debug("Calling getter for property %s in %s", self.p_name,
"an execution environment" if is_exec_env else "the client")
if (is_exec_env and obj.is_loaded()) or (not is_exec_env and not obj.is_persistent()):
try:
obj.set_dirty(True) # set dirty = true for language types like lists, dicts, that are get and modified. TODO: improve this.
return object.__getattribute__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name))
except AttributeError:
logger.warning("Received AttributeError while accessing property %s on object %r",
self.p_name, obj)
logger.debug("Internal dictionary of the object: %s", obj.__dict__)
raise
else:
return getRuntime().execute_implementation_aux(DCLAY_GETTER_PREFIX + self.p_name, obj, (), obj.get_hint())
def __set__(self, obj, value):
"""Setter for the dataClay property
See the __get__ method for the basic behavioural explanation.
"""
logger.debug("Calling setter for property %s", self.p_name)
is_exec_env = getRuntime().is_exec_env()
if (is_exec_env and obj.is_loaded()) or (not is_exec_env and not obj.is_persistent()):
object.__setattr__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name), value)
if is_exec_env:
obj.set_dirty(True)
else:
getRuntime().execute_implementation_aux(DCLAY_SETTER_PREFIX + self.p_name, obj, (value,), obj.get_hint())
class ReplicatedDynamicProperty(DynamicProperty):
def __init__(self, property_name, before_method, after_method, in_master):
logger.debug("Initializing ReplicatedDynamicProperty %s | BEFORE = %s | AFTER = %s | INMASTER = %s", property_name, before_method, after_method, in_master)
super(ReplicatedDynamicProperty, self).__init__(property_name)
self.beforeUpdate = before_method
self.afterUpdate = after_method
self.inMaster = in_master
def __set__(self, obj, value):
"""Setter for the dataClay property
See the __get__ method for the basic behavioural explanation.
"""
logger.debug("Calling replicated setter for property %s", self.p_name)
is_client = not getRuntime().is_exec_env()
if is_client and not obj.is_persistent():
object.__setattr__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name), value)
elif not is_client and not obj.is_loaded():
getRuntime().execute_implementation_aux(DCLAY_SETTER_PREFIX + self.p_name, obj, (value,), obj.get_hint())
else:
if self.inMaster:
                logger.debug("Calling update in master [%s] for property %s with value %s", obj.get_master_location(), self.p_name, value)
getRuntime().execute_implementation_aux('__setUpdate__', obj, (obj, self.p_name, value, self.beforeUpdate, self.afterUpdate), obj.get_master_location())
else:
logger.debug("Calling update locally for property %s with value %s", self.p_name, value)
obj.__setUpdate__(obj, self.p_name, value, self.beforeUpdate, self.afterUpdate)
obj.set_dirty(True)
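# --- Illustrative sketch (not part of dataClay) --------------------------------
# Minimal, self-contained analogue of the descriptor pattern DynamicProperty is
# built on: attribute access is routed through an internal attribute named with
# DCLAY_PROPERTY_PREFIX, the same convention ExecutionGateway uses when wiring
# real dataClay properties. The class and demo function below are hypothetical
# and never touch the dataClay runtime.
class _LocalDynamicProperty(object):
    __slots__ = ("p_name",)
    def __init__(self, property_name):
        self.p_name = property_name
    def __get__(self, obj, type_=None):
        return object.__getattribute__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name))
    def __set__(self, obj, value):
        object.__setattr__(obj, "%s%s" % (DCLAY_PROPERTY_PREFIX, self.p_name), value)
def _demo_local_dynamic_property():
    """Illustrative only: attach the analogue descriptor to a throwaway class."""
    Demo = type("Demo", (object,), {"value": _LocalDynamicProperty("value")})
    instance = Demo()
    instance.value = 42
    return instance.value  # -> 42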
|
StarcoderdataPython
|
188672
|
<filename>tests/test_opensearch.py
import contextlib
import json
import os
import unittest
from collections import deque
from copy import deepcopy
from pprint import pformat
from typing import TYPE_CHECKING
from urllib.parse import parse_qsl, urlparse
import mock
import pytest
from pyramid import testing
from pyramid.testing import DummyRequest
from pywps.inout.inputs import LiteralInput
from tests.utils import setup_mongodb_processstore
from weaver.processes import opensearch
from weaver.processes.constants import OPENSEARCH_AOI, OPENSEARCH_END_DATE, OPENSEARCH_START_DATE
from weaver.processes.opensearch import _make_specific_identifier # noqa: W0212
from weaver.utils import get_any_id
from weaver.wps_restapi.processes import processes
if TYPE_CHECKING:
from typing import Dict
from weaver.typedefs import DataSourceOpenSearch
OSDD_URL = "http://geo.spacebel.be/opensearch/description.xml"
COLLECTION_IDS = {
"sentinel2": "EOP:IPT:Sentinel2",
"probav": "EOP:VITO:PROBAV_P_V001",
"deimos": "DE2_PS3_L1C",
}
def assert_json_equals(json1, json2):
def ordered_json(obj):
if isinstance(obj, dict):
return sorted((str(k), ordered_json(v)) for k, v in obj.items())
elif isinstance(obj, list):
return sorted(ordered_json(x) for x in obj)
else:
return str(obj)
json1_lines = pformat(ordered_json(json1)).split("\n")
json2_lines = pformat(ordered_json(json2)).split("\n")
for line1, line2 in zip(json1_lines, json2_lines):
assert line1 == line2
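def _example_assert_json_equals():
    # Illustrative only, not part of the original suite: assert_json_equals
    # ignores both dict key order and list order, so these two documents compare
    # as equal. Left uncalled on purpose.
    assert_json_equals({"b": [2, 1], "a": "x"}, {"a": "x", "b": [1, 2]})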
def get_test_file(*args):
return os.path.join(os.path.dirname(__file__), *args)
def load_json_test_file(filename):
    with open(get_test_file("opensearch/json", filename)) as json_file:
        return json.load(json_file)
def make_request(**kw):
request = DummyRequest(**kw)
if request.registry.settings is None:
request.registry.settings = {}
request.registry.settings["weaver.url"] = "localhost"
request.registry.settings["weaver.wps_path"] = "/ows/wps"
return request
class WpsHandleEOITestCase(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def get_dummy_payload():
return {
"processDescription": {
"process": {
"identifier": "workflow_stacker_sfs_id",
"title": "Application StackCreation followed by SFS dynamically added by POST /processes",
"owsContext": {
"offering": {
"code": "http://www.opengis.net/eoc/applicationContext/cwl",
"content": {
"href": "http://some.host/applications/cwl/multisensor_ndvi.cwl"
},
}
},
}
}
}
def get_opensearch_payload():
return load_json_test_file("opensearch_deploy.json")
def test_transform_execute_parameters_wps():
def make_input(id_, value):
input_ = LiteralInput(id_, "", data_type="string")
input_.data = value
return input_
def make_deque(id_, value):
input_ = make_input(id_, value)
return id_, deque([input_])
inputs = dict(
[
make_deque(OPENSEARCH_START_DATE, "2018-01-30T00:00:00.000Z"),
make_deque(OPENSEARCH_END_DATE, "2018-01-31T23:59:59.999Z"),
make_deque(OPENSEARCH_AOI, "100.4,15.3,104.6,19.3"),
make_deque("files", COLLECTION_IDS["sentinel2"]),
make_deque("output_file_type", "GEOTIFF"),
make_deque("output_name", "stack_result.tif"),
]
)
mocked_query = ["file:///something.SAFE"]
files_inputs = [make_input("files", "opensearch" + m) for m in mocked_query]
expected = dict(
[
make_deque("output_file_type", "GEOTIFF"),
make_deque("output_name", "stack_result.tif"),
("files", deque(files_inputs)),
]
)
with mock.patch.object(opensearch.OpenSearchQuery, "query_datasets", return_value=mocked_query):
eo_image_source_info = make_eo_image_source_info("files", COLLECTION_IDS["sentinel2"])
mime_types = {"files": eo_image_source_info["files"]["mime_types"]}
transformed = opensearch.query_eo_images_from_wps_inputs(inputs, eo_image_source_info, mime_types)
def compare(items):
return sorted([(k, [v.data for v in values]) for k, values in items.items()])
assert compare(transformed) == compare(expected)
# FIXME: move appropriately when adding BoundingBox support (https://github.com/crim-ca/weaver/issues/51)
@pytest.mark.skip(reason="The user-provided bbox is now a comma delimited string, not a WKT.")
def test_load_wkt():
data = [
("POLYGON ((100 15, 104 15, 104 19, 100 19, 100 15))", "100.0,15.0,104.0,19.0"),
(
"LINESTRING (100 15, 104 15, 104 19, 100 19, 100 15)",
"100.0,15.0,104.0,19.0",
),
("LINESTRING (100 15, 104 19)", "100.0,15.0,104.0,19.0"),
("MULTIPOINT ((10 10), (40 30), (20 20), (30 10))", "10.0,10.0,40.0,30.0"),
("POINT (30 10)", "30.0,10.0,30.0,10.0"),
]
for wkt, expected in data:
assert opensearch.load_wkt(wkt) == expected
def test_deploy_opensearch():
store = setup_mongodb_processstore()
class MockDB(object):
def __init__(self, *args):
pass
def get_store(self, *_): # noqa: E811
return store
def _get_mocked(req):
return req.registry.settings
# mock db functions called by add_local_process
with contextlib.ExitStack() as stack:
stack.enter_context(mock.patch("weaver.wps_restapi.processes.processes.get_db", side_effect=MockDB))
stack.enter_context(mock.patch("weaver.wps_restapi.processes.utils.get_db", side_effect=MockDB))
stack.enter_context(mock.patch("weaver.wps_restapi.processes.utils.get_settings", side_effect=_get_mocked))
stack.enter_context(mock.patch("weaver.processes.utils.get_db", side_effect=MockDB))
stack.enter_context(mock.patch("weaver.processes.utils.get_settings", side_effect=_get_mocked))
# given
opensearch_payload = get_opensearch_payload()
initial_payload = deepcopy(opensearch_payload)
request = make_request(json=opensearch_payload, method="POST")
process_id = get_any_id(opensearch_payload["processDescription"]["process"])
# when
response = processes.add_local_process(request)
# then
assert response.code == 201
assert response.json["deploymentDone"]
process = store.fetch_by_id(process_id)
assert process
assert process.package
assert process.payload
assert_json_equals(process.payload, initial_payload)
def test_handle_eoi_unique_aoi_unique_toi(): # noqa
inputs = load_json_test_file("eoimage_inputs_example.json")
expected = load_json_test_file("eoimage_unique_aoi_unique_toi.json")
output = opensearch.EOImageDescribeProcessHandler(
inputs
).to_opensearch(unique_aoi=True, unique_toi=True)
assert_json_equals(output, expected)
def test_handle_eoi_unique_aoi_non_unique_toi():
inputs = load_json_test_file("eoimage_inputs_example.json")
expected = load_json_test_file("eoimage_unique_aoi_non_unique_toi.json")
output = opensearch.EOImageDescribeProcessHandler(
inputs
).to_opensearch(unique_aoi=True, unique_toi=False)
assert_json_equals(output, expected)
def test_handle_eoi_non_unique_aoi_unique_toi():
inputs = load_json_test_file("eoimage_inputs_example.json")
expected = load_json_test_file("eoimage_non_unique_aoi_unique_toi.json")
output = opensearch.EOImageDescribeProcessHandler(
inputs
).to_opensearch(unique_aoi=False, unique_toi=True)
assert_json_equals(output, expected)
def test_get_additional_parameters():
data = {
"additionalParameters": [
{
"role": "http://www.opengis.net/eoc/applicationContext",
"parameters": [
{"name": "UniqueAOI", "values": ["true"]},
{"name": "UniqueTOI", "values": ["true"]},
],
}
]
}
params = opensearch.get_additional_parameters(data)
assert ("UniqueAOI", ["true"]) in params
assert ("UniqueTOI", ["true"]) in params
def get_template_urls(collection_id):
settings = {
"weaver.request_options": {
"requests": [
# description schema can be *extremely* slow to respond, but it does eventually
{"url": "http://geo.spacebel.be/opensearch/description.xml", "method": "get", "timeout": 180}
]
}
}
all_fields = set()
opq = opensearch.OpenSearchQuery(collection_id, osdd_url=OSDD_URL, settings=settings)
template = opq.get_template_url()
params = parse_qsl(urlparse(template).query)
param_names = list(sorted(p[0] for p in params))
if all_fields:
all_fields = all_fields.intersection(param_names)
else:
all_fields.update(param_names)
fields_in_all_queries = list(sorted(all_fields))
expected = [
"bbox",
"endDate",
"geometry",
"httpAccept",
"lat",
"lon",
"maximumRecords",
"name",
"parentIdentifier",
"radius",
"startDate",
"startRecord",
"uid",
]
assert not set(expected) - set(fields_in_all_queries)
@pytest.mark.skip(reason="Collection 'sentinel2' dataset series cannot be found (decommission).")
@pytest.mark.slow
@pytest.mark.online
@pytest.mark.testbed14
def test_get_template_sentinel2():
get_template_urls(COLLECTION_IDS["sentinel2"])
@pytest.mark.skip(reason="Collection 'probav' dataset series cannot be found (decommission).")
@pytest.mark.online
@pytest.mark.testbed14
def test_get_template_probav():
get_template_urls(COLLECTION_IDS["probav"])
def inputs_unique_aoi_toi(files_id):
return {
OPENSEARCH_AOI: deque([LiteralInput(OPENSEARCH_AOI, "Area", data_type="string")]),
OPENSEARCH_START_DATE: deque(
[LiteralInput(OPENSEARCH_START_DATE, "Start Date", data_type="string")]
),
OPENSEARCH_END_DATE: deque([LiteralInput(OPENSEARCH_END_DATE, "End Date", data_type="string")]),
files_id: deque(
[LiteralInput(files_id, "Collection of the data.", data_type="string", max_occurs=4)]
),
}
def inputs_non_unique_aoi_toi(files_id):
end_date = _make_specific_identifier(OPENSEARCH_END_DATE, files_id)
start_date = _make_specific_identifier(OPENSEARCH_START_DATE, files_id)
aoi = _make_specific_identifier(OPENSEARCH_AOI, files_id)
return {
aoi: deque([LiteralInput(aoi, "Area", data_type="string")]),
start_date: deque([LiteralInput(start_date, "Area", data_type="string")]),
end_date: deque([LiteralInput(end_date, "Area", data_type="string")]),
files_id: deque(
[LiteralInput(files_id, "Collection of the data.", data_type="string", max_occurs=4)]
),
}
def query_param_names(unique_aoi_toi, identifier):
end_date, start_date, aoi = OPENSEARCH_END_DATE, OPENSEARCH_START_DATE, OPENSEARCH_AOI
if not unique_aoi_toi:
end_date = _make_specific_identifier(end_date, identifier)
start_date = _make_specific_identifier(start_date, identifier)
aoi = _make_specific_identifier(aoi, identifier)
return end_date, start_date, aoi
def sentinel2_inputs(unique_aoi_toi=True):
sentinel_id = "image-sentinel2"
end_date, start_date, aoi = query_param_names(unique_aoi_toi, sentinel_id)
if unique_aoi_toi:
inputs = inputs_unique_aoi_toi(sentinel_id)
else:
inputs = inputs_non_unique_aoi_toi(sentinel_id)
inputs[sentinel_id][0].data = COLLECTION_IDS["sentinel2"]
inputs[end_date][0].data = u"2018-01-31T23:59:59.999Z"
inputs[start_date][0].data = u"2018-01-30T00:00:00.000Z"
# inputs[aoi][0].data = u"POLYGON ((100 15, 104 15, 104 19, 100 19, 100 15))"
inputs[aoi][0].data = u"100.0, 15.0, 104.0, 19.0"
eo_image_source_info = make_eo_image_source_info(sentinel_id, COLLECTION_IDS["sentinel2"])
return inputs, eo_image_source_info
def probav_inputs(unique_aoi_toi=True):
probav_id = "image-probav"
end_date, start_date, aoi = query_param_names(unique_aoi_toi, probav_id)
if unique_aoi_toi:
inputs = inputs_unique_aoi_toi(probav_id)
else:
inputs = inputs_non_unique_aoi_toi(probav_id)
inputs[probav_id][0].data = COLLECTION_IDS["probav"]
inputs[end_date][0].data = u"2018-01-31T23:59:59.999Z"
inputs[start_date][0].data = u"2018-01-30T00:00:00.000Z"
# inputs[aoi][0].data = u"POLYGON ((100 15, 104 15, 104 19, 100 19, 100 15))"
inputs[aoi][0].data = u"100.0, 15.0, 104.0, 19.0"
eo_image_source_info = make_eo_image_source_info(
probav_id, COLLECTION_IDS["probav"]
)
return inputs, eo_image_source_info
def make_eo_image_source_info(name, collection_id):
# type: (str, str) -> Dict[str, DataSourceOpenSearch]
return {
name: {
"collection_id": collection_id,
"accept_schemes": ["http", "https"],
"mime_types": ["application/zip"],
"rootdir": "",
"ades": "http://localhost:5001",
"osdd_url": "http://geo.spacebel.be/opensearch/description.xml",
}
}
def deimos_inputs(unique_aoi_toi=True):
deimos_id = "image-deimos"
end_date, start_date, aoi = query_param_names(unique_aoi_toi, deimos_id)
inputs = inputs_unique_aoi_toi(deimos_id)
inputs[deimos_id][0].data = COLLECTION_IDS["deimos"]
inputs[start_date][0].data = u"2008-01-01T00:00:00Z"
inputs[end_date][0].data = u"2009-01-01T00:00:00Z"
# inputs[aoi][0].data = u"MULTIPOINT ((-117 32), (-115 34))"
inputs[aoi][0].data = u"-117, 32, -115, 34"
eo_image_source_info = make_eo_image_source_info(deimos_id, COLLECTION_IDS["deimos"])
return inputs, eo_image_source_info
@pytest.mark.xfail(reason="Record not available anymore although server still up and reachable.")
@pytest.mark.online
@pytest.mark.testbed14
def test_query_sentinel2():
inputs, eo_image_source_info = sentinel2_inputs()
mime_types = {k: eo_image_source_info[k]["mime_types"] for k in eo_image_source_info}
data = opensearch.query_eo_images_from_wps_inputs(inputs, eo_image_source_info, mime_types)
assert len(data["image-sentinel2"]) == inputs["image-sentinel2"][0].max_occurs
@pytest.mark.xfail(reason="Cannot login to protected 'probav' opensearch endpoint.")
@pytest.mark.online
@pytest.mark.testbed14
def test_query_probav():
inputs, eo_image_source_info = probav_inputs()
mime_types = {k: eo_image_source_info[k]["mime_types"] for k in eo_image_source_info}
data = opensearch.query_eo_images_from_wps_inputs(inputs, eo_image_source_info, mime_types)
assert len(data["image-probav"]) == inputs["image-probav"][0].max_occurs
@pytest.mark.skip(reason="The server is not implemented yet.")
@pytest.mark.online
@pytest.mark.testbed14
def test_query_deimos():
inputs, eo_image_source_info = deimos_inputs()
mime_types = {k: eo_image_source_info[k]["mime_types"] for k in eo_image_source_info}
data = opensearch.query_eo_images_from_wps_inputs(inputs, eo_image_source_info, mime_types)
assert len(data["image-deimos"]) == inputs["image-deimos"][0].max_occurs
@pytest.mark.xfail(reason="Cannot login to protected 'probav' opensearch endpoint.")
@pytest.mark.online
@pytest.mark.testbed14
def test_query_non_unique():
inputs_s2, eo_image_source_info_s2 = sentinel2_inputs(unique_aoi_toi=False)
inputs_probav, eo_image_source_info_probav = probav_inputs(unique_aoi_toi=False)
inputs = inputs_s2
inputs.update(inputs_probav)
eo_image_source_info = eo_image_source_info_s2
eo_image_source_info.update(eo_image_source_info_probav)
mime_types = {k: eo_image_source_info[k]["mime_types"] for k in eo_image_source_info}
data = opensearch.query_eo_images_from_wps_inputs(inputs, eo_image_source_info, mime_types)
assert len(data["image-sentinel2"]) == inputs["image-sentinel2"][0].max_occurs
assert len(data["image-probav"]) == inputs["image-probav"][0].max_occurs
|
StarcoderdataPython
|
3313291
|
<reponame>protwis/Protwis
from build.management.commands.base_build import Command as BaseBuild
from build.management.commands.build_ligand_functions import *
from django.conf import settings
from django.db.models import Prefetch
from django.utils.text import slugify
from ligand.models import Ligand, LigandType, AssayExperiment, LigandVendors, LigandVendorLink
from protein.models import Protein
import datamol as dm
import datetime
import os
import pandas as pd
class Command(BaseBuild):
help = "Import ChEMBL data from data repository"
bulk_size = 50000
def add_arguments(self, parser):
parser.add_argument("--test_run",
action="store_true",
help="Skip this during a test run",
default=False)
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing ligand records')
def handle(self, *args, **options):
if options["test_run"]:
print("Skipping in test run")
return
if options['purge']:
# delete any existing ChEMBL bioactivity data
            # For deleting the ligands - purge all ligand data using the GtP ligand build
print("Started purging ChEMBL bioactivity data")
self.purge_chembl_data()
print("Ended purging ChEMBL bioactivity data")
# Parse ChEMBL ligand data
print("Started building ChEMBL ligands")
self.build_chembl_ligands()
print("Ended building ChEMBL ligands")
# Parse ChEMBL bioactivity data
print("\n\nStarted building ChEMBL bioactivities")
self.build_chembl_bioactivities()
print("Ended building ChEMBL bioactivities")
# Parse ChEMBL/PubChem vendor data
print("\n\nStarted building PubChem vendor data")
self.build_pubchem_vendor_links()
print("Ended building PubChem vendor data")
@staticmethod
def purge_chembl_data():
Ligand.objects.all().delete()
@staticmethod
def build_chembl_ligands():
print("\n===============\n#1 Reading ChEMBL ligand data", datetime.datetime.now())
ligand_input_file = os.sep.join([settings.DATA_DIR, "ligand_data", "assay_data", "chembl_cpds.csv.gz"])
ligand_data = pd.read_csv(ligand_input_file, keep_default_na=False)
for column in ligand_data:
ligand_data[column] = ligand_data[column].replace({"":None})
print("Found", len(ligand_data), "ligands")
# Collect ChEMBL IDs from existing ligands
print("\n#2 Collecting ChEMBL IDs from existing ligands", datetime.datetime.now())
wr_chembl = WebResource.objects.get(slug="chembl_ligand")
wr_pubchem = WebResource.objects.get(slug="pubchem")
# Removing existing ligands based on ChEMBL IDs => second filter in loop necessary because of concatenated non-parent IDs
existing_ids = list(LigandID.objects.filter(web_resource=wr_chembl).values_list("index", flat=True).distinct())
filtered_ligands = ligand_data.loc[~ligand_data["molecule_chembl_id"].isin(existing_ids)].copy()
# Set ChEMBL ID as name for ligands without pref_name
filtered_ligands.loc[filtered_ligands['pref_name'].isnull(), 'pref_name'] = filtered_ligands.loc[filtered_ligands['pref_name'].isnull(), 'molecule_chembl_id']
# Parse all new small-molecule ChEMBL ligands
print("\n#3 Building new small-molecule ChEMBL ligands", datetime.datetime.now())
sm_data = filtered_ligands.loc[filtered_ligands["molecule_type"].isin(["Small molecule", "Oligosaccharide", None])].reset_index()
lig_entries = len(sm_data)
print("Found", lig_entries, "new small molecules")
# Additional matching via PubChem and InchiKeys
existing_cids = list(LigandID.objects.filter(web_resource=wr_pubchem).values_list("index", flat=True).distinct())
existing_inchis = list(Ligand.objects.exclude(inchikey=None).values_list("inchikey", flat=True).distinct())
smallmol = LigandType.objects.get(slug="small-molecule")
ligands = []
weblinks = []
for index, row in sm_data.iterrows():
insert = True
ids = [row['molecule_chembl_id']]
# Filtering on non-parent ChEMBL IDs
if row['other_ids'] != None:
extra_ids = row['other_ids'].split(";")
existing = list(set.intersection(set(extra_ids), set(existing_ids)))
if len(existing) > 0:
# Add missing link to parent ChEMBL ID
match = LigandID.objects.get(index=existing[0], web_resource=wr_chembl)
LigandID(index=row['molecule_chembl_id'], web_resource=wr_chembl, ligand_id = match.ligand_id).save()
print("Found existing non-parent ChEMBL", existing[0], "for parent", row['molecule_chembl_id'])
                    insert = False  # flag instead of 'continue' so the trailing bulk-insert check below still runs if this is the last row
else:
ids = ids + extra_ids
# Filtering on PubChem CIDs
if row['pubchem_cid'] != None:
cids = row['pubchem_cid'].split(";")
existing = list(set.intersection(set(cids), set(existing_cids)))
if len(existing) > 0:
# Add missing link to parent ChEMBL ID
match = LigandID.objects.get(index=existing[0], web_resource=wr_pubchem)
LigandID(index=row['molecule_chembl_id'], web_resource=wr_chembl, ligand_id = match.ligand_id).save()
                    insert = False  # flag instead of 'continue' so the trailing bulk-insert check below still runs if this is the last row
# For those rare cases in which neither the ChEMBL ID nor the PubChem ID was matched, but the InchiKey was
if insert and row['standard_inchi_key'] in existing_inchis:
ligand = Ligand.objects.get(inchikey=row['standard_inchi_key'])
LigandID(index=row['molecule_chembl_id'], web_resource=wr_chembl, ligand = ligand).save()
if row['pubchem_cid'] != None:
cids = row['pubchem_cid'].split(";")
for cid in cids:
LigandID(index=cid, web_resource=wr_pubchem, ligand = ligand).save()
insert = False
if insert:
# creating ligand
ligands.append(Ligand(name = row['pref_name'], ambiguous_alias = False))
ligands[-1].ligand_type = smallmol
ligands[-1].smiles = row['smiles']
ligands[-1].inchikey = row['standard_inchi_key']
ligands[-1].sequence = row["sequence"]
try:
input_mol = dm.to_mol(row['smiles'], sanitize=True)
if input_mol:
# Check if InChIKey has been set
if ligands[-1].inchikey == None:
ligands[-1].inchikey = dm.to_inchikey(input_mol)
# Cleaned InChIKey has been set
# ligands[-1].clean_inchikey = get_cleaned_inchikey(row['smiles'])
# Calculate RDkit properties
ligands[-1].mw = dm.descriptors.mw(input_mol)
ligands[-1].rotatable_bonds = dm.descriptors.n_rotatable_bonds(input_mol)
ligands[-1].hacc = dm.descriptors.n_hba(input_mol)
ligands[-1].hdon = dm.descriptors.n_hbd(input_mol)
ligands[-1].logp = dm.descriptors.clogp(input_mol)
                except Exception:
                    # Descriptor calculation failed for this molecule; leave the
                    # RDKit-derived properties unset and keep the ligand entry.
                    pass
# Adding ligand IDs
for id in ids:
weblinks.append({"link" : LigandID(index=id, web_resource=wr_chembl), "lig_idx" : len(ligands)-1})
if row['pubchem_cid'] != None:
cids = row['pubchem_cid'].split(";")
for cid in cids:
weblinks.append({"link" : LigandID(index=cid, web_resource=wr_pubchem), "lig_idx" : len(ligands)-1})
# BULK insert every X entries or last entry
if len(ligands) == Command.bulk_size or (index == lig_entries - 1):
Ligand.objects.bulk_create(ligands)
# Ligands have been inserted => update LigandIDs for pairing
for pair in weblinks:
pair["link"].ligand = ligands[pair["lig_idx"]]
LigandID.objects.bulk_create([pair["link"] for pair in weblinks])
print("Inserted", index + 1, "out of", lig_entries, "ligands")
ligands = []
weblinks = []
# Parse all new non-small-molecule ChEMBL ligands
print("\n#4 Building new non-small-molecule ChEMBL ligands", datetime.datetime.now())
nonsm_data = filtered_ligands.loc[~filtered_ligands["molecule_type"].isin(["Small molecule", "Oligosaccharide", None])]
print("Found", len(nonsm_data), "new non-small-molecules")
ligands = []
ligand_types = {"Unknown": "na", "Protein": "protein"}
weblinks = []
for index, row in nonsm_data.iterrows():
ids = {}
ids["smiles"] = row['smiles']
ids["sequence"] = row['sequence']
ids["inchikey"] = row['standard_inchi_key']
ids["chembl_ligand"] = row['molecule_chembl_id']
# Filter types
ligand = get_or_create_ligand(row['pref_name'], ids, ligand_types[row['molecule_type']], False, True)
# Add LigandIDs
if row['other_ids'] != None:
extra_ids = row['other_ids'].split(";")
existing = list(set.intersection(set(extra_ids), set(existing_ids)))
if len(existing) > 0:
continue # skip rest of creation
else:
for id in extra_ids:
weblinks.append(LigandID(ligand = ligand, index=id, web_resource=wr_chembl))
# Bulk insert all new ligandIDs
LigandID.objects.bulk_create(weblinks)
@staticmethod
def build_chembl_bioactivities():
AssayExperiment.objects.all().delete()
        print("\n===============\n#1 Reading ChEMBL bioactivity data")
bioactivity_input_file = os.sep.join([settings.DATA_DIR, "ligand_data", "assay_data", "chembl_bioactivity_data.csv.gz"])
bioactivity_data = pd.read_csv(bioactivity_input_file, dtype=str)
bio_entries = len(bioactivity_data)
print("Found", bio_entries, "bioactivities", datetime.datetime.now())
print("\n#2 Building ChEMBL ligands cache", datetime.datetime.now())
#ids = list(bioactivity_data["parent_molecule_chembl_id"].unique()) # not filtering is way faster
ligands = list(LigandID.objects.filter(index__startswith="CHEMBL").values_list("ligand_id", "index"))
lig_dict = {entry[1]: entry[0] for entry in ligands}
print("\n#3 Building ChEMBL proteins cache", datetime.datetime.now())
# NOTE => might need to switch to Accession as the Entry name changes more frequently
# If so, keep isoform notations in mind
names = list(bioactivity_data["Entry name"].unique())
proteins = list(Protein.objects.filter(entry_name__in=names).values_list("pk", "entry_name"))
prot_dict = {prot_entry[1]: prot_entry[0] for prot_entry in proteins}
print("\n#4 Building ChEMBL bioactivity entries", datetime.datetime.now())
bioacts = []
for index, row in bioactivity_data.iterrows():
bioacts.append(AssayExperiment())
bioacts[-1].ligand_id = lig_dict[row["parent_molecule_chembl_id"]]
bioacts[-1].protein_id = prot_dict[row["Entry name"]]
bioacts[-1].assay_type = row["assay_type"]
bioacts[-1].assay_description = row["assay_description"]
bioacts[-1].pchembl_value = row["pchembl_value"]
bioacts[-1].standard_value = row["standard_value"]
bioacts[-1].standard_relation = row["standard_relation"]
bioacts[-1].standard_type = row["standard_type"]
bioacts[-1].standard_units = row["standard_units"]
bioacts[-1].document_chembl_id = row["document_chembl_id"]
# BULK insert every X entries or last entry
if (len(bioacts) == Command.bulk_size) or (index == bio_entries - 1):
AssayExperiment.objects.bulk_create(bioacts)
print("Inserted", index, "out of", bio_entries, "bioactivities")
bioacts = []
@staticmethod
def build_pubchem_vendor_links():
LigandVendors.objects.all().delete()
print("\n===============\n#1 Reading and creating Vendors")
vendor_url = os.sep.join([settings.DATA_DIR, "ligand_data", "assay_data", "pubchem_vendor_list.csv.gz"])
vendor_data = pd.read_csv(vendor_url, dtype=str)
vendors = []
for index, row in vendor_data.iterrows():
vendors.append(LigandVendors(slug=slugify(row["SourceName"]), name = row["SourceName"], url = row["SourceURL"]))
LigandVendors.objects.bulk_create(vendors)
vendor_dict = {vendor.name : vendor.pk for vendor in vendors}
print("\n#2 Building ChEMBL ligands cache", datetime.datetime.now())
ligands = list(LigandID.objects.filter(index__startswith="CHEMBL").values_list("ligand_id", "index"))
lig_dict = {entry[1]: entry[0] for entry in ligands}
print("\n#3 Creating all vendor links", datetime.datetime.now())
vendor_links_url = os.sep.join([settings.DATA_DIR, "ligand_data", "assay_data", "pubchem_vendor_links.csv.gz"])
vendor_links_data = pd.read_csv(vendor_links_url, dtype=str)
links = []
for index, row in vendor_links_data.iterrows():
if len(row["SourceRecordURL"]) < 300:
links.append(LigandVendorLink(vendor_id=vendor_dict[row["SourceName"]], ligand_id = lig_dict[row["chembl_id"]], url = row["SourceRecordURL"], external_id = row["RegistryID"]))
LigandVendorLink.objects.bulk_create(links)
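# --- Illustrative sketch (not used by the command above) -----------------------
# Generic form of the "bulk insert every N rows or on the last row" pattern used
# in build_chembl_ligands() and build_chembl_bioactivities(); the default batch
# size mirrors Command.bulk_size. Hypothetical usage:
#   for chunk in _iter_batches(objects): SomeModel.objects.bulk_create(chunk)
def _iter_batches(rows, batch_size=Command.bulk_size):
    """Yield lists of at most batch_size items from any iterable."""
    batch = []
    for row in rows:
        batch.append(row)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch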
|
StarcoderdataPython
|
1751336
|
import os, sys, re, types
import matplotlib.pyplot as plt
import sqlalchemy
from SphinxReport.Renderer import *
from SphinxReport.Tracker import *
# for trackers_derived_sets and trackers_master
if not os.path.exists("conf.py"):
raise IOError( "could not find conf.py" )
exec(compile(open( "conf.py" ).read(), "conf.py", 'exec'))
def iterate_with_wrap( lines ):
"""iterate over lines in a Makefile
The iterator merges multi-line lines.
Comments before a variable are collected.
"""
comments, buf = [], []
keep = False
for line in lines:
if re.search( "Section parameters: start", line ):
keep = True
comments = []
continue
if re.search( "Section parameters: end", line ):
break
if line.startswith("#"):
comments.append( re.sub( "^#+", "", line[:-1] ) )
continue
if not line.strip():
comments = []
continue
if not keep: continue
if line.endswith("\\\n"):
buf.append( line[:-2].strip() )
continue
if buf:
buf.append( line.strip() )
yield " ".join( buf ), "\n".join(comments)
buf = []
comments = []
else:
yield line, "\n".join(comments)
comments = []
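# --- Illustrative sketch (not part of the original trackers) -------------------
# Uncalled helper demonstrating iterate_with_wrap() on an in-memory Makefile
# fragment; the parameter names below are made up for illustration. The
# multi-line PARAM_DIRS assignment comes back merged into a single entry, and
# the comment above PARAM_THREADS is attached to its yielded line.
def _demo_iterate_with_wrap():
    lines = [
        "## Section parameters: start\n",
        "# number of threads to use\n",
        "PARAM_THREADS?=4\n",
        "PARAM_DIRS=a \\\n",
        " b\n",
        "## Section parameters: end\n",
    ]
    return list(iterate_with_wrap(lines))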
class MakefileParameters( Tracker ):
"""list parameters in the Makefile of a pipeline."""
@returnLabeledData
def __call__( self, track, slice = None ):
        rx = re.compile(r"^(\S[^=]*)\s*=\s*(.*)")
infile = open( os.path.join( data_dir, track), "r" )
data = []
for line, comments in iterate_with_wrap( infile ):
a = rx.search( line )
if not a: continue
k,v = a.groups()
if k.endswith("?"): k=k[:-1]
if not v: v = "<empty>"
if comments:
data.append( (k, v + "\n\n" + comments ) )
else:
data.append( (k, v) )
infile.close()
return data
|
StarcoderdataPython
|
6628683
|
"""
Copyright 2017 <NAME> and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tools.captureProcessFrame import *
from tools.determineDataTrends import *
from tools.processWhitePoints import *
from tools.recordGesture import *
from tools.filterData import *
from tools.recordData import *
from configparser import ConfigParser
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
config = ConfigParser()
config.read("config.ini")
# Image resolution of captured frames
IMG_SIZE = int(config.get("image", "imgResolution"))
# Size of the surrounding region utilized
# when applying a Gaussian blur on frames
BLUR_REGION = int(config.get("image", "blurRegion"))
# Cutoff for gray intensity value of pixels when thresholding frames
PIXEL_INTENSITY_THRESHOLD = int(config.get("image", "intensityThreshold"))
# Number of elements to analyze when calculating
# trends in x-axis and y-axis movement
DATA_WINDOW_SIZE = int(config.get("data", "windowSize"))
# Cutoff values for data points when being filtered
LOWER_OUTLIER_CUTOFF = int(config.get("data", "lowerCutoff"))
UPPER_OUTLIER_CUTOFF = int(config.get("data", "upperCutoff"))
# Cutoff values for calculated trends to compare with when detecting gestures
X_DATA_THRESHOLD = float(config.get("data", "xThreshold"))
Y_DATA_THRESHOLD = int(0.25 * IMG_SIZE)
# Zoom scale factor value to pass through the pipe for zoomDisplay.py
ZOOM_FACTOR = float(config.get("zoom", "scaleFactor"))
# Value at which the gesture detection will
# terminate and record all data in files
FRAME_COUNT_LIMIT = int(config.get("misc", "frameLimit"))
# Boolean value to determine whether or not to show debug statements
DEBUG = config.getboolean("misc", "debug")
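# A config.ini matching the keys read above might look like the following;
# the values are illustrative placeholders, not the project's defaults:
#
#   [image]
#   imgResolution = 128
#   blurRegion = 5
#   intensityThreshold = 25
#
#   [data]
#   windowSize = 10
#   lowerCutoff = 0
#   upperCutoff = 128
#   xThreshold = 0.5
#
#   [zoom]
#   scaleFactor = 1.5
#
#   [misc]
#   frameLimit = 500
#   debug = True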
# Initialize data lists
xData = []
xDataSample = []
xDataFiltered = []
yData = []
yDataSample = []
yDataFiltered = []
# Define camera settings and specify variable to store frame
camera = PiCamera()
camera.resolution = (IMG_SIZE, IMG_SIZE)
rgbFrame = PiRGBArray(camera, size=camera.resolution)
time.sleep(0.1)
frameCount = 0
frame1 = captureProcessFrame(camera, rgbFrame, BLUR_REGION)
while frameCount <= FRAME_COUNT_LIMIT:
# Increment the frame count each iteration
frameCount += 1
frame2 = captureProcessFrame(camera, rgbFrame, BLUR_REGION)
# Create an image based on the differences between
# the two frames and then enhance the result
diffImg = cv2.absdiff(frame1, frame2)
threshImg = cv2.threshold(diffImg, PIXEL_INTENSITY_THRESHOLD,
255, cv2.THRESH_BINARY)[1]
# Assign frame 1 to frame 2 for the next iteration of comparison
frame1 = frame2
whitePixelsData = processWhitePoints(threshImg)
xData.append(whitePixelsData[0])
yData.append(whitePixelsData[1])
# Analyze for trends when a full window of data points has been gathered
if len(xData) % DATA_WINDOW_SIZE == 0:
filteredDataWindows = filterData(DATA_WINDOW_SIZE, xData, yData,
LOWER_OUTLIER_CUTOFF,
UPPER_OUTLIER_CUTOFF)
# If no data points survived the filtering,
# continue to the next iteration
if filteredDataWindows is None:
continue
xWindowFiltered = filteredDataWindows[0]
yWindowFiltered = filteredDataWindows[1]
# Save all filtered data so they can be logged later
xDataFiltered += xWindowFiltered
yDataFiltered += yWindowFiltered
gestureDetected = determineDataTrends(xWindowFiltered, yWindowFiltered,
X_DATA_THRESHOLD,
Y_DATA_THRESHOLD)
if gestureDetected is not None:
recordGesture(gestureDetected, ZOOM_FACTOR)
if DEBUG:
print("[INFO] Gesture detected: " + gestureDetected)
recordData(xData, xDataFiltered, yData, yDataFiltered)
if DEBUG:
print("[INFO] Data recorded!")
|
StarcoderdataPython
|
3391114
|
"""
https://www.practicepython.org
MichelePratusevich
Exercise 35: Birthday Months
2 chilis
This exercise is Part 3 of 4 of the birthday data exercise series. The other exercises are:
Part 1, Part 2, and Part 4.
In the previous exercise we saved information about famous scientists’ names and birthdays
to disk. In this exercise, load that JSON file from disk, extract the months of all the
birthdays, and count how many scientists have a birthday in each month.
Your program should output something like:
{
"May": 3,
"November": 2,
"December": 1
}
"""
import json
import sys
from collections import Counter
def get_birthdays(file):
with open(file) as birthday_list_file:
return(json.load(birthday_list_file))
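# Assumed input format (illustrative, from the earlier exercises in the series):
# birthdays.json maps scientist names to strings whose first token is the month
# name, e.g. {"Marie Curie": "November 7, 1867"}.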
if __name__ == '__main__':
if len(sys.argv) == 2:
file = str(sys.argv[1])
else:
file = 'birthdays.json'
birthdays = get_birthdays(file)
print(Counter([item.split()[0] for item in birthdays.values()]))
    # The above works, but the output is clunky because the monthly data should
    # be in calendar order. Also there is no month entry for Fibonacci, so his
    # record is simply never matched by the lookup below.
    months = Counter([item.split()[0] for item in birthdays.values()])
    calendar_order = ["January", "February", "March", "April", "May", "June",
                      "July", "August", "September", "October", "November",
                      "December"]
    for month in calendar_order:
        if month in months:
            print(month, months[month])
|
StarcoderdataPython
|
1925787
|
<gh_stars>1-10
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import logging
import string
import threading
import colorama
from wlauto.core.bootstrap import settings
import wlauto.core.signal as signal
COLOR_MAP = {
logging.DEBUG: colorama.Fore.BLUE,
logging.INFO: colorama.Fore.GREEN,
logging.WARNING: colorama.Fore.YELLOW,
logging.ERROR: colorama.Fore.RED,
logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
}
RESET_COLOR = colorama.Style.RESET_ALL
def init_logging(verbosity):
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
error_handler = ErrorSignalHandler(logging.DEBUG)
root_logger.addHandler(error_handler)
console_handler = logging.StreamHandler()
if verbosity == 1:
console_handler.setLevel(logging.DEBUG)
if 'colour_enabled' in settings.logging and not settings.logging['colour_enabled']:
console_handler.setFormatter(LineFormatter(settings.logging['verbose_format']))
else:
console_handler.setFormatter(ColorFormatter(settings.logging['verbose_format']))
else:
console_handler.setLevel(logging.INFO)
if 'colour_enabled' in settings.logging and not settings.logging['colour_enabled']:
console_handler.setFormatter(LineFormatter(settings.logging['regular_format']))
else:
console_handler.setFormatter(ColorFormatter(settings.logging['regular_format']))
root_logger.addHandler(console_handler)
logging.basicConfig(level=logging.DEBUG)
def add_log_file(filepath, level=logging.DEBUG):
root_logger = logging.getLogger()
file_handler = logging.FileHandler(filepath)
file_handler.setLevel(level)
file_handler.setFormatter(LineFormatter(settings.logging['file_format']))
root_logger.addHandler(file_handler)
class ErrorSignalHandler(logging.Handler):
"""
Emits signals for ERROR and WARNING level traces.
"""
def emit(self, record):
if record.levelno == logging.ERROR:
signal.send(signal.ERROR_LOGGED, self)
elif record.levelno == logging.WARNING:
signal.send(signal.WARNING_LOGGED, self)
class ColorFormatter(logging.Formatter):
"""
Formats logging records with color and prepends record info
to each line of the message.
BLUE for DEBUG logging level
GREEN for INFO logging level
YELLOW for WARNING logging level
RED for ERROR logging level
BOLD RED for CRITICAL logging level
"""
def __init__(self, fmt=None, datefmt=None):
super(ColorFormatter, self).__init__(fmt, datefmt)
template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
template_text = '${color}' + template_text + RESET_COLOR
self.fmt_template = string.Template(template_text)
def format(self, record):
self._set_color(COLOR_MAP[record.levelno])
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
d = record.__dict__
parts = []
for line in record.message.split('\n'):
d.update({'message': line.strip('\r')})
parts.append(self._fmt % d)
return '\n'.join(parts)
def _set_color(self, color):
self._fmt = self.fmt_template.substitute(color=color)
class LineFormatter(logging.Formatter):
"""
Logs each line of the message separately.
"""
def __init__(self, fmt=None, datefmt=None):
super(LineFormatter, self).__init__(fmt, datefmt)
def format(self, record):
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
d = record.__dict__
parts = []
for line in record.message.split('\n'):
d.update({'message': line.strip('\r')})
parts.append(self._fmt % d)
return '\n'.join(parts)
class BaseLogWriter(object):
def __init__(self, name, level=logging.DEBUG):
"""
        File-like object designed to be used for logging from streams.
        Each complete line (terminated by a new line character) gets logged
        at the level given to the constructor (DEBUG by default). Incomplete
        lines are buffered until the next new line arrives.
:param name: The name of the logger that will be used.
"""
self.logger = logging.getLogger(name)
self.buffer = ''
if level == logging.DEBUG:
self.do_write = self.logger.debug
elif level == logging.INFO:
self.do_write = self.logger.info
elif level == logging.WARNING:
self.do_write = self.logger.warning
elif level == logging.ERROR:
self.do_write = self.logger.error
else:
raise Exception('Unknown logging level: {}'.format(level))
def flush(self):
# Defined to match the interface expected by pexpect.
return self
def close(self):
if self.buffer:
self.logger.debug(self.buffer)
self.buffer = ''
return self
def __del__(self):
        # Ensure we don't lose buffered output
self.close()
class LogWriter(BaseLogWriter):
def write(self, data):
data = data.replace('\r\n', '\n').replace('\r', '\n')
if '\n' in data:
parts = data.split('\n')
parts[0] = self.buffer + parts[0]
for part in parts[:-1]:
self.do_write(part)
self.buffer = parts[-1]
else:
self.buffer += data
return self
class LineLogWriter(BaseLogWriter):
def write(self, data):
self.do_write(data)
class StreamLogger(threading.Thread):
"""
Logs output from a stream in a thread.
"""
def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
super(StreamLogger, self).__init__()
self.writer = klass(name, level)
self.stream = stream
self.daemon = True
def run(self):
line = self.stream.readline()
while line:
self.writer.write(line.rstrip('\n'))
line = self.stream.readline()
self.writer.close()
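# --- Illustrative sketch (uncalled) ---------------------------------------------
# Shows how LogWriter buffers partial lines: nothing is emitted for "partial "
# until the newline arrives in the second write(); close() flushes the leftover
# "next" fragment. The logger name is a placeholder.
def _demo_log_writer():
    writer = LogWriter('demo')
    writer.write('partial ')
    writer.write('line\nnext')   # emits "partial line", keeps "next" buffered
    writer.close()               # flushes "next"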
|
StarcoderdataPython
|
5116836
|
# built-in
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
# app
from ..actions import get_lib_path, get_python, get_venv
from ..config import builders
from ..converters import CONVERTERS
from ..models import Requirement
from .base import BaseCommand
class ProjectRegisterCommand(BaseCommand):
"""Register a project in the system or in a venv.
"""
@staticmethod
def build_parser(parser) -> ArgumentParser:
builders.build_config(parser)
builders.build_from(parser)
builders.build_output(parser)
builders.build_venv(parser)
builders.build_other(parser)
parser.add_argument('name', nargs=REMAINDER, help='paths to install')
return parser
def __call__(self) -> bool:
if 'from' not in self.config:
self.logger.error('`--from` is required for this command')
return False
# no paths passed, register the current project globally
if not self.args.name:
project_path = Path(self.config['project'])
self.logger.info('registering the project globally...', extra=dict(
project_path=str(project_path),
))
return self._register(project_path=project_path)
# paths passed and venv for the current project exists,
# register passed paths in the current venv.
venv = get_venv(config=self.config)
if venv.exists():
self.logger.info('registering projects in the venv...', extra=dict(
venv_path=str(venv.path),
))
for path in self.args.name:
path = Path(path)
if not path.exists():
self.logger.error('project not found', extra=dict(path=str(path)))
return False
ok = self._register(project_path=path, lib_path=venv.lib_path)
if not ok:
return False
return True
# paths passed and no venv for the current project exists,
# register passed paths globally.
for path in self.args.name:
path = Path(path)
if not path.exists():
self.logger.error('project not found', extra=dict(path=str(path)))
return False
self.logger.info('registering the project globally...', extra=dict(
project_path=str(path),
))
ok = self._register(project_path=path)
if not ok:
return False
return True
def _register(self, project_path: Path, lib_path: Path = None) -> bool:
self._make_egg_info(
project_path=project_path,
from_format=self.config['from']['format'],
from_path=self.config['from']['path'],
)
python = get_python(self.config)
self.logger.debug('python found', extra=dict(python=str(python.path)))
# get site-packages dir path
if lib_path is None:
lib_path = get_lib_path(python_path=python.path)
if lib_path is None:
self.logger.error('cannot find site-packages path', extra=dict(
python=str(python.path),
))
return False
self.logger.debug('lib found', extra=dict(python=str(lib_path)))
# make egg-link
self.logger.info('creating egg-link...')
ok = self._upd_egg_link(lib_path=lib_path, project_path=project_path)
if not ok:
return False
# update pth
self._upd_pth(lib_path=lib_path, project_path=project_path)
self.logger.info('registered!', extra=dict(python=str(python.path.name)))
return True
def _make_egg_info(self, project_path: Path, from_format: str, from_path: str) -> None:
loader = CONVERTERS[from_format]
loader = loader.copy(project_path=project_path)
resolver = loader.load_resolver(path=from_path)
if loader.lock:
self.logger.warning('do not build project from lockfile!')
# We don't attach deps here.
# Use `deps convert` before to merge deps if you need it.
# Please, open an issue if it is a case for you.
# create egg-info
reqs = Requirement.from_graph(resolver.graph, lock=False)
self.logger.info('creating egg-info...')
dumper = CONVERTERS['egginfo']
dumper = dumper.copy(project_path=project_path)
dumper.dump(
path=project_path,
reqs=reqs,
project=resolver.graph.metainfo,
)
def _upd_egg_link(self, lib_path: Path, project_path: Path) -> bool:
# find the correct one egg-info
info_path = (project_path / project_path.name).with_suffix('.egg-info')
if not info_path.exists():
paths = list(project_path.glob('*.egg-info'))
if len(paths) != 1:
self.logger.warning('cannot find egg-info')
return False
info_path = paths[0]
info_path = info_path.resolve()
self.logger.debug('egg-info found', extra=dict(path=str(info_path)))
# create egg-link
link_path = (lib_path / info_path.name).with_suffix('.egg-link')
link_path.write_text(str(info_path) + '\n.')
self.logger.debug('egg-link created', extra=dict(path=str(link_path)))
return True
def _upd_pth(self, lib_path: Path, project_path: Path) -> None:
# read existing content
pth_path = lib_path / 'dephell.pth'
content = ''
if pth_path.exists():
content = pth_path.read_text()
# check if already added
paths = content.splitlines()
if str(project_path) in paths:
self.logger.debug('already registered in pth', extra=dict(path=str(pth_path)))
return
# add
project_path = project_path.resolve()
content = content.rstrip() + '\n' + str(project_path) + '\n'
pth_path.write_text(content.lstrip())
self.logger.debug('pth updated', extra=dict(path=str(pth_path)))
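# For reference (illustrative paths): after _register() the site-packages dir
# typically contains
#   <lib>/<project>.egg-link  -- egg-info path on the first line, "." on the second
#   <lib>/dephell.pth         -- one registered project path per line
# so the interpreter adds the project directory to sys.path at start-up via the
# standard .pth machinery.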
|
StarcoderdataPython
|
4898333
|
<gh_stars>0
"""
** Filtering Info **
To filter scenes by tags one should specify a filter function
Scene tags dict has following structure:
{
'day_time': one of {'kNight', 'kMorning', 'kAfternoon', 'kEvening'}
'season': one of {'kWinter', 'kSpring', 'kSummer', 'kAutumn'}
'track': one of {
'Moscow' , 'Skolkovo', 'Innopolis', 'AnnArbor', 'Modiin', 'TelAviv'}
'sun_phase': one of {'kAstronomicalNight', 'kTwilight', 'kDaylight'}
'precipitation': one of {'kNoPrecipitation', 'kRain', 'kSleet', 'kSnow'}
}
Full description of protobuf message is available at
tags.proto file in sources
** Split Configuration **
Training Data ('train')
'moscow__train': Moscow intersected with NO precipitation
Development Data ('development')
'moscow__development': Moscow intersected with NO precipitation
'ood__development': Skolkovo, Modiin, and Innopolis intersected with
(No precipitation, Rain and Snow)
Test Data ('test')
'moscow__test': Moscow intersected with NO precipitation
'ood__test': Ann-Arbor + Tel Aviv intersected with
(No precipitation, rain, snow and sleet)
"""
def filter_moscow_no_precipitation_data(scene_tags_dict):
    """
    This will need to be further divided into train/validation/test splits.
    """
    return (scene_tags_dict['track'] == 'Moscow' and
            scene_tags_dict['precipitation'] == 'kNoPrecipitation')
def filter_ood_development_data(scene_tags_dict):
    return (scene_tags_dict['track'] in ['Skolkovo', 'Modiin', 'Innopolis'] and
            scene_tags_dict['precipitation'] in ['kNoPrecipitation', 'kRain', 'kSnow'])
DATASETS_TO_FILTERS = {
'train': {
'moscow__train': filter_moscow_no_precipitation_data
},
'development': {
'moscow__development': filter_moscow_no_precipitation_data,
'ood__development': filter_ood_development_data
}
}
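# --- Illustrative sketch (uncalled) ---------------------------------------------
# A made-up scene tags dict, shaped as described in the module docstring, passed
# through one of the split filters above.
def _example_filter_usage():
    scene_tags_dict = {
        'day_time': 'kMorning',
        'season': 'kWinter',
        'track': 'Moscow',
        'sun_phase': 'kDaylight',
        'precipitation': 'kNoPrecipitation',
    }
    return filter_moscow_no_precipitation_data(scene_tags_dict)  # -> True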
|
StarcoderdataPython
|
3506826
|
"""Helpers to process schemas."""
import itertools
import typing
from ... import types
from . import iterate
class TArtifacts(typing.NamedTuple):
"""The return value of _calculate_schema."""
schema_name: str
property_name: str
property_schema: typing.Any
TArtifactsIter = typing.Iterator[TArtifacts]
TArtifactsGroupedIter = typing.Iterator[typing.Tuple[str, TArtifactsIter]]
TSchemaIter = typing.Iterable[typing.Tuple[str, types.Schema]]
TGetSchemaArtifacts = typing.Callable[
[types.Schemas, str, types.Schema], TArtifactsIter
]
def get_artifacts(
*, schemas: types.Schemas, get_schema_artifacts: TGetSchemaArtifacts
) -> TArtifactsIter:
"""
Get all artifacts information from the schemas.
Assume schemas is valid.
Takes all schemas, retrieves all constructable schemas, for each schema retrieves
all artifacts and returns an iterable with all the captured back references.
Args:
schemas: The schemas to process.
get_schema_artifacts: Function that retrieves the artifacts for the schema.
Returns:
All backreference information.
"""
# Retrieve all constructable schemas
constructables = iterate.constructable(schemas=schemas)
# Retrieve all artifacts
artifacts_iters = map(
lambda args: get_schema_artifacts(schemas, *args), constructables
)
# Unpack nested iterators
return itertools.chain(*artifacts_iters)
TOutput = typing.TypeVar("TOutput")
TOutputIter = typing.Iterator[typing.Tuple[str, TOutput]]
TCalculateOutput = typing.Callable[[TArtifactsIter], TOutput]
def calculate_outputs(
*, artifacts: TArtifactsIter, calculate_output: TCalculateOutput
) -> TOutputIter:
"""
Convert artifacts iterator to an output iterator.
Algorithm:
1. sort and group by schema name and
2. call calculate_output on the grouped artifacts.
Args:
artifacts: The artifacts to convert.
calculate_output: Calculate the output from artifacts for a schema.
Returns:
An iterator with the converted output.
"""
# Sort and group
sorted_artifacts = sorted(artifacts, key=lambda backref: backref.schema_name)
grouped_artifacts = itertools.groupby(
sorted_artifacts, lambda backref: backref.schema_name
)
# Map to output
return map(lambda args: (args[0], calculate_output(args[1])), grouped_artifacts)
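# --- Illustrative sketch (uncalled, hypothetical data) ---------------------------
# Demonstrates calculate_outputs: artifacts are grouped per schema name and the
# supplied calculate_output callable reduces each group (here, a simple count).
def _example_calculate_outputs():
    artifacts = iter(
        [
            TArtifacts("Schema1", "prop_1", {}),
            TArtifacts("Schema1", "prop_2", {}),
            TArtifacts("Schema2", "prop_3", {}),
        ]
    )
    counts = calculate_outputs(
        artifacts=artifacts, calculate_output=lambda group: sum(1 for _ in group)
    )
    return dict(counts)  # -> {"Schema1": 2, "Schema2": 1}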
|
StarcoderdataPython
|
3414490
|
<gh_stars>10-100
#!/usr/bin/env python3
import os
from osgeo import gdal
import sys
sys.path.append('/foss_fim/src')
from utils.shared_variables import PREP_PROJECTION_CM
import shutil
from multiprocessing import Pool
import argparse
def reproject_dem(args):
raster_dir = args[0]
elev_cm = args[1]
elev_cm_proj = args[2]
reprojection = args[3]
if os.path.exists(elev_cm_proj):
os.remove(elev_cm_proj)
shutil.copy(elev_cm, elev_cm_proj)
print(f"Reprojecting {elev_cm_proj}")
gdal.Warp(elev_cm_proj,elev_cm_proj,dstSRS=reprojection)
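# Expected input layout (assumed from the loop below): <dem_dir>/<huc>/elev_cm.tif
# for every HUC directory; the reprojected copy is written alongside it as
# elev_cm_proj.tif.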
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Burn in NLD elevations')
parser.add_argument('-dem_dir','--dem-dir', help='DEM filename', required=True,type=str)
parser.add_argument('-j','--number-of-jobs',help='Number of processes to use. Default is 1.',required=False, default="1",type=int)
args = vars(parser.parse_args())
dem_dir = args['dem_dir']
number_of_jobs = args['number_of_jobs']
reproject_procs_list = []
for huc in os.listdir(dem_dir):
raster_dir = os.path.join(dem_dir,huc)
elev_cm = os.path.join(raster_dir, 'elev_cm.tif')
elev_cm_proj = os.path.join(raster_dir, 'elev_cm_proj.tif')
reproject_procs_list.append([raster_dir, elev_cm, elev_cm_proj, PREP_PROJECTION_CM])
# Multiprocess reprojection
with Pool(processes=number_of_jobs) as pool:
pool.map(reproject_dem, reproject_procs_list)
|
StarcoderdataPython
|
3557548
|
<reponame>bohdandrahan/Genome-diversity-model-Seed-Root
class World():
def __init__(self, abs_max = 1000):
self.abs_max = abs_max
|
StarcoderdataPython
|
11233775
|
<reponame>or-tal-robotics/mcl_pi<filename>particle_filter/laser_scan_get_map.py<gh_stars>1-10
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
from nav_msgs.srv import GetMap
import numpy as np
from matplotlib import pyplot as plt
class MapClientLaserScanSubscriber(object):
def __init__(self):
rospy.Subscriber('/scan',LaserScan,self.get_scan)
static_map = rospy.ServiceProxy('static_map',GetMap)
self.z = rospy.wait_for_message("/scan",LaserScan)
#print self.z
self.map = static_map()
map_info = self.map.map.info
map_width = np.array(map_info.width) # map width
        map_heghit = np.array(map_info.height) # map height
self.occupancy_grid = np.transpose(np.array(self.map.map.data).reshape(map_width, map_heghit)) # map
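        # Note (assumed ROS map_server convention): each occupancy_grid cell
        # holds -1 for unknown, 0 for free and 100 for occupied space; obs()
        # below relies on the value 100 to locate obstacle cells.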
def get_scan(self, msg): # callback function for LaserScan topic
self.z = msg
def scan2cart(self, robot_origin = [0,0,0]):
map_info = self.map.map.info
map_width = np.array(map_info.width) # map width
        map_heghit = np.array(map_info.height) # map height
# the robot orientation in relation to the world [m,m,rad]
mu_x = robot_origin[0]
mu_y = robot_origin[1]
theta = map_info.origin.orientation.z + robot_origin[2]
####################################################### need to add the robot localisation!!!!
# converting the laserscan measurements from polar coordinate to cartesian and create matrix from them.
n = len(self.z.ranges); i = np.arange(len(self.z.ranges))
angle = np.zeros(n); x = np.zeros(n); y = np.zeros(n)
angle = np.add(self.z.angle_min, np.multiply(i, self.z.angle_increment)) + theta
x = np.multiply(self.z.ranges, np.cos(angle)) + mu_x
y = np.multiply(self.z.ranges, np.sin(angle)) + mu_y
x[~np.isfinite(x)] = -1
y[~np.isfinite(y)] = -1
x = x / map_info.resolution
#x_r = np.abs(x_r)
#x = x.astype (int)
x[x > map_width] = -1
x[x < -map_width] = -1
y = y / map_info.resolution
#y_r = np.abs(y_r)
#y = y.astype (int)
y[y < -map_heghit] = -1
y[y > map_heghit] = -1
Y = np.stack((x,y))
return Y
def obs(self):
return np.argwhere(self.occupancy_grid == 100)*self.map.map.info.resolution + self.map.map.info.resolution*0.5 + self.map.map.info.origin.position.x
def loction_based(self, y = [0,0,0]):
Y = np.array([y[0] + self.scan2cart(y)[0,:],y[1] + self.scan2cart(y)[1,:]])
map_info = self.map.map.info
map_width = np.array(map_info.width) # map width
        map_heghit = np.array(map_info.height) # map height
        # the rounded x,y coordinates from the laser scan are used as indexes into a matrix that holds 0 by default and 100 where an object was detected
matrix = np.zeros (shape = (map_width,map_heghit))
for ii in range (len(self.z.ranges)):
a_1 = int(Y[0,ii])
a_2 = int(Y[1,ii])
if a_1 != -1 and a_2 != -1:
matrix[int(Y[0,ii]),int(Y[1,ii])] = 100
return matrix
if __name__ == "__main__":
rospy.init_node('laser_scan', anonymous = True)
StaticMap = rospy.ServiceProxy('static_map',GetMap)
scan = MapClientLaserScanSubscriber()
r = rospy.Rate(1)
OC = scan.occupancy_grid
plt.ion()
fig = plt.figure()
while not rospy.is_shutdown():
M = scan.loction_based()
r.sleep()
plt.imshow(-M+OC)
fig.canvas.draw()
print('Drawing!')
rospy.spin()
pass
|
StarcoderdataPython
|
11387406
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import csv
import logging
import os
import shutil
from csv import DictWriter
from typing import (
Any, Dict, FrozenSet,
)
from pyhocon import ConfigFactory, ConfigTree
from databuilder.job.base_job import Job
from databuilder.loader.base_loader import Loader
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.serializers import nebula_serializer
from databuilder.utils.closer import Closer
LOGGER = logging.getLogger(__name__)
class FsNebulaCSVLoader(Loader):
"""
Write vertex and edge CSV file(s) consumed by NebulaCsvPublisher.
It assumes that the record it consumes is instance of GraphSerializable
"""
# Config keys
VERTEX_DIR_PATH = 'vertex_dir_path'
EDGE_DIR_PATH = 'edge_dir_path'
FORCE_CREATE_DIR = 'force_create_directory'
SHOULD_DELETE_CREATED_DIR = 'delete_created_directories'
_DEFAULT_CONFIG = ConfigFactory.from_dict({
SHOULD_DELETE_CREATED_DIR: True,
FORCE_CREATE_DIR: False
})
def __init__(self) -> None:
self._vertex_file_mapping: Dict[Any, DictWriter] = {}
self._relation_file_mapping: Dict[Any, DictWriter] = {}
self._keys: Dict[FrozenSet[str], int] = {}
self._closer = Closer()
def init(self, conf: ConfigTree) -> None:
"""
Initializing FsNebulaCsvLoader by creating directory for vertex files
and edge files. Note that the directory defined in
configuration should not exist.
:param conf:
:return:
"""
conf = conf.with_fallback(FsNebulaCSVLoader._DEFAULT_CONFIG)
self._vertex_dir = conf.get_string(FsNebulaCSVLoader.VERTEX_DIR_PATH)
self._relation_dir = \
conf.get_string(FsNebulaCSVLoader.EDGE_DIR_PATH)
self._delete_created_dir = \
conf.get_bool(FsNebulaCSVLoader.SHOULD_DELETE_CREATED_DIR)
self._force_create_dir = conf.get_bool(
FsNebulaCSVLoader.FORCE_CREATE_DIR)
self._create_directory(self._vertex_dir)
self._create_directory(self._relation_dir)
def _create_directory(self, path: str) -> None:
"""
        Validates that the directory does not exist, creates it, and registers
        deletion of the created directory with Job.closer.
:param path:
:return:
"""
if os.path.exists(path):
if self._force_create_dir:
LOGGER.info('Directory exist. Deleting directory %s', path)
shutil.rmtree(path)
else:
raise RuntimeError(f'Directory should not exist: {path}')
os.makedirs(path)
def _delete_dir() -> None:
if not self._delete_created_dir:
LOGGER.warning('Skip Deleting directory %s', path)
return
LOGGER.info('Deleting directory %s', path)
shutil.rmtree(path)
# Directory should be deleted after publish is finished
Job.closer.register(_delete_dir)
def load(self, csv_serializable: GraphSerializable) -> None:
"""
        Writes a GraphSerializable record into CSV files.
        This method writes multiple CSV files because the input is not limited
        to a single vertex and edge type: each distinct vertex label and
        relation type gets its own file.
Common pattern for both vertices and relations:
1. retrieve csv row (a dict where keys represent a header,
values represent a row)
2. using this dict to get a appropriate csv writer and write to it.
3. repeat 1 and 2
:param csv_serializable:
:return:
"""
vertex = csv_serializable.next_node()
while vertex:
vertex_dict = nebula_serializer.serialize_node(vertex)
key = (vertex.label, self._make_key(vertex_dict))
file_suffix = '{}_{}'.format(*key)
vertex_writer = self._get_writer(
vertex_dict,
self._vertex_file_mapping,
key,
self._vertex_dir,
file_suffix)
vertex_writer.writerow(vertex_dict)
vertex = csv_serializable.next_node()
relation = csv_serializable.next_relation()
while relation:
relation_dict = nebula_serializer.serialize_relationship(relation)
key2 = (relation.start_label,
relation.end_label,
relation.type,
self._make_key(relation_dict))
file_suffix = f'{key2[0]}_{key2[1]}_{key2[2]}'
relation_writer = self._get_writer(relation_dict,
self._relation_file_mapping,
key2,
self._relation_dir,
file_suffix)
relation_writer.writerow(relation_dict)
relation = csv_serializable.next_relation()
def _get_writer(self,
csv_record_dict: Dict[str, Any],
file_mapping: Dict[Any, DictWriter],
key: Any,
dir_path: str,
file_suffix: str
) -> DictWriter:
"""
Finds a writer based on csv record, key.
If writer does not exist, it's creates a csv writer and update the
mapping.
:param csv_record_dict:
:param file_mapping:
:param key:
:param file_suffix:
:return:
"""
writer = file_mapping.get(key)
if writer:
return writer
LOGGER.info('Creating file for %s', key)
file_out = open(f'{dir_path}/{file_suffix}.csv', 'w', encoding='utf8')
writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(),
quoting=csv.QUOTE_NONNUMERIC)
def file_out_close() -> None:
LOGGER.info('Closing file IO %s', file_out)
file_out.close()
self._closer.register(file_out_close)
writer.writeheader()
file_mapping[key] = writer
return writer
def close(self) -> None:
"""
Any closeable callable registered in _closer, it will close.
:return:
"""
self._closer.close()
def get_scope(self) -> str:
return "loader.filesystem_csv_nebula"
def _make_key(self, record_dict: Dict[str, Any]) -> int:
"""
Each unique set of record keys is assigned an increasing numeric key
"""
return self._keys.setdefault(
frozenset(record_dict.keys()), len(self._keys))
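
if __name__ == '__main__':
    # Usage sketch (illustrative only, not part of the loader): wires the
    # loader up with throwaway temp directories. In a real databuilder job the
    # GraphSerializable records passed to load() come from an extractor.
    import tempfile

    _base = tempfile.mkdtemp()
    _loader = FsNebulaCSVLoader()
    _loader.init(ConfigFactory.from_dict({
        FsNebulaCSVLoader.VERTEX_DIR_PATH: os.path.join(_base, 'vertices'),
        FsNebulaCSVLoader.EDGE_DIR_PATH: os.path.join(_base, 'edges'),
        FsNebulaCSVLoader.SHOULD_DELETE_CREATED_DIR: True,
    }))
    # _loader.load(record) would be called here for each record; this sketch
    # only exercises init() and close().
    _loader.close()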
|
StarcoderdataPython
|
4892671
|
<filename>eth2/beacon/state_machines/forks/serenity/configs.py<gh_stars>0
from eth.constants import (
ZERO_ADDRESS,
)
from eth2.configs import Eth2Config
from eth2.beacon.constants import (
GWEI_PER_ETH,
)
from eth2.beacon.helpers import slot_to_epoch
from eth2.beacon.typing import (
Gwei,
Second,
Shard,
Slot,
)
GENESIS_SLOT = Slot(2**32)
SLOTS_PER_EPOCH = 2**6
SERENITY_CONFIG = Eth2Config(
# Misc
SHARD_COUNT=2**10, # (= 1,024) shards
TARGET_COMMITTEE_SIZE=2**7, # (= 128) validators
MAX_BALANCE_CHURN_QUOTIENT=2**5, # (= 32)
MAX_INDICES_PER_SLASHABLE_VOTE=2**12, # (= 4,096) votes
MAX_EXIT_DEQUEUES_PER_EPOCH=2**2, # (= 4)
SHUFFLE_ROUND_COUNT=90,
# Deposit contract
DEPOSIT_CONTRACT_ADDRESS=ZERO_ADDRESS, # TBD
DEPOSIT_CONTRACT_TREE_DEPTH=2**5, # (= 32)
# Gwei values
MIN_DEPOSIT_AMOUNT=Gwei(2**0 * GWEI_PER_ETH), # (= 1,000,000,000) Gwei
    MAX_DEPOSIT_AMOUNT=Gwei(2**5 * GWEI_PER_ETH), # (= 32,000,000,000) Gwei
FORK_CHOICE_BALANCE_INCREMENT=Gwei(2**0 * GWEI_PER_ETH), # (= 1,000,000,000) Gwei
EJECTION_BALANCE=Gwei(2**4 * GWEI_PER_ETH), # (= 16,000,000,000) Gwei
# Initial values
GENESIS_FORK_VERSION=0,
GENESIS_SLOT=GENESIS_SLOT,
GENESIS_EPOCH=slot_to_epoch(GENESIS_SLOT, SLOTS_PER_EPOCH),
GENESIS_START_SHARD=Shard(0),
BLS_WITHDRAWAL_PREFIX_BYTE=b'\x00',
# Time parameters
SECONDS_PER_SLOT=Second(6), # seconds
MIN_ATTESTATION_INCLUSION_DELAY=2**2, # (= 4) slots
SLOTS_PER_EPOCH=SLOTS_PER_EPOCH, # (= 64) slots
MIN_SEED_LOOKAHEAD=2**0, # (= 1) epochs
ACTIVATION_EXIT_DELAY=2**2, # (= 4) epochs
EPOCHS_PER_ETH1_VOTING_PERIOD=2**4, # (= 16) epochs
MIN_VALIDATOR_WITHDRAWABILITY_DELAY=2**8, # (= 256) epochs
PERSISTENT_COMMITTEE_PERIOD=2**11, # (= 2,048) epochs
# State list lengths
SLOTS_PER_HISTORICAL_ROOT=2**13, # (= 8,192) slots
LATEST_ACTIVE_INDEX_ROOTS_LENGTH=2**13, # (= 8,192) epochs
LATEST_RANDAO_MIXES_LENGTH=2**13, # (= 8,192) epochs
LATEST_SLASHED_EXIT_LENGTH=2**13, # (= 8,192) epochs
# Reward and penalty quotients
BASE_REWARD_QUOTIENT=2**10, # (= 1,024)
WHISTLEBLOWER_REWARD_QUOTIENT=2**9, # (= 512)
ATTESTATION_INCLUSION_REWARD_QUOTIENT=2**3, # (= 8)
INACTIVITY_PENALTY_QUOTIENT=2**24, # (= 16,777,216)
MIN_PENALTY_QUOTIENT=2**5,
# Max operations per block
MAX_PROPOSER_SLASHINGS=2**4, # (= 16)
MAX_ATTESTER_SLASHINGS=2**0, # (= 1)
MAX_ATTESTATIONS=2**7, # (= 128)
MAX_DEPOSITS=2**4, # (= 16)
MAX_VOLUNTARY_EXITS=2**4, # (= 16)
MAX_TRANSFERS=2**4, # (= 16)
)
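# Illustrative note (not part of the config): slot_to_epoch in the eth2 helpers
# is, in effect, floor division by SLOTS_PER_EPOCH, so GENESIS_EPOCH above
# corresponds to 2**32 // 2**6 == 2**26.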
|
StarcoderdataPython
|
358189
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from library.python import resource
import pytest
import ssl
# import urllib2
class TestRequest(object):
@pytest.fixture
def ctx(self):
r = resource.find("/builtin/cacert")
# ssl.create_default_context expects unicode string for pem-coded certificates
r = r.decode('ascii', errors='ignore')
return ssl.create_default_context(cadata=r)
def test_certs(self, ctx):
assert any(
any(item[0] == ("commonName", "YandexInternalRootCA") for item in cert["subject"])
for cert in ctx.get_ca_certs()
)
assert any(
any(item[0] == ("commonName", "Certum Trusted Network CA") for item in cert["subject"])
for cert in ctx.get_ca_certs()
)
# def test_internal(self, ctx):
# connection = urllib2.urlopen("https://nanny.yandex-team.ru/", context=ctx)
# assert connection.read()
# def test_external(self, ctx):
# connection = urllib2.urlopen("https://docs.python.org/", context=ctx)
# assert connection.read()
|
StarcoderdataPython
|
62503
|
from dataclasses import dataclass
from typing import Dict
from typing import Optional
@dataclass(frozen=True)
class CurrentDestinationStatus:
number_of_pending_messages: Optional[int]
number_of_consumers: int
messages_enqueued: int
messages_dequeued: int
@dataclass(frozen=True)
class ConsumerStatus:
address_to_destination_details: Optional[str]
destination_name: str
session_id: Optional[int]
enqueues: Optional[int]
dequeues: Optional[int]
dispatched: Optional[int]
dispatched_queue: Optional[int]
prefetch: int
max_pending: Optional[int]
exclusive: bool
retroactive: Optional[bool]
@dataclass(frozen=True)
class MessageStatus:
message_id: Optional[str]
details: Dict
persistent: Optional[bool]
correlation_id: str
properties: Optional[Dict]
@dataclass(frozen=True)
class SubscriberSetup:
address_to_subscriber_details: str
subscriber_id: str
destination: str
pending_queue_size: int
dispatched_queue_size: int
dispatched_counter: int
enqueue_counter: int
dequeue_counter: int
|
StarcoderdataPython
|
123234
|
from azureml.core.model import Model
from azuremite.workspace import get_workspace
def model_register():
ws = get_workspace()
model = Model.register(workspace=ws, model_path="../artifacts/worst.pickle", model_name="worst-model")
return model
def get_model_path():
ws = get_workspace()
model_path = Model.get_model_path('worst-model', _workspace=ws)
return model_path
if __name__ == '__main__':
model_path = get_model_path()
print(model_path)
print("ok")
|
StarcoderdataPython
|
11287797
|
<reponame>ksbhatkana/ksbhat-for-python
import re
strr="Hi, This is Kumara subrahmanya bhat alias ksbhat"
f1=open("File1.txt")
strre=str(f1.read())
st=re.compile(r'[+91]{3}-[0-9]{10}')
mates=st.finditer(strre)
ls=[i for i in mates]
print(ls[:])
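# Quick self-check on an inline sample (illustrative only; File1.txt above is
# still the real input the script works on):
sample = "Call me at +91-9876543210 or +91-1234567890"
print([m.group() for m in st.finditer(sample)])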
|
StarcoderdataPython
|
3397001
|
<filename>hknweb/tests/views/test_users.py
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from hknweb.events.tests.models.utils import ModelFactory
class UsersViewsTests(TestCase):
def setUp(self):
password = "<PASSWORD>"
user = ModelFactory.create_user(
username="test_user",
email="<EMAIL>",
)
user.set_password(password)
user.save()
self.user = user
self.password = password
def test_account_settings_get_returns_200(self):
self.client.login(username=self.user.username, password=self.password)
response = self.client.get(reverse("account-settings"))
self.assertEqual(response.status_code, 200)
def test_account_settings_verify_form_invalid_returns_302(self):
        self.client.login(username=self.user.username, password=self.password)
data = {}
response = self.client.post(reverse("account-settings"), data=data)
self.assertEqual(response.status_code, 302)
def test_account_settings_incorrect_current_password_returns_302(self):
        self.client.login(username=self.user.username, password=self.password)
data = {
"password": "<PASSWORD>",
}
response = self.client.post(reverse("account-settings"), data=data)
self.assertEqual(response.status_code, 302)
def test_account_settings_invalid_password_form_returns_302(self):
self.client.login(username=self.user.username, password=self.password)
data = {
"password": <PASSWORD>,
"change_password": True,
}
response = self.client.post(reverse("account-settings"), data=data)
self.assertEqual(response.status_code, 302)
def test_account_settings_valid_password_form_returns_302(self):
self.client.login(username=self.user.username, password=self.password)
data = {
"password": <PASSWORD>,
"change_password": True,
"new_password1": "<PASSWORD>",
"new_password2": "<PASSWORD>",
}
response = self.client.post(reverse("account-settings"), data=data)
self.assertEqual(response.status_code, 302)
def test_account_settings_invalid_profile_form_returns_302(self):
        self.client.login(username=self.user.username, password=self.password)
        data = {
            "password": self.password,
"edit_profile": True,
"phone_number": "incorrect_phone_number",
}
response = self.client.post(reverse("account-settings"), data=data)
self.assertEqual(response.status_code, 302)
def test_account_settings_valid_profile_form_returns_302(self):
        self.client.login(username=self.user.username, password=self.password)
        data = {
            "password": self.password,
"edit_profile": True,
"phone_number": "1234567890",
}
response = self.client.post(reverse("account-settings"), data=data)
self.assertEqual(response.status_code, 302)
def test_account_settings_missing_action_returns_302(self):
self.client.login(username=self.user.username, password=self.password)
data = {
"password": <PASSWORD>,
"incorrect_action_name": True,
}
response = self.client.post(reverse("account-settings"), data=data)
self.assertEqual(response.status_code, 302)
def test_account_create_get_returns_200(self):
response = self.client.get(reverse("account-create"))
self.assertEqual(response.status_code, 200)
def test_account_create_returns_302(self):
settings.DEBUG = True
data = {
"first_name": "test_first_name",
"last_name": "test_last_name",
"username": "test_username",
"email": "<EMAIL>",
"password1": "<PASSWORD>",
"password2": "<PASSWORD>",
}
response = self.client.post(reverse("account-create"), data=data)
self.assertEqual(response.status_code, 302)
settings.DEBUG = False
|
StarcoderdataPython
|
181007
|
#!/usr/bin/env python3
import random
import libyiban
import libyiban_ex
NEWS_K = 3
NEWS_CATEGORIES = [
libyiban_ex.XinHuaNews.CATEGORY.TECH,
libyiban_ex.XinHuaNews.CATEGORY.POLITICS,
libyiban_ex.XinHuaNews.CATEGORY.ENG_SCITECH_INTERNET
]
def latest_news(count_per_category):
'''
Prettify latest news into raw HTML, for YiBanGroup.new_article
'''
for category in NEWS_CATEGORIES:
for title, abstract, url in libyiban_ex.XinHuaNews.get(category, count_per_category):
yield (title, '<p>%s<a href="%s">阅读全文>></a></p>' % (abstract, url))
def main():
'''
Routine:
0. Get random news
1. Operate w/ a user-provided account (using `with`)
2. Show a list of all groups, let user to choose one
3. Show EGPA
4. Post the news & like
5. Post some replies regarding solitaire (reason: nostalgia)
'''
news = list(latest_news(NEWS_K))
with libyiban.YiBanAccount() as account:
groups = list(account.get_groups())
print('[I] Available groups:')
for i, group in enumerate(groups):
print('\t#%d - "%s"' % (i, group[0]))
print()
group_idxs = map(int, input('[?] Input groups\' indexes: ').split())
for i in group_idxs:
print('[I] Now processing #%d - "%s"' % (i, groups[i][0]))
group = groups[i][1]
print('\tEGPA = %.2f' % group.get_egpa())
for news_title, news_content in random.choices(news, k = NEWS_K):
_, article = group.new_article(news_title, news_content)
article.like()
solitaire = libyiban_ex.IdiomSolitaire(news_title)
for _ in range(NEWS_K):
article.new_reply(solitaire.get(), anonymous = True)
return 0
if __name__ == '__main__':
try:
exit(main())
except KeyboardInterrupt:
exit(130)
|
StarcoderdataPython
|
3202374
|
import os
import platform
import tempfile
import time
from pathlib import Path
from test.conftest import TEST_REF, conan_create_and_upload
from typing import List
from conan_app_launcher.core.conan import (ConanApi, ConanCleanup,
_create_key_value_pair_list)
from conan_app_launcher.core.conan_worker import (ConanWorker,
ConanWorkerElement)
from conans import __version__
from conans.model.ref import ConanFileReference
def test_conan_profile_name_alias_builder():
""" Test, that the build_conan_profile_name_alias returns human readable strings. """
# check empty - should return a default name
profile_name = ConanApi.build_conan_profile_name_alias({})
assert profile_name == "No Settings"
# check a partial
settings = {'os': 'Windows', 'arch': 'x86_64'}
profile_name = ConanApi.build_conan_profile_name_alias(settings)
assert profile_name == "Windows_x64"
# check windows
settings = {'os': 'Windows', 'os_build': 'Windows', 'arch': 'x86_64', 'arch_build': 'x86_64',
'compiler': 'Visual Studio', 'compiler.version': '16', 'compiler.toolset': 'v142', 'build_type': 'Release'}
profile_name = ConanApi.build_conan_profile_name_alias(settings)
assert profile_name == "Windows_x64_vs16_v142_release"
# check linux
settings = {'os': 'Linux', 'arch': 'x86_64', 'compiler': 'gcc',
'compiler.version': '7.4', 'build_type': 'Debug'}
profile_name = ConanApi.build_conan_profile_name_alias(settings)
assert profile_name == "Linux_x64_gcc7.4_debug"
def test_conan_short_path_root():
""" Test, that short path root can be read. """
new_short_home = Path(tempfile.gettempdir()) / "._myconan_short"
os.environ["CONAN_USER_HOME_SHORT"] = str(new_short_home)
conan = ConanApi()
if platform.system() == "Windows":
assert conan.get_short_path_root() == new_short_home
else:
assert not conan.get_short_path_root().exists()
os.environ.pop("CONAN_USER_HOME_SHORT")
def test_empty_cleanup_cache(base_fixture):
"""
Test, if a clean cache returns no dirs. Actual functionality is tested with gui.
It is assumed, that the cash is clean, like it would be on the CI.
"""
os.environ["CONAN_USER_HOME"] = str(Path(tempfile.gettempdir()) / "._myconan_home")
os.environ["CONAN_USER_HOME_SHORT"] = str(Path(tempfile.gettempdir()) / "._myconan_short")
paths = ConanCleanup(ConanApi()).get_cleanup_cache_paths()
assert not paths
os.environ.pop("CONAN_USER_HOME")
os.environ.pop("CONAN_USER_HOME_SHORT")
def test_conan_find_remote_pkg(base_fixture):
"""
Test, if search_package_in_remotes finds a package for the current system and the specified options.
The function must find exactly one pacakge, which uses the spec. options and corresponds to the
default settings.
"""
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
default_settings = dict(conan.client_cache.default_profile.settings)
pkgs = conan.get_matching_package_in_remotes(ConanFileReference.loads(TEST_REF), {"shared": "True"})
assert len(pkgs) > 0
pkg = pkgs[0]
assert {"shared": "True"}.items() <= pkg["options"].items()
for setting in default_settings:
if setting in pkg["settings"].keys():
assert default_settings[setting] in pkg["settings"][setting]
def test_conan_not_find_remote_pkg_wrong_opts(base_fixture):
"""
Test, if a wrong Option return causes an error.
Empty list must be returned and the error be logged.
"""
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
pkg = conan.get_matching_package_in_remotes(ConanFileReference.loads(TEST_REF), {"BogusOption": "True"})
assert not pkg
def test_conan_find_local_pkg(base_fixture):
"""
    Test that find_best_matching_packages finds exactly one matching package
    after it has been installed locally.
"""
os.system(f"conan install {TEST_REF} -u")
conan = ConanApi()
pkgs = conan.find_best_matching_packages(ConanFileReference.loads(TEST_REF))
assert len(pkgs) == 1
def test_get_path_or_install(base_fixture):
"""
    Test that get_path_or_auto_install installs the package and returns the path, then check it again.
    The bin dir in the package must exist (indicating it was correctly downloaded).
"""
dir_to_check = "bin"
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
# Gets package path / installs the package
id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF))
assert (package_folder / dir_to_check).is_dir()
# check again for already installed package
id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF))
assert (package_folder / dir_to_check).is_dir()
def test_get_path_or_install_manual_options(capsys):
"""
    Test that a package with options can be installed.
    The actual installation must not return an error, and options that are not given must be merged with the default options.
"""
# This package has an option "shared" and is fairly small.
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF), {"shared": "True"})
if platform.system() == "Windows":
assert (package_folder / "bin" / "python.exe").is_file()
elif platform.system() == "Linux":
assert (package_folder / "bin" / "python").is_file()
def test_install_with_any_settings(mocker, capfd):
"""
    Test that a package with <setting>=Any flags can be installed.
    The actual installation must not return an error.
"""
# mock the remote response
os.system(f"conan remove {TEST_REF} -f")
# Create the "any" package
conan = ConanApi()
assert conan.install_package(
ConanFileReference.loads(TEST_REF),
{'id': '325c44fdb228c32b3de52146f3e3ff8d94dddb60', 'options': {}, 'settings': {
'arch_build': 'any', 'os_build': 'Linux', "build_type": "ANY"}, 'requires': [], 'outdated': False},)
captured = capfd.readouterr()
assert "ERROR" not in captured.err
assert "Cannot install package" not in captured.err
def test_compiler_no_settings(base_fixture, capfd):
"""
    Test that a package with no settings at all can be installed.
    The actual installation must not return an error.
"""
conanfile = str(base_fixture.testdata_path / "conan" / "conanfile_no_settings.py")
ref = "example/1.0.0@local/no_sets"
conan_create_and_upload(conanfile, ref)
os.system(f"conan remove {ref} -f")
conan = ConanApi()
id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(ref))
assert (package_folder / "bin").is_dir()
captured = capfd.readouterr()
assert "ERROR" not in captured.err
assert "Can't find a matching package" not in captured.err
os.system(f"conan remove {ref} -f")
def test_resolve_default_options(base_fixture):
"""
    Test that different types of default options can be converted to a dict.
    A dict is expected.
"""
conan = ConanApi()
str_val = "option=value"
ret = conan._resolve_default_options(str_val)
assert ret.items()
tup_val = ("option=value", "options2=value2")
ret = conan._resolve_default_options(tup_val)
assert ret.items()
list_val = ["option=value", "options2=value2"]
ret = conan._resolve_default_options(list_val)
assert ret.items()
def test_create_key_value_list(base_fixture):
"""
    Test that key-value pairs can be extracted as strings. No arrays or other types are supported.
    The return value must be a list of strings in the format ["key1=value1", "key2=value2"].
    "Any" values are ignored (case insensitive).
"""
inp = {"Key1": "Value1"}
res = _create_key_value_pair_list(inp)
assert res == ["Key1=Value1"]
inp = {"Key1": "Value1", "Key2": "Value2"}
res = _create_key_value_pair_list(inp)
assert res == ["Key1=Value1", "Key2=Value2"]
inp = {"Key1": "Value1", "Key2": "Any"}
res = _create_key_value_pair_list(inp)
assert res == ["Key1=Value1"]
def test_search_for_all_packages(base_fixture):
""" Test, that an existing ref will be found in the remotes. """
conan = ConanApi()
res = conan.search_recipe_alternatives_in_remotes(ConanFileReference.loads(TEST_REF))
ref = ConanFileReference.loads(TEST_REF) # need to convert @_/_
assert str(ref) in str(res)
def test_conan_worker(base_fixture, mocker):
"""
    Test that the conan worker works on the queue.
    It is expected that the queue size decreases over time.
"""
conan_refs: List[ConanWorkerElement] = [{"ref_pkg_id": "m4/1.4.19@_/_", "options": {},
"settings": {}, "update": False, "auto_install": True},
{"ref_pkg_id": "zlib/1.2.11@conan/stable", "options": {"shared": "True"},
"settings": {}, "update": False, "auto_install": True}
]
mock_func = mocker.patch('conan_app_launcher.core.ConanApi.get_path_or_auto_install')
import conan_app_launcher.app as app
conan_worker = ConanWorker(ConanApi(), app.active_settings)
conan_worker.update_all_info(conan_refs, None)
time.sleep(3)
conan_worker.finish_working()
mock_func.assert_called()
assert conan_worker._conan_install_queue.qsize() == 0
|
StarcoderdataPython
|
3227115
|
<filename>lanzou/gui/dialogs/setting.py
import os
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QDialog, QLabel, QDialogButtonBox, QLineEdit, QCheckBox,
QHBoxLayout, QVBoxLayout, QFormLayout, QFileDialog)
from lanzou.gui.qss import dialog_qss_style
from lanzou.gui.others import MyLineEdit, AutoResizingTextEdit
from lanzou.debug import SRC_DIR
class SettingDialog(QDialog):
saved = pyqtSignal()
def __init__(self, parent=None):
super(SettingDialog, self).__init__(parent)
self._config = object
self.download_threads = 3
self.max_size = 100
self.timeout = 5
self.dl_path = None
self.time_fmt = False
self.to_tray = False
self.watch_clipboard = False
self.debug = False
self.set_pwd = False
self.set_desc = False
self.upload_delay = 0
self.allow_big_file = False
self.upgrade = True
self.pwd = ""
self.desc = ""
self.initUI()
self.setStyleSheet(dialog_qss_style)
def open_dialog(self, config):
""""打开前先更新一下显示界面"""
self._config = config
if self._config.name:
self.setWindowTitle(f"设置 <{self._config.name}>")
else:
self.setWindowTitle("设置")
self.cwd = self._config.path
self.set_values()
self.exec()
def show_values(self):
"""控件显示值"""
self.download_threads_var.setText(str(self.download_threads))
self.max_size_var.setText(str(self.max_size))
self.timeout_var.setText(str(self.timeout))
self.dl_path_var.setText(str(self.dl_path))
self.time_fmt_box.setChecked(self.time_fmt)
self.to_tray_box.setChecked(self.to_tray)
self.watch_clipboard_box.setChecked(self.watch_clipboard)
self.debug_box.setChecked(self.debug)
self.set_pwd_box.setChecked(self.set_pwd)
self.set_pwd_var.setEnabled(self.set_pwd)
self.set_pwd_var.setText(self.pwd)
self.set_desc_box.setChecked(self.set_desc)
self.set_desc_var.setEnabled(self.set_desc)
self.set_desc_var.setText(self.desc)
self.upload_delay_var.setText(str(self.upload_delay))
self.big_file_box.setChecked(self.allow_big_file)
self.big_file_box.setText(f"允许上传超过 {self.max_size}MB 的大文件")
        self.big_file_box.setDisabled(True)  # disable the entry point for the big-file upload setting
self.upgrade_box.setChecked(self.upgrade)
def set_values(self, reset=False):
"""设置控件对应变量初始值"""
settings = self._config.default_settings if reset else self._config.settings
self.download_threads = settings["download_threads"]
self.max_size = settings["max_size"]
self.timeout = settings["timeout"]
self.dl_path = settings["dl_path"]
self.time_fmt = settings["time_fmt"]
self.to_tray = settings["to_tray"]
self.watch_clipboard = settings["watch_clipboard"]
self.debug = settings["debug"]
self.set_pwd = settings["set_pwd"]
        self.pwd = settings["pwd"]
self.set_desc = settings["set_desc"]
self.desc = settings["desc"]
self.upload_delay = settings["upload_delay"]
if 'upgrade' in settings:
self.upgrade = settings["upgrade"]
self.show_values()
def get_values(self) -> dict:
"""读取输入控件的值"""
if self.download_threads_var.text():
self.download_threads = int(self.download_threads_var.text())
if self.max_size_var.text():
self.max_size = int(self.max_size_var.text())
if self.timeout_var.text():
self.timeout = int(self.timeout_var.text())
if self.upload_delay_var.text():
self.upload_delay = int(self.upload_delay_var.text())
self.dl_path = str(self.dl_path_var.text())
self.pwd = str(self.set_pwd_var.toPlainText())
self.desc = str(self.set_desc_var.toPlainText())
return {"download_threads": self.download_threads,
"max_size": self.max_size,
"timeout": self.timeout,
"dl_path": self.dl_path,
"time_fmt": self.time_fmt,
"to_tray": self.to_tray,
"watch_clipboard": self.watch_clipboard,
"debug": self.debug,
"set_pwd": self.set_pwd,
"pwd": self.<PASSWORD>,
"set_desc": self.set_desc,
"desc": self.desc,
"upload_delay": self.upload_delay,
"allow_big_file": self.allow_big_file,
"upgrade": self.upgrade}
def initUI(self):
self.setWindowTitle("设置")
logo = QLabel()
logo.setPixmap(QPixmap(SRC_DIR + "logo2.gif"))
logo.setStyleSheet("background-color:rgb(255,255,255);")
logo.setAlignment(Qt.AlignCenter)
self.download_threads_lb = QLabel("同时下载文件数")
self.download_threads_var = QLineEdit()
self.download_threads_var.setPlaceholderText("范围:1-9")
self.download_threads_var.setToolTip("范围:1-9")
self.download_threads_var.setInputMask("D")
self.max_size_lb = QLabel("分卷大小(MB)")
self.max_size_var = QLineEdit()
self.max_size_var.setPlaceholderText("普通用户最大100,vip用户根据具体情况设置")
self.max_size_var.setToolTip("普通用户最大100,vip用户根据具体情况设置")
self.max_size_var.setInputMask("D99")
self.timeout_lb = QLabel("请求超时(秒)")
self.timeout_var = QLineEdit()
self.timeout_var.setPlaceholderText("范围:1-99")
self.timeout_var.setToolTip("范围:1-99")
self.timeout_var.setInputMask("D9")
self.upload_delay_lb = QLabel("上传延时(秒)")
self.upload_delay_var = QLineEdit()
self.upload_delay_var.setPlaceholderText("范围:1-99")
self.upload_delay_var.setToolTip("范围:1-99")
self.upload_delay_var.setInputMask("D9")
self.dl_path_lb = QLabel("下载保存路径")
self.dl_path_var = MyLineEdit(self)
self.dl_path_var.clicked.connect(self.set_download_path)
self.time_fmt_box = QCheckBox("使用[年-月-日]时间格式")
self.time_fmt_box.setToolTip("文件上传日期显示格式")
self.to_tray_box = QCheckBox("关闭到系统托盘")
self.to_tray_box.setToolTip("点击关闭软件按钮是最小化软件至系统托盘")
self.watch_clipboard_box = QCheckBox("监听系统剪切板")
self.watch_clipboard_box.setToolTip("检测到系统剪切板中有符合规范的蓝奏链接时自动唤起软件,并提取")
self.debug_box = QCheckBox("开启调试日志")
self.debug_box.setToolTip("记录软件 debug 信息至 debug-lanzou-gui.log 文件")
self.set_pwd_box = QCheckBox("上传文件自动设置密码")
self.set_pwd_var = AutoResizingTextEdit()
self.set_pwd_var.setPlaceholderText(" 2-8 位数字或字母")
self.set_pwd_var.setToolTip("2-8 位数字或字母")
self.set_desc_box = QCheckBox("上传文件自动设置描述")
self.set_desc_var = AutoResizingTextEdit()
self.big_file_box = QCheckBox(f"允许上传超过 {self.max_size}MB 的大文件")
self.big_file_box.setToolTip("开启大文件上传支持 (功能下线)")
self.upgrade_box = QCheckBox("自动检测新版本")
self.upgrade_box.setToolTip("在软件打开时自动检测是否有新的版本发布,如有则弹出更新信息")
self.time_fmt_box.toggle()
self.time_fmt_box.stateChanged.connect(self.change_time_fmt)
self.to_tray_box.stateChanged.connect(self.change_to_tray)
self.watch_clipboard_box.stateChanged.connect(self.change_watch_clipboard)
self.debug_box.stateChanged.connect(self.change_debug)
self.set_pwd_box.stateChanged.connect(self.change_set_pwd)
self.set_pwd_var.editingFinished.connect(self.check_pwd)
self.set_desc_box.stateChanged.connect(self.change_set_desc)
self.big_file_box.stateChanged.connect(self.change_big_file)
self.upgrade_box.stateChanged.connect(self.change_upgrade)
buttonBox = QDialogButtonBox()
buttonBox.setOrientation(Qt.Horizontal)
buttonBox.setStandardButtons(QDialogButtonBox.Reset | QDialogButtonBox.Save | QDialogButtonBox.Cancel)
buttonBox.button(QDialogButtonBox.Reset).setText("重置")
buttonBox.button(QDialogButtonBox.Save).setText("保存")
buttonBox.button(QDialogButtonBox.Cancel).setText("取消")
buttonBox.button(QDialogButtonBox.Reset).clicked.connect(lambda: self.set_values(reset=True))
buttonBox.button(QDialogButtonBox.Save).clicked.connect(self.slot_save)
buttonBox.rejected.connect(self.reject)
form = QFormLayout()
form.setLabelAlignment(Qt.AlignRight)
form.setSpacing(10)
form.addRow(self.download_threads_lb, self.download_threads_var)
form.addRow(self.timeout_lb, self.timeout_var)
form.addRow(self.upload_delay_lb, self.upload_delay_var)
form.addRow(self.max_size_lb, self.max_size_var)
form.addRow(self.dl_path_lb, self.dl_path_var)
vbox = QVBoxLayout()
vbox.addWidget(logo)
vbox.addStretch(1)
vbox.addLayout(form)
vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addWidget(self.time_fmt_box)
hbox.addWidget(self.to_tray_box)
hbox.addWidget(self.watch_clipboard_box)
hbox.addWidget(self.debug_box)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox_2 = QHBoxLayout()
hbox_2.addWidget(self.set_pwd_box)
hbox_2.addWidget(self.set_pwd_var)
vbox.addLayout(hbox_2)
vbox.addStretch(1)
hbox_3 = QHBoxLayout()
hbox_3.addWidget(self.set_desc_box)
hbox_3.addWidget(self.set_desc_var)
vbox.addLayout(hbox_3)
hbox_4 = QHBoxLayout()
hbox_4.addWidget(self.big_file_box)
hbox_4.addWidget(self.upgrade_box)
vbox.addStretch(1)
vbox.addLayout(hbox_4)
vbox.addStretch(2)
vbox.addWidget(buttonBox)
self.setLayout(vbox)
self.setMinimumWidth(500)
def change_time_fmt(self, state):
if state == Qt.Checked:
self.time_fmt = True
else:
self.time_fmt = False
def change_to_tray(self, state):
if state == Qt.Checked:
self.to_tray = True
else:
self.to_tray = False
def change_watch_clipboard(self, state):
if state == Qt.Checked:
self.watch_clipboard = True
else:
self.watch_clipboard = False
def change_debug(self, state):
if state == Qt.Checked:
self.debug = True
else:
self.debug = False
def change_big_file(self, state):
if state == Qt.Checked:
self.allow_big_file = True
else:
self.allow_big_file = False
def change_upgrade(self, state):
if state == Qt.Checked:
self.upgrade = True
else:
self.upgrade = False
def change_set_pwd(self, state):
if state == Qt.Checked:
self.set_pwd = True
self.set_pwd_var.setDisabled(False)
else:
self.set_pwd = False
self.set_pwd_var.setDisabled(True)
def change_set_desc(self, state):
if state == Qt.Checked:
self.set_desc = True
self.set_desc_var.setDisabled(False)
else:
self.set_desc = False
self.set_desc_var.setDisabled(True)
def check_pwd(self):
pwd = self.set_pwd_var.toPlainText()
pwd = ''.join(list(filter(str.isalnum, pwd)))
if len(pwd) < 2:
pwd = ""
self.set_pwd_var.setText(pwd[:8])
def set_download_path(self):
"""设置下载路径"""
dl_path = QFileDialog.getExistingDirectory(self, "选择文件下载保存文件夹", self.cwd)
dl_path = os.path.normpath(dl_path) # windows backslash
if dl_path == self.dl_path or dl_path == ".":
return None
self.dl_path_var.setText(dl_path)
self.dl_path = dl_path
def slot_save(self):
"""保存槽函数"""
self._config.settings = self.get_values()
self.saved.emit()
self.close()
|
StarcoderdataPython
|
12843181
|
<reponame>MuhammedAshraf2020/ImageColorization
from processing import *
from decodingModel import *
import numpy as np  # np is used below for steps_per_epoch; an explicit import keeps the script self-contained
# Using Transfer learning
feature_extract_model = VggModel()
#Decoding model
colorize = model()
#prepare data in hard disk
PrepareData(datapath = "/content/data" , save_file = "/content/processed/" ,
target_size = (224 , 224) , batch_size = 32 , feature_extract_model = feature_extract_model)
training_dir = "/content/processed"
num_train_samples = 1000
batch_size = 32
steps_per_epoch = np.floor(num_train_samples/batch_size)
epochs = 200
for i in range(epochs):
generator = data_generator_baseline(training_dir, num_train_samples, batch_size)
fit_history = colorize.fit_generator(generator, epochs=1, steps_per_epoch=steps_per_epoch, verbose=1)
if i % 10 == 0:
colorize.save('model_merge_' + str(i) + '.h5')
X = test_images(path = "/content/oldes" , shape = (224 , 224) , batch_size = 2 ,
feature_extract_model = feature_extract_model , model = colorize )
show_images(X , width = 20 , hight = 20 , columns = 2 , rows = 1)
|
StarcoderdataPython
|
5136623
|
<filename>src/meadowrun/azure_integration/mgmt_functions/clean_up/__init__.py
"""
This code cannot reference anything outside of mgmt_functions (as that's what gets
uploaded to the Azure function). We use relative imports which will work both in the
"regular" environment as well as in the Azure function
"""
import asyncio
import datetime
import logging
import os
from typing import List
from azure.containerregistry import ArtifactTagProperties
from azure.containerregistry.aio import ContainerRegistryClient
from azure.core.credentials import AzureNamedKeyCredential
from azure.data.tables.aio import TableClient
from azure.mgmt.storage.aio import StorageManagementClient
from ..azure_instance_alloc_stub import (
CONTAINER_IMAGE,
GRID_TASK_QUEUE,
LAST_USED_TABLE_NAME,
MEADOWRUN_RESOURCE_GROUP_NAME,
MEADOWRUN_STORAGE_ACCOUNT_KEY_VARIABLE,
MEADOWRUN_STORAGE_ACCOUNT_VARIABLE,
MEADOWRUN_SUBSCRIPTION_ID,
QUEUE_NAME_TIMESTAMP_FORMAT,
_MEADOWRUN_GENERATED_DOCKER_REPO,
_REQUEST_QUEUE_NAME_PREFIX,
_RESULT_QUEUE_NAME_PREFIX,
get_credential_aio,
meadowrun_registry_name,
)
# a queue that has been inactive for this time will get cleaned up
_QUEUE_INACTIVE_TIME = datetime.timedelta(hours=4)
# a container image that has not been used for this time will get cleaned up
_CONTAINER_IMAGE_UNUSED_TIME = datetime.timedelta(days=4)
async def delete_old_task_queues() -> List[str]:
"""See _deregister_and_terminate_vms for why we return log statements"""
logs = []
storage_account_name = os.environ[MEADOWRUN_STORAGE_ACCOUNT_VARIABLE]
storage_account_key = os.environ[MEADOWRUN_STORAGE_ACCOUNT_KEY_VARIABLE]
async with TableClient(
f"https://{storage_account_name}.table.core.windows.net/",
LAST_USED_TABLE_NAME,
credential=AzureNamedKeyCredential(storage_account_name, storage_account_key),
) as table_client, StorageManagementClient(
get_credential_aio(), os.environ[MEADOWRUN_SUBSCRIPTION_ID]
) as queue_client:
# the last used records are keyed off of the job_id for the queue, whereas
# the queue names are in the form of {prefix}-{job_id}-{created_timestamp}
now = datetime.datetime.utcnow()
now_with_timezone = datetime.datetime.now(datetime.timezone.utc)
delete_tasks = []
last_used_records = {
item["RowKey"]: item.metadata["timestamp"]
async for item in table_client.query_entities(
f"PartitionKey eq '{GRID_TASK_QUEUE}'"
)
}
surviving_job_ids = set()
deleted_job_ids = set()
async for queue in queue_client.queue.list(
MEADOWRUN_RESOURCE_GROUP_NAME,
os.environ[MEADOWRUN_STORAGE_ACCOUNT_VARIABLE],
):
if not queue.name.startswith(
_REQUEST_QUEUE_NAME_PREFIX
) and not queue.name.startswith(_RESULT_QUEUE_NAME_PREFIX):
# this is not a meadowrun grid task queue
continue
# first parse the queue names, while deleting any queues that don't fit the
# expected patterns
prefix, sep, remainder = queue.name.partition("-")
job_id, sep, created_timestamp_string = remainder.rpartition("-")
if sep != "-":
logs.append(
"Queue name was not in the expected prefix-job_id-timestamp format:"
f" {queue.name}, deleting it"
)
delete_tasks.append(
asyncio.create_task(
queue_client.queue.delete(
MEADOWRUN_RESOURCE_GROUP_NAME,
storage_account_name,
queue.name,
)
)
)
continue
# next, if we have a last-used record that's too old, delete the queue,
# otherwise mark it as a "surviving" queue so that we know to keep the
# last_used record around
if job_id in last_used_records:
last_used = last_used_records[job_id]
if now_with_timezone - last_used > _QUEUE_INACTIVE_TIME:
logs.append(
f"Queue {queue.name} was last used at {last_used}, deleting"
)
deleted_job_ids.add(job_id)
delete_tasks.append(
asyncio.create_task(
queue_client.queue.delete(
MEADOWRUN_RESOURCE_GROUP_NAME,
storage_account_name,
queue.name,
)
)
)
else:
surviving_job_ids.add(job_id)
continue
# finally, we don't have any last-used records, so we have to use the
# created timestamp
try:
created_timestamp = datetime.datetime.strptime(
created_timestamp_string, QUEUE_NAME_TIMESTAMP_FORMAT
)
except ValueError:
logs.append(
f"Queue name {queue.name} is in the format prefix-job_id-timestamp,"
" but the timestamp cannot be parsed, deleting the queue"
)
delete_tasks.append(
asyncio.create_task(
queue_client.queue.delete(
MEADOWRUN_RESOURCE_GROUP_NAME,
storage_account_name,
queue.name,
)
)
)
continue
if now - created_timestamp > _QUEUE_INACTIVE_TIME:
logs.append(
f"Queue {queue.name} has no last used records and was created at "
f"{created_timestamp}, deleting"
)
delete_tasks.append(
asyncio.create_task(
queue_client.queue.delete(
MEADOWRUN_RESOURCE_GROUP_NAME,
storage_account_name,
queue.name,
)
)
)
continue
# now delete last_used records that don't correspond to any existing queues
for job_id in last_used_records.keys():
if job_id in surviving_job_ids:
continue
if job_id not in deleted_job_ids:
logs.append(
f"job_id {job_id} has a last_used record, but no existing queues, "
"deleting the last_used record now"
)
# if we did delete the corresponding queue, still delete the last_used
# record, just no need to log
delete_tasks.append(
asyncio.create_task(table_client.delete_entity(GRID_TASK_QUEUE, job_id))
)
if delete_tasks:
await asyncio.wait(delete_tasks)
return logs
async def delete_unused_images() -> List[str]:
"""See _deregister_and_terminate_vms for why we return log statements"""
logs = []
storage_account_name = os.environ[MEADOWRUN_STORAGE_ACCOUNT_VARIABLE]
storage_account_key = os.environ[MEADOWRUN_STORAGE_ACCOUNT_KEY_VARIABLE]
registry_name = meadowrun_registry_name(os.environ[MEADOWRUN_SUBSCRIPTION_ID])
async with TableClient(
f"https://{storage_account_name}.table.core.windows.net/",
LAST_USED_TABLE_NAME,
credential=AzureNamedKeyCredential(storage_account_name, storage_account_key),
) as table_client, ContainerRegistryClient(
f"{registry_name}.azurecr.io",
get_credential_aio(),
audience="https://management.azure.com",
) as acr_client:
delete_tasks = []
now = datetime.datetime.now(datetime.timezone.utc)
last_used_records = {
item["RowKey"]: item.metadata["timestamp"]
async for item in table_client.query_entities(
f"PartitionKey eq '{CONTAINER_IMAGE}'"
)
}
deleted_tags = set()
surviving_tags = set()
async for tag in acr_client.list_tag_properties(
_MEADOWRUN_GENERATED_DOCKER_REPO,
# copied and modified from
# azure.containerregistry.aio._async_container_registry_client.py:474 Can be
# deleted when
# https://github.com/Azure/azure-sdk-for-python/pull/24621/files is merged
cls=lambda objs: [
ArtifactTagProperties._from_generated(
o, repository=_MEADOWRUN_GENERATED_DOCKER_REPO # type: ignore
)
for o in objs
]
if objs
else [],
):
# first see if we have a last used record for this tag
if tag.name in last_used_records:
last_used = last_used_records[tag.name]
if now - last_used > _CONTAINER_IMAGE_UNUSED_TIME:
logs.append(
f"Image {tag.name} will be deleted, was last used at "
f"{last_used}"
)
delete_tasks.append(
asyncio.create_task(
acr_client.delete_tag(
_MEADOWRUN_GENERATED_DOCKER_REPO, tag.name
)
)
)
deleted_tags.add(tag.name)
else:
surviving_tags.add(tag.name)
continue
# if we don't have a last used record, use the last_updated property
if now - tag.last_updated_on > _CONTAINER_IMAGE_UNUSED_TIME:
logs.append(
f"Image {tag.name} will be deleted, has not been used and last "
f"updated at {tag.last_updated_on}"
)
delete_tasks.append(
asyncio.create_task(
acr_client.delete_tag(
_MEADOWRUN_GENERATED_DOCKER_REPO, tag.name
)
)
)
for tag_name in last_used_records.keys():
if tag_name in surviving_tags:
continue
if tag_name not in deleted_tags:
logs.append(
f"Image {tag_name} has a last_used record but the image does not "
"exist. Deleting the last_used record now"
)
# if we did delete the corresponding image, still delete the last_used
# record, just no need to log
delete_tasks.append(
asyncio.create_task(
table_client.delete_entity(CONTAINER_IMAGE, tag_name)
)
)
if delete_tasks:
await asyncio.wait(delete_tasks)
return logs
def main(myTimer) -> None: # type: ignore
"""
The entry point for the Azure function (see function.json). Configured to run every
minute by default.
    myTimer should be annotated as azure.functions.TimerRequest, but we'd rather not add
that dependency (it exists by default in the function runtime where this function
will actually run). Also, the variable cannot be snake_case, and can only be changed
if the corresponding name in function.json is changed.
"""
logs = asyncio.run(delete_old_task_queues())
for log in logs:
logging.info(log)
logs = asyncio.run(delete_unused_images())
for log in logs:
logging.info(log)
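
if __name__ == "__main__":
    # Manual/local invocation sketch (outside the Azure Functions runtime).
    # Assumes the storage-account and subscription environment variables named
    # by the constants imported above are already set, and that Azure
    # credentials are available to get_credential_aio().
    logging.basicConfig(level=logging.INFO)
    main(None)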
|
StarcoderdataPython
|
1718879
|
from abc import ABC
from typing import Dict, Sequence, Optional, List, Any
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.sensor import Sensor
class GymBaseConfig(ExperimentConfig, ABC):
SENSORS: Optional[Sequence[Sensor]] = None
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
raise NotImplementedError
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(
process_ind=process_ind, mode="train", seeds=seeds
)
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(
process_ind=process_ind, mode="valid", seeds=seeds
)
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self._get_sampler_args(process_ind=process_ind, mode="test", seeds=seeds)
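
class _ExampleGymConfig(GymBaseConfig):
    """Illustrative sketch only: shows how a concrete config supplies
    _get_sampler_args. A real experiment config must also implement the
    remaining ExperimentConfig abstract methods, and the kwargs returned here
    are placeholders for whatever the chosen task sampler actually expects."""

    SENSORS: Sequence[Sensor] = []

    def _get_sampler_args(
        self, process_ind: int, mode: str, seeds: List[int]
    ) -> Dict[str, Any]:
        return {"process_ind": process_ind, "mode": mode, "seeds": seeds}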
|
StarcoderdataPython
|
4928330
|
import os.path
import os
from os import mkdir, makedirs, rename, listdir
from os.path import join, exists, relpath, abspath
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import numpy as np
class PairedNirDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt, seed=100):
BaseDataset.__init__(self, opt)
if self.opt.dataset_name == 'oulu':
self.__init_oulu()
else:
raise NotImplementedError
random.seed(seed)
# apply the same transform to both A and B
transform_params = get_params(self.opt, Image.open(self.A_path[0]).convert('RGB').size)
self.A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
transform_params = get_params(self.opt, Image.open(self.B_path[0]).convert('RGB').size)
self.B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
def __init_oulu(self):
dir_A = join(self.root, 'NI/Strong')
dir_B = join(self.root, 'VL/Strong')
ds_A = sorted(make_dataset(dir_A, self.opt.max_dataset_size))
ds_B = sorted(make_dataset(dir_B, self.opt.max_dataset_size))
assert len(ds_A)>0 and len(ds_B)>0
self.input_nc = 3
self.output_nc = 3
self.A_path = []
self.B_path = []
self.label = []
idx = ds_A[0].split('/').index('NI')
for imp in ds_A:
spt = imp.split('/')
spt[0]='/'
spt[idx]='VL'
imp1 = join(*spt)
if imp1 in ds_B:
self.A_path.append(imp)
self.B_path.append(imp1)
self.label.append(int(spt[idx+2][1:])-1)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
# read a image given a random integer index
A_path = self.A_path[index]
B_path = self.B_path[index]
label = self.label[index]
A = Image.open(A_path).convert('RGB')
B = Image.open(B_path).convert('RGB')
# apply the same transform to both A and B
#transform_params = get_params(self.opt, A.size)
#A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
#transform_params = get_params(self.opt, B.size)
#B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
A = self.A_transform(A)
B = self.B_transform(B)
return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path, 'label': label}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.label)
|
StarcoderdataPython
|
3315007
|
<filename>recognition/audio_settings.py<gh_stars>1-10
from typing import List
from audio import AudioSettings
def get_common_settings(settings: List[AudioSettings]) -> AudioSettings:
channels = settings[0].channels
sample_format = settings[0].sample_format
sample_rate = settings[0].sample_rate
for setting in settings:
if (channels != setting.channels
or sample_format != setting.sample_format
or sample_rate != setting.sample_rate):
raise Exception("Settings are not consistent")
return settings[0]
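# Usage sketch (hypothetical values; the AudioSettings constructor lives in the
# audio module, so the field names below are assumed from the attributes used above):
#
#   mic = AudioSettings(channels=1, sample_format=8, sample_rate=16000)
#   line_in = AudioSettings(channels=1, sample_format=8, sample_rate=16000)
#   common = get_common_settings([mic, line_in])  # raises if the settings differ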
|
StarcoderdataPython
|
362976
|
#!/usr/bin/env python
#******************************************************************************
# Name: ingests1s2.py
# Purpose: Unpack and ingest time series of sentinel-1 vv, vh single pol
# SAR or VVVH dual pol diagonal only images
# exported from Earth Engine to, and downloaded from, Google Drive
# to a series of 2-band images (dual pol diagonal only polarimetric matrix).
# If present, also unpack and ingest a single 4-band sentinel-2 image downloaded
# from earth engine in ZIP format.
#
# Usage:
# import ingests1s2
# ingests1s2.ingest(path,s1infile)
# or
# python ingests1s2.py [OPTIONS] path
#
# MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, getopt, gdal, os, re, time
from zipfile import ZipFile
from osgeo.gdalconst import GA_ReadOnly, GDT_Float32, GDT_Int16
def ingest(path,s1infile):
print '========================='
print ' Ingesting S1 (and S2)'
print '========================='
print time.asctime()
print 'Directory: %s'%path
print 'Sentinel-1 filename: %s'%s1infile
gdal.AllRegister()
start = time.time()
os.chdir(path)
try:
files = os.listdir(path)
for afile in files:
# unpack VNIR archive in path
if re.search('.zip',afile):
ZipFile(afile).extractall(path)
# get sorted list of VNIR files
files = os.listdir(path)
files1 = []
for afile in files:
if re.search('B[1-8].tif',afile):
files1.append(afile)
if len(files1) > 0:
files1.sort()
bands = len(files1)
outfn = path+'sentinel2.tif'
inDataset = gdal.Open(files1[0],GA_ReadOnly)
cols = inDataset.RasterXSize
rows = inDataset.RasterYSize
# ingest to a single file
driver = gdal.GetDriverByName('GTiff')
outDataset = driver.Create(outfn,cols,rows,bands,GDT_Int16)
projection = inDataset.GetProjection()
geotransform = inDataset.GetGeoTransform()
if geotransform is not None:
outDataset.SetGeoTransform(geotransform)
if projection is not None:
outDataset.SetProjection(projection)
for i in range(bands):
print 'writing band %i'%(i+1)
inDataset = gdal.Open(files1[i])
inBand = inDataset.GetRasterBand(1)
band = inBand.ReadAsArray(0,0,cols,rows)
outBand = outDataset.GetRasterBand(i+1)
outBand.WriteArray(band)
outBand.FlushCache()
inDataset = None
os.remove(files1[i].replace('.tif','.tfw'))
os.remove(files1[i])
outDataset = None
print 'created file %s' %outfn
# ingest the SAR image to a time series of files
infile = path+s1infile
inDataset = gdal.Open(infile,GA_ReadOnly)
driver = inDataset.GetDriver()
cols = inDataset.RasterXSize
rows = inDataset.RasterYSize
bands = inDataset.RasterCount
if bands == 2:
# dual pol diagonal only
for i in range(bands/2):
outfile = path+'sentinel1_VVVH_%i.tif'%(i+1)
geotransform = inDataset.GetGeoTransform()
projection = inDataset.GetProjection()
outDataset = driver.Create(outfile,cols,rows,2,GDT_Float32)
if geotransform is not None:
outDataset.SetGeoTransform(geotransform)
if projection is not None:
outDataset.SetProjection(projection)
inArray = inDataset.GetRasterBand(2*i+1).ReadAsArray(0,0,cols,rows)
outBand = outDataset.GetRasterBand(1)
outBand.WriteArray(inArray,0,0)
outBand.FlushCache()
outBand = outDataset.GetRasterBand(2)
inArray = inDataset.GetRasterBand(2*i+2).ReadAsArray(0,0,cols,rows)
outBand.WriteArray(inArray,0,0)
outBand.FlushCache()
outDataset = None
print 'created file %s'%outfile
else:
# single pol VV or VH
for i in range(bands):
outfile = path+'sentinel1_VV_%i.tif'%(i+1)
geotransform = inDataset.GetGeoTransform()
projection = inDataset.GetProjection()
outDataset = driver.Create(outfile,cols,rows,1,GDT_Float32)
if geotransform is not None:
outDataset.SetGeoTransform(geotransform)
if projection is not None:
outDataset.SetProjection(projection)
inArray = inDataset.GetRasterBand(i+1).ReadAsArray(0,0,cols,rows)
outBand = outDataset.GetRasterBand(1)
outBand.WriteArray(inArray,0,0)
outBand.FlushCache()
outDataset = None
print 'created file %s'%outfile
inDataset = None
print 'elapsed time: ' + str(time.time() - start)
except Exception as e:
print 'Error %s'%e
return None
def main():
usage = '''
Usage:
------------------------------------------------
python %s [OPTIONS] PATH S1_INFILENAME
Unpack and ingest time series of sentinel-1 vv, vh single pol
SAR or VVVH dual pol diagonal only images
exported from Earth Engine to and downloaded from from Google Drive
to a series of 2-band images (dual pol diagonal only polarimetric matrix).
If present, also unpack and ingest a single 4-band sentinel-2 image downloaded
from earth engine in ZIP format.
Options:
-h this help
--------------------------------------------'''%sys.argv[0]
options,args = getopt.getopt(sys.argv[1:],'h')
for option,_ in options:
if option == '-h':
print usage
return
if len(args) != 2:
print 'Incorrect number of arguments'
print usage
sys.exit(1)
ingest(args[0],args[1])
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6488662
|
class EntityNotFoundException(Exception):
pass
class EntityAlreadyExistsException(Exception):
pass
|
StarcoderdataPython
|
6463637
|
import logging
import common.ibc.processor
import fet.constants as co
import fet.fetchhub1.constants as co2
import common.ibc.handle
import common.ibc.constants
from fet.config_fet import localconfig
from settings_csv import FET_NODE
from fet.fetchhub1.processor_legacy import process_tx_legacy
from fet.handle_tx import handle_tx
from fet.handle_contract import handle_contract
def process_txs(wallet_address, elems, exporter, node, progress=None):
for i, elem in enumerate(elems):
process_tx(wallet_address, elem, exporter, node)
# Update progress bar for slower processing of fetchhub-1 (due to required timestamp query for each tx)
if node == co2.FET_FETCHUB1_NODE:
if i % 10 == 0 or i == len(elems)-1:
message = f"Processed {i + 1} of {len(elems)} transactions for fetchhub1"
progress.report(i+1, message, progress.STAGE_FET1_TXS)
def process_tx(wallet_address, elem, exporter, node=None):
    if node and node in (co2.FET_FETCHUB1_NODE,):
return process_tx_legacy(wallet_address, elem, exporter, node)
txinfo = common.ibc.processor.txinfo(
wallet_address, elem, co.MINTSCAN_LABEL_FET, co.EXCHANGE_FET, localconfig.ibc_addresses, FET_NODE)
try:
if txinfo.is_execute_contract():
# Handle transaction with execute contract message(s)
handle_contract(exporter, txinfo)
else:
# Handle all other transactions
handle_tx(exporter, txinfo)
except Exception as e:
logging.error(
"Exception when handling txid=%s, exception=%s", txinfo.txid, str(e))
common.ibc.handle.handle_unknown_detect_transfers_tx(exporter, txinfo)
if localconfig.debug:
raise e
return txinfo
|
StarcoderdataPython
|
3357581
|
__author__ = 'nick'
import unittest
import numpy as np
from hmm_localisation.hmm import HMM
from hmm_localisation.robot import Direction
class TestHMM(unittest.TestCase):
def test_probable_transitions_corner(self):
model = HMM(8, 8)
corner = (7, 0, Direction.SOUTH)
expected = [((7, 1, Direction.SOUTH), 0.7),
((7, 1, Direction.NORTH), 0.1),
((7, 1, Direction.EAST), float(1) / 3),
((7, 1, Direction.WEST), 0.1),
]
self.assertEqual(sorted(model.probable_transitions(corner)),
sorted(expected))
def test_probable_transitions_wall(self):
model = HMM(8, 8)
wall = (3, 7, Direction.EAST)
expected = [((2, 7, Direction.NORTH), float(1) / 3),
((2, 7, Direction.SOUTH), 0.1),
((2, 7, Direction.EAST), 0.7),
((2, 7, Direction.WEST), 0.1),
]
self.assertEqual(sorted(model.probable_transitions(wall)),
sorted(expected))
def test_probable_transitions_one_away_from_wall(self):
model = HMM(8, 8)
one_away = (7, 6, Direction.SOUTH)
expected = [((7, 7, Direction.SOUTH), 0.7),
((7, 7, Direction.NORTH), float(1) / 2),
((7, 7, Direction.EAST), float(1) / 2),
((7, 7, Direction.WEST), 0.1),
]
self.assertEqual(sorted(model.probable_transitions(one_away)),
sorted(expected))
def test_probable_transitions_inner(self):
model = HMM(8, 8)
inner = (3, 3, Direction.WEST)
expected = [((4, 3, Direction.SOUTH), 0.1),
((4, 3, Direction.NORTH), 0.1),
((4, 3, Direction.EAST), 0.1),
((4, 3, Direction.WEST), 0.7),
]
self.assertEqual(sorted(model.probable_transitions(inner)),
sorted(expected))
def test_probably_transitions_edge(self):
model = HMM(3, 3)
edge = (0, 0, Direction.EAST)
expected = []
self.assertEqual(model.probable_transitions(edge), expected)
def test_create_t_matrix(self):
model = HMM(2, 2)
self.assertIsInstance(model.t_matrix, np.ndarray)
self.assertEqual(model.t_matrix.size, 256)
def test_create_sensor_matrix(self):
model = HMM(8, 8)
sensor_matrix = model.create_sensor_matrix((1, 1))
self.assertEqual(sensor_matrix.size, 65536)
self.assertEqual(sensor_matrix[0, 1], 0)
self.assertEqual(sensor_matrix[0, 0], 0.05)
self.assertEqual(sensor_matrix[36, 36], 0.1)
self.assertEqual(sensor_matrix[44, 44], 0.025)
sensor_matrix = model.create_sensor_matrix(None)
self.assertEqual(sensor_matrix.size, 65536)
def test_none_matrix(self):
model = HMM(8, 8)
none_matrix = model.none_matrix
self.assertEqual(none_matrix.size, 65536)
self.assertEqual(none_matrix[0, 0], 0.625)
self.assertEqual(none_matrix[144, 144], 0.1)
self.assertEqual(none_matrix[132, 132], 0.225)
self.assertAlmostEquals(none_matrix[128, 128], 0.425)
def test_priors(self):
model = HMM(8, 8)
self.assertEqual(model.f_matrix[5], float(1) / (8 * 8 * 4))
def test_forward(self):
model = HMM(8, 8)
model.forward_step((4, 4))
self.assertNotEqual(model.f_matrix[5], float(1) / (8 * 8 * 4))
def test_most_probable(self):
model = HMM(8, 8)
model.forward_step((4, 2))
self.assertEqual(model.most_probable()[0], (4, 2))
model.forward_step((4, 3))
self.assertEqual(model.most_probable()[0], (4, 3))
model.forward_step((4, 6))
self.assertEqual(model.most_probable()[0], (4, 4))
model.forward_step((5, 7))
self.assertEqual(model.most_probable()[0], (4, 5))
model.forward_step((5, 2))
self.assertEqual(model.most_probable()[0], (4, 4))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1738320
|
<filename>SPGPylibs/PHItools/phifdt_flat.py
#=============================================================================
# Project: SoPHI
# File: phifdt_flat.py
# Author: <NAME> (<EMAIL>)
# Contributors: <NAME> and <NAME> (<EMAIL>)
#-----------------------------------------------------------------------------
# Description: Pipeline implementation for calculating the FDT flat
# Includes three algorithms for flat calculation and
# the circular Hough Transform in the frequency domain.
#-----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from .tools import printc,bcolors,timeit
from .phi_gen import *
from .phi_utils import *
from .phi_fits import *
from .phi_reg import *
from .phifdt_pipe_modules import phi_correct_dark
from SPGPylibs.GENtools import *
def centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=None):
############################
#FIND CENTERS - PART OF DO_HOUGH
############################
centers = []
radius = []
radii = np.linspace(inner_radius, outer_radius, steps + 1)
    printc('Analyzing ', n_images, ' images', color=bcolors.OKGREEN)
for i in range(n_images):
#acc_conv = find_Circles(
# binmask[i], radii_coarse, r_width_coarse, verbose=verbose, full=True)
acc_conv = find_Circles_ida(binmask[i], radii, r_width)
center,rad,c,d = votes(acc_conv, radii)
centers.append(center)
radius.append(rad)
printc('Found center: ', centers[i], ' and radius: ', radius[i],color = bcolors.WARNING)
if verbose == True:
fig = plt.figure(frameon=False)
im1 = plt.imshow(binmask[i], cmap=plt.cm.gray, alpha=.5)
circle_fit = bin_annulus(
imsize, radius[i], 1, full=False).astype(float)
dd = np.array(centers[i])
dx = dd[0] - imsize[0]//2
dy = dd[1] - imsize[1]//2
circle_fit = shift(circle_fit, shift=[dx,dy])
im2 = plt.imshow(circle_fit, cmap=plt.cm.gray, alpha=.5)
plt.show()
return centers,radius
@timeit
def do_hough(image,inner_radius, outer_radius, steps, org_centers=None,method='prewitt',save=False,
dhtr=10,normalize = False,verbose=False,Otsu = None,threshold = 0.15):
'''
    Calculates the position and radius of the solar disk in a set of input images using the Hough transform.
Parameters
----------
image : (K, N, M) ndarray
List or numpy array of K Grayscale images of NxM size.
inner_radius : int
        Minimum search radius
outer_radius : int
        Maximum search radius
steps: int
Number of steps to look for solar radius.
step is used to generate:
(1): coarse find jumps: np.linspace(inner_radius, outer_radius, steps)
(2): width of the ring for crosscorrelating the disk: (outer_radius - inner_radius)//steps * 2
(3): if step is a negative number then uses FM find model
-#-
4 iterations
1] inner_radius = 152; outer_radius = 1048; steps = 64; 15 iterations
152_____________600_____________1048
--|---|---|---|---|---|---|---|---|---|---|---|---|---|---|--
2] inner_radius = Prev.Radius-32; outer_radius = Prev.Radius+32; steps = 16; 5 iterations
---------|---------------|---------------|---------------|---------------|--------
3] inner_radius = Prev.Radius-8; outer_radius = Prev.Radius+8; steps = 4; 5 iterations
-----------|---------------|---------------|---------------|---------------|-----------
4] inner_radius = Prev.Radius-2; outer_radius = Prev.Radius+2; steps = 1; 5 iterations
-----------|---------------|---------------|---------------|---------------|-----------
-#-
org_centers = org_centers: numpy array [K,2] centers for comparison (they are not used)
method = method: method for finding the limb boundary. default = 'prewitt'
more info look FindEdges()
save = False: save the centers as 'hough_centers.txt' -> ASCII (centers_fine,radii_fine)
dhtr = 10:
normalize = False:
verbose = False:
Otsu = None:
threshold = 0.15:
Returns
-------
centers : numpy int array of [K,2] elements where [i,0] = x-centers and [i,1] = y-centers
    radius : numpy int array of [K] elements containing the radius of the K images in pixels
Raises
------
References
----------
[1] <NAME>, Machine Vision and Applications (2013) 24:683–694 DOI 10.1007/s00138-012-0420-x
Examples
--------
>>> import SPGPylibs as spg
Notes
-----
'''
imsize = image[0].shape
n_images = len(image)
if org_centers is None:
org_centers = np.tile(np.array([0., 0.], dtype=np.int16), (n_images, 1))
############################
#Normalize images (using a box 100x100 in the central image)
############################
if normalize == True:
norma = np.mean(image[0][imsize[0]//2-100:imsize[0]//2 +
100, imsize[0]//2-100:imsize[0]//2+100])
if verbose == True:
print('Normalization constant: ', norma, '[calculated with first image assumed to be central one]')
for i in range(n_images):
image[i] = image[i]/norma
############################
#CALCULATE THE MASK GRADIENT FOR EACH IMAGE
############################
binmask = []
image_dummy, threshold = FindEdges(
image[0], threshold, method=method, dthr=dhtr, verbose=verbose,Otsu=Otsu)
binmask.append(image_dummy)
for i in range(1, n_images):
image_dummy = FindEdges(
image[i], threshold, method=method, verbose=verbose,Otsu=Otsu)
binmask.append(image_dummy)
############################
#FIND CENTERS - COARSE SEARCH
############################
#Coarse and fine compressed in one call
# centers = []
# radius = []
# r_width_coarse = (outer_radius - inner_radius)//steps * 2
# radii_coarse = np.linspace(inner_radius, outer_radius, steps)
# print('Analizing ', n_images, ' images (coarse search)')
# for i in range(n_images):
# #acc_conv = find_Circles(
# # binmask[i], radii_coarse, r_width_coarse, verbose=verbose, full=True)
# acc_conv = find_Circles_ida(binmask[i], radii_coarse, r_width_coarse)
# center,rad,c,d = votes(acc_conv, radii_coarse)
# centers.append(center)
# radius.append(rad)
# print('Found center: ', centers[i], ' and radius: ', radius[i])
# if verbose == True:
# fig = plt.figure(frameon=False)
# im1 = plt.imshow(binmask[i], cmap=plt.cm.gray, alpha=.5)
# circle_fit = bin_annulus(
# imsize, radius[i], 1, full=False).astype(float)
# dd = np.array(centers[i])
# dx = dd[0] - imsize[0]//2
# dy = dd[1] - imsize[1]//2
# circle_fit = shift(circle_fit, shift=[dx,dy])
# im2 = plt.imshow(circle_fit, cmap=plt.cm.gray, alpha=.5)
# plt.show()
# print('Image | Original | Inferred | Radius')
# for i in range(n_images):
# print(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
# (i, org_centers[i, 0], org_centers[i, 1],
# centers[i][1], centers[i][0], radius[i]))
############################
#FIND CENTERS - FINE SEARCH
############################
# centers_fine = []
# radius_fine = []
# mean_r = np.mean(radius)
# print('pp',mean_r)
# inner_radius = mean_r-20
# outer_radius = mean_r+20
# steps = 20
# r_width_fine = 5
# radii_fine = np.linspace(inner_radius, outer_radius, steps)
# print('Analizing ', n_images, ' images (fine case)')
# for i in range(n_images):
# acc_conv = find_Circles_ida(binmask[i], radii_fine, r_width_fine,verbose=False)
# center,rad,c,d = votes(acc_conv, radii_fine)
# centers_fine.append(center)
# radius_fine.append(rad)
# print('Found center: ', centers_fine[i],
# ' and radius: ', radius_fine[i])
# if verbose == True:
# fig = plt.figure(frameon=False)
# im1 = plt.imshow(binmask[i], cmap=plt.cm.gray, alpha=.5)
# circle_fit = bin_annulus(
# imsize, radius_fine[i], 1, full=False).astype(float)
# dd = np.array(center)
# dx = dd[0] - imsize[0]//2
# dy = dd[1] - imsize[1]//2
# circle_fit = shift(circle_fit, shift=[dx,dy])
# im2 = plt.imshow(circle_fit, cmap=plt.cm.gray, alpha=.5)
# plt.show()
# print('Method | Image | Original | Inferred | Radius')
# for i in range(n_images):
# print(" Coarse %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
# (i, org_centers[i, 0], org_centers[i, 1],
# centers[i][1], centers[i][0], radius[i]))
# print(" Fine %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
# (i, org_centers[i, 0], org_centers[i, 1],
# centers_fine[i][1], centers_fine[i][0], radius_fine[i]))
if steps > 0:
#############################
#FIND CENTERS - COARSE SEARCH
#############################
r_width = (outer_radius - inner_radius)//steps * 2
print(np.linspace(inner_radius, outer_radius, steps + 1))
printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
print('Image | Original | Inferred | Radius')
for i in range(n_images):
printc(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
(i, org_centers[i, 0], org_centers[i, 1],
centers[i][1], centers[i][0], radius[i]),color = bcolors.FAIL)
###########################
#FIND CENTERS - FINE SEARCH
###########################
        mean_r = int(np.mean(radius))
inner_radius = mean_r - 32
outer_radius = mean_r + 32
steps = 16
r_width = (outer_radius - inner_radius)//steps * 2
print(np.linspace(inner_radius, outer_radius, steps + 1))
printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
print('Image | Original | Inferred | Radius')
for i in range(n_images):
printc(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
(i, org_centers[i, 0], org_centers[i, 1],
centers[i][1], centers[i][0], radius[i]),color = bcolors.FAIL)
################################
#FIND CENTERS - VERY FINE SEARCH
################################
        mean_r = int(np.mean(radius))
inner_radius = mean_r - 4
outer_radius = mean_r + 4
steps = 8
r_width = (outer_radius - inner_radius)//steps * 2
print(np.linspace(inner_radius, outer_radius, steps + 1))
printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
print('Image | Original | Inferred | Radius')
for i in range(n_images):
printc(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
(i, org_centers[i, 0], org_centers[i, 1],
centers[i][1], centers[i][0], radius[i]),color = bcolors.FAIL)
elif steps < 0:
##################################
#FIND CENTERS - FM SEARCH STRATEGY
##################################
r_width = 2
inner_radius = 128
outer_radius = 1024
steps = 32
r_width = (outer_radius - inner_radius)//steps * 2
print(np.linspace(inner_radius, outer_radius, steps + 1))
printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
print('Image | Original | Inferred | Radius')
for i in range(n_images):
print(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
(i, org_centers[i, 0], org_centers[i, 1],
centers[i][1], centers[i][0], radius[i]))
        mean_r = int(np.mean(radius))
inner_radius = mean_r - 32
outer_radius = mean_r + 32
steps = 16
r_width = (outer_radius - inner_radius)//steps * 2
print(np.linspace(inner_radius, outer_radius, steps + 1))
printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
print('Image | Original | Inferred | Radius')
for i in range(n_images):
print(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
(i, org_centers[i, 0], org_centers[i, 1],
centers[i][1], centers[i][0], radius[i]))
        mean_r = int(np.mean(radius))
inner_radius = mean_r - 8
outer_radius = mean_r + 8
steps = 8
r_width = (outer_radius - inner_radius)//steps * 2
print(np.linspace(inner_radius, outer_radius, steps + 1))
printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
print('Image | Original | Inferred | Radius')
for i in range(n_images):
print(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
(i, org_centers[i, 0], org_centers[i, 1],
centers[i][1], centers[i][0], radius[i]))
        mean_r = int(np.mean(radius))
inner_radius = mean_r - 2
outer_radius = mean_r + 2
steps = 4
r_width = (outer_radius - inner_radius)//steps * 2
print(np.linspace(inner_radius, outer_radius, steps + 1))
printc('from: ',inner_radius,' to: ',outer_radius,' steps: ', steps,' width: ',r_width,color = bcolors.OKGREEN)
centers, radius = centers_flat(n_images,inner_radius,outer_radius,steps,r_width,binmask,imsize,verbose=verbose)
print('Image | Original | Inferred | Radius')
for i in range(n_images):
print(" %2.0f | (%4.0f,%4.0f) | (%4.0f,%4.0f) | %6.2f" %
(i, org_centers[i, 0], org_centers[i, 1],
centers[i][1], centers[i][0], radius[i]))
else:
print('NO HOUGH **** WRONG')
if save == True:
status = write_shifts('hough_centers.txt', (centers,radius))
if status != 1:
print('Error in write_shifts')
return centers, radius
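# Illustrative usage sketch (not part of the original pipeline): for a list `cube`
# of K full-disk grayscale images, the three-stage refinement described in the
# docstring above can be driven with the same defaults fdt_flat uses below.
# `cube` is a placeholder name, not something defined in this module.
#
#   centers, radius = do_hough(cube, inner_radius=400, outer_radius=800, steps=20,
#                              method='prewitt', verbose=False, threshold=0.05)
#
# A positive `steps` runs the coarse -> fine -> very fine searches; a negative
# value switches to the FM multi-pass strategy.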
@timeit
def fdt_flat_gen(image, rel_centers, method, radious = 0, thrd=0.05, iter=15, \
bit_trun = 0,verbose = 0, expand=0, c_term = 0,imasize=[2048,2048]):
'''
    Kuhn-Lin-Loranz (KLL) algorithm
Input:
image -> [n_images][y,x]
rel_centers -> [n_images,2] where [:,0]=dx and [:,1]=dy
Displacements are given with respect to image origin (0,0)
radious = 0 : int
        radius of the circular mask. Default = 0; in this case the code uses thrd to create the mask
thrd = 0.05 : float
threshold above which pixels are valid. Default = 0.05 (assuming image is normalized to one)
iter = 15 : int
maximum number of iterations in the kll algorithm. Default = 15
expand = 0 : int
        how much the circular mask is expanded (a positive value shrinks the mask), in %.
verbose = 0 : int
0 = do nothing, 1 = ...., 2 = ....,
method = 'kll' : string
'kll': <NAME>, <NAME>, and <NAME>, PASP 103, 1097-1108, 1991
'chae': Adapted from: J. Chae, Solar Physics 221: 1–14, 2004
'alter': <NAME>, <NAME>, and <NAME>,
Review of Scientific Instruments 87, 063710 (2016); doi: 10.1063/1.4954730
bit_trun = 0: int
Do not touch
c_term = 0 : float np.array(n_images)
intensity factor correction for chae method
imasize = [2048,2048] : Image size
'''
imsize = image[0].shape
n_images = len(image)
############################
    # set displacements of the observed images (A) with respect to the Object image (centered)
############################
xyshifts = np.empty([n_images,2],dtype=int)
xyshifts[:,0], xyshifts[:,1] = rel_centers[:,0] - imsize[0]//2 , rel_centers[:,1] - imsize[1]//2
############################
# calculate masks
############################
mask = np.zeros([n_images, imsize[0], imsize[1]], dtype=np.int8)
if radious != 0: # In case radius of solar disk is provided....
maskn,dummy = generate_circular_mask([imsize[0]-1, imsize[1]-1],radious - expand,radious - expand)
print('Using circular mask')
for i in range(n_images):
mask[i] = shift(maskn, shift=[xyshifts[i,0],xyshifts[i,1]], fill_value = 0)
else:
# find pixel coordinates with solar information (> thrd given by user, default = 0.05)
# This step assumes input data has a mean value of one.
for i in range(n_images):
x, y = np.where(image[i] > thrd)
mask[i][x, y] = 1
############################
# DO LOG
############################
D = np.log10(image)
# replace NaNs and Infs by 0
D[np.isneginf(D)] = 0
D[np.isnan(D)] = 0
if method == 'kll':
############################
# CALCULATE CONSTANT
############################
n = np.zeros([imsize[0], imsize[1]], dtype=np.float64)
sum_image = np.zeros([imsize[0], imsize[1]], dtype=np.float64)
print('Rel centers: ',rel_centers)
# for [iq, ir] in itertools.combinations(range(n_images), 2): # overall 36 combinations
for iq in range(1, n_images): #loop in iq
for ir in range(iq): #loop in ir
# shift of iq with respect ir
dx = rel_centers[iq, 0] - rel_centers[ir, 0]
dy = rel_centers[iq, 1] - rel_centers[ir, 1]
if verbose == 2:
print('dx,dy',dx,dy,iq,ir)
t_mask_1 = mask[ir] & shift(mask[iq], [-dx, -dy])
t_mask_2 = mask[iq] & shift(mask[ir], [dx, dy])
t_mask = t_mask_1 & t_mask_2 # compound mask only used for mean
t_image_1 = shift(D[iq], [-dx, -dy])
t_image_2 = shift(D[ir], [dx, dy])
aa = (D[iq] - t_image_2) * t_mask_2 #add _2
bb = (D[ir] - t_image_1) * t_mask_1 #add _1
image_pair = aa + bb
sum_image += image_pair
                n += t_mask_1 # accumulate valid pixels (first summation)
                n += t_mask_2 # accumulate valid pixels (second summation)
K = sum_image / n.astype(np.float64)
# replace NaNs and Infs by 0
K[np.isneginf(K)] = 0
K[np.isnan(K)] = 0
if verbose == 1:
plt.imshow(K,cmap='gray')
plt.clim(vmax=0.02,vmin=-0.02)
plt.colorbar()
plt.show()
G = np.copy(K)
if bit_trun == 1:
K = np.int32(K * 256) / 256 # bit truncation
k = np.power(10, K)
if bit_trun == 1:
k = np.int32(k * 256) / 256 # bit truncation
for itera in range(iter):
r_res = np.zeros(imasize, dtype=np.float64)
for iq in range(1,n_images):
for ir in range(iq):
# shift of iq with respect ir
dx = rel_centers[iq, 0] - rel_centers[ir, 0]
dy = rel_centers[iq, 1] - rel_centers[ir, 1]
if verbose == 2:
print('dx,dy',dx,dy,iq,ir)
t_mask_1 = mask[ir] & shift(mask[iq], [-dx, -dy])
t_mask_2 = mask[iq] & shift(mask[ir], [dx, dy])
t_image_1 = shift(G, [-dx, -dy]) * t_mask_1
t_image_2 = shift(G, [ dx, dy]) * t_mask_2
correction = (t_image_1 + t_image_2)
r_res += correction
G = K + r_res / n.astype(np.float64)
# replace NaNs and Infs by 0
G[np.isneginf(G)] = 0
G[np.isnan(G)] = 0
idx = np.where(n > 0)
s = G[idx]
# calculate average of gain table for normalization
sm = np.mean(s)
sm2 = np.mean(s**2)
five_sigma = 5*np.sqrt(sm2-sm*sm)
idx2 = np.where(np.abs(s-sm) < five_sigma)
sm = np.mean(s[idx2])
G[idx] = G[idx] - sm
print('Iteration: ', itera, five_sigma, '5*rms', sm, ' of ', iter)
if verbose == 1:
plt.imshow(G, cmap='gray', vmin=-0.05, vmax=0.05)
plt.colorbar()
plt.show()
g = np.power(10, G,dtype='d')#/np.log(10,dtype='d') + 0.5672334407 - 0.0018501610250685886#0.566 #exp(2.303)
g[np.isneginf(g)] = 0
g[np.isnan(g)] = 0
return g
elif method == 'chae':
tmask = np.sum(mask,axis=0)
mask_Ob,dummy = generate_circular_mask([imsize[0]-1, imsize[1]-1],radious,radious)
#Constant term
if c_term == 0:
fit_c = 0
c_term = np.log10(np.ones((n_images)))
else:
c_term = np.log10(np.ones((n_images)))
flat = np.log10(np.ones_like(D[0]))
Ob = np.zeros_like(D[0])
for i in range(n_images):
# shift input image to the center of the frame.
#
Ob += shift(D[i], shift = -xyshifts[i,:])
Ob = Ob / float(n_images)
idx = tmask >= 1
for k in range(iter):
numerator = np.zeros((imsize))
for i in range(n_images):
numerator += ((c_term[i] + Ob - shift(D[i] - flat, shift = -xyshifts[i,:]))*mask_Ob) #(i+xk,j+yk) Eq 8
Ob -= (numerator/mask_Ob/n_images)
Ob[np.isneginf(Ob)] = 0.
Ob[np.isnan(Ob)] = 0.
if verbose == 3:
plt.imshow(Ob,cmap='gray',vmin=1,vmax=2)
plt.show()
numerator = np.zeros((imsize))
for i in range(n_images):
dummy = (c_term[i] + shift(Ob, shift = +xyshifts[i,:]) + flat - D[i])*mask[i]
numerator += dummy
c_term[i] -= ( np.sum(dummy) / np.sum(mask[i]) )
dummy = (numerator/tmask)
flat -= dummy
flat[np.isneginf(flat)] = 0.
flat[np.isnan(flat)] = 0.
if verbose == 3:
plt.imshow(flat,cmap='gray',vmin=-0.02,vmax=0.02)
plt.show()
if verbose >= 1:
print('Iter: ',k, ' STD: ',np.max(np.abs(dummy[idx])),np.exp(c_term))
s = flat[idx]
sm = np.mean(s)
sm2 = np.mean(s**2)
five_sigma = 5*np.sqrt(sm2-sm*sm)
print('Iteration: ', k, five_sigma, '5*rms', sm, ' of ', k, ' STD: ',np.max(np.abs(dummy[idx])))
flat = flat - np.mean(flat)
Ob = Ob + np.mean(flat) + np.mean(c_term)
c_term = c_term - np.mean(c_term)
flat = np.power(10, flat,dtype='d')
flat[np.isneginf(flat)] = 0
flat[np.isnan(flat)] = 0
if verbose >= 2:
plt.imshow(flat,cmap='gray',vmin=0.95,vmax=1.05)#vmin=)np.min(,vmax=0.05)
plt.show()
return flat
elif method == 'alter':
#Extracting flat-field images from scene-based image sequences using phase
#correlation
#<NAME>, <NAME>, and <NAME>
#Citation: Review of Scientific Instruments 87, 063710 (2016); doi: 10.1063/1.4954730
#View online: https://doi.org/10.1063/1.4954730
#View Table of Contents: http://aip.scitation.org/toc/rsi/87/6
#Published by the American Institute of Physics
Gf = np.zeros([imsize[0], imsize[1]],dtype=np.float64)
n = np.zeros([imsize[0], imsize[1]],dtype=np.float64)
sum_image = np.zeros([imsize[0], imsize[1]],dtype=np.float64)
Gr = np.zeros([n_images,imsize[0], imsize[1]],dtype=np.float64)
iter = 1
for itera in range(iter):
ir = 0
for iq in range(n_images):
dx = rel_centers[iq,0] - rel_centers[ir,0] #shift of iq with respect ir
dy = rel_centers[iq,1] - rel_centers[ir,1]
t_mask = mask[ir] * shift(mask[iq], [dx,dy])
n += t_mask
t_image = shift(D[iq] - Gf, [dx,dy])
sum_image += t_mask * t_image
K = sum_image / n
K[np.isneginf(K)] = 0
K[np.isnan(K)] = 0
idx = np.where(K > 5)
mask[0][idx] = 0
idx2 = np.where(n == 0)
K[idx2] = 0
iq = 0
for ir in range(n_images):
dx = rel_centers[iq,0] - rel_centers[ir,0] #shift of iq with respect ir
dy = rel_centers[iq,1] - rel_centers[ir,1]
Kn = shift(K * mask[0], [dx,dy])
G = (D[ir] - Kn) * mask[ir]
G[np.isneginf(G)] = 0
G[np.isnan(G)] = 0
Gr[ir,:,:] = G
m = np.sum(mask,axis=0)
Gf = np.sum(Gr,axis=0) / m
Gf[np.isneginf(K)] = 0
Gf[np.isnan(K)] = 0
print('Iteration: ', itera)
g = np.power(10, Gf)
g[np.isneginf(g)] = 0
g[np.isnan(g)] = 0
return g
else:
return None
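# Hedged sanity-check sketch: every branch of fdt_flat_gen estimates the gain in
# log10 space and returns 10**G, so a synthetic test could look like the lines
# below. `make_disk`, `true_flat` and `rel_centers` are placeholders, not names
# provided by this module.
#
#   true_flat = 1.0 + 0.05 * np.random.standard_normal((2048, 2048))
#   images = [make_disk(c) * true_flat for c in rel_centers]
#   g = fdt_flat_gen(images, rel_centers, 'kll', radious=400, iter=5)
#   # g should approximate true_flat up to a multiplicative constant.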
def fdt_flat(files, wavelength, npol, method = 'kll', dark = None, read_shits = 0, shifts = None, verbose = 1,
correct_ghost = 0,expand = 1,thrd = 0,iter = 4, normalize = 1 , disp_method = 'Hough', c_term = 0,
inner_radius = 400, outer_radius = 800, steps = 20,shifts_file = False,imasize = [2048,2048]):
'''
    The dark, if provided, must have the same scaling and the same size as the data.
    This program does not take care of sizes; for that, go to fdt_pipeline.
    Uses the old ghost-correction routine; to be modified.
    TBD: get the sizes from the headers.
'''
############################
# open the 9 FITS images and get one pol and wave.
# This can be done all at once but I have this like that because I am lazy.
############################
image = [fits_get_part(i,wavelength,npol) for i in files]
n_images = len(image)
ys,xs = image[0].shape
############################
# Correct Dark if not done in advance
############################
try:
print('...Dark correction...')
for i in range(n_images):
image[i] = image[i] - dark
except:
pass
    #TODO To be implemented (detailed dark correction)
# try:
# for i in range(n_images):
# row = image[i][1800:,:].sum(axis=0) / (2048.-1800.)
# dark2 = np.zeros_like(dark)
# dark2 = dark2 + row[:np.newaxis]
# image[i] = np.abs(image[i] - dark2)
# except:
# pass
if read_shits == 1:
try:
print('... read user input shifts_file ...')
centers = read_shifts(shifts_file+'_cnt_w'+str(wavelength)+'_n'+str(npol)+'.txt')
radius = read_shifts(shifts_file+'_rad_w'+str(wavelength)+'_n'+str(npol)+'.txt')
for i in range(n_images):
print('Image',i,'c: ',centers[i,0],',',centers[i,1],' rad: ', radius[i])
except Exception:
print("Unable to open fits file: {}",shifts_file+'_cnt_w'+str(wavelength)+'_n'+str(npol)+'.txt')
elif read_shits == 2:
print('... shifts provided by user ...')
centers = shifts[0]
print(centers, '... read ')
radius = shifts[1]
print(radius, '... read ')
else:
print('... calculating shifts ...')
if disp_method == 'Hough':
centers, radius = do_hough(image, inner_radius, outer_radius, steps,verbose=False,threshold = 0.05)
if shifts_file:
_ = write_shifts(shifts_file+'_cnt_w'+str(wavelength)+'_n'+str(npol)+'.txt', centers)
_ = write_shifts(shifts_file+'_rad_w'+str(wavelength)+'_n'+str(npol)+'.txt', radius )
elif disp_method == 'FFT':
            print('To be checked: the input parameter "expand" should be a negative number representing the solar disk radius')
image_dummy = np.zeros((n_images,ys,xs))
for i in range(n_images):
image_dummy[i,:,:] = image[i]
s_y,s_x,_ = PHI_shifts_FFT(image_dummy,prec=5,verbose=False,norma=False,coarse_prec = 150)
centers = np.zeros((n_images,2))
radius = np.zeros((n_images))
radius[:] = -expand
expand = 5
centers[:,0] = -s_x + xs//2
centers[:,1] = -s_y + ys//2
for i in range(n_images):
print('Image',i,'c: ',centers[i,0],',',centers[i,1],' rad: ', radius[i],'*')
elif disp_method == 'circle':
centers = np.zeros((n_images,2))
radius = np.zeros((n_images))
for i in range(n_images):
centers[i,1],centers[i,0],radius[i] = find_center(image[i],sjump = 4,njumps = 100,threshold = 0.8)
                print('Image',i,'c: ',centers[i,0],',',centers[i,1],' rad: ', radius[i])
else:
pass
#make sure we have integer numpy numbers in the centers
centers = np.array(centers).astype(int)
mean_radii = np.mean(radius)
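    # The ghost correction below builds a ghost template from the central frame
    # (index 4), recentres it, and subtracts a shifted copy scaled by 0.004 (0.4 %)
    # from every frame; the poly1d coefficients map the disk centre to the ghost position.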
if correct_ghost == 1:
coef = [-1.98787669,1945.28944245]
        print(' Ghost correction...')
poly1d_fn = np.poly1d(coef)
sh = poly1d_fn(centers[4,:]).astype(int) #np.array([ -1.99350209*centers[4,0] + 1948.44866543,-1.98963222*centers[4,1] + 1949.61650596]).astype(int)
reflection = image[4] - shift(image[4], shift=sh) * 0.004
reflection = shift(reflection, shift=[-centers[4,1]+1024,-centers[4,0]+1024])
for i in range(9):
sh = poly1d_fn(centers[i,:]).astype(int)
image[i] = image[i] - shift(reflection, shift=[sh[1]+centers[i,1]-1024,sh[0]+centers[i,0]-1024]) * 0.004
    # TODO: try with the whole image
for i in range(n_images):
norma = np.mean(image[i][centers[i,1]-100:centers[i,1]+100,centers[i,0]-100:centers[i,0]+100])
if normalize == 1:
image[i] = image[i]/norma
print('Normalization: ', norma)
else:
norma = 0
pass
if thrd != 0:
gain = fdt_flat_gen(image, centers,method,iter=iter,thrd=thrd,verbose = verbose, c_term = c_term,imasize = imasize)
else:
gain = fdt_flat_gen(image, centers,method,iter=iter,radious=mean_radii,expand=expand,verbose = verbose, c_term = c_term, imasize = imasize)
return gain, norma
def fdt_flat_testrun():
'''
    Just for a local test run in a folder at the same level as SPGlib
'''
dir = '/Users/orozco/Dropbox_folder/SoPHI/PHI-COMMISSIONING/software-and-images/RSW1/Add-data/'
files = ['solo_L0_phi-fdt-ilam_20200618T035946_V202007101227C_0066180100.fits',
'solo_L0_phi-fdt-ilam_20200618T040546_V202007101223C_0066180125.fits',
'solo_L0_phi-fdt-ilam_20200618T041146_V202007101231C_0066180150.fits',
'solo_L0_phi-fdt-ilam_20200618T041746_V202009211029_0066180175_scorr.fits',
'solo_L0_phi-fdt-ilam_20200618T042346_V202009211027_0066180200_scorr.fits',
'solo_L0_phi-fdt-ilam_20200618T043004_V202011101020_0066180225_scorr.fits',
'solo_L0_phi-fdt-ilam_20200618T043546_V202009211031_0066180250_scorr.fits',
'solo_L0_phi-fdt-ilam_20200618T044146_V202009211031_0066180275_scorr.fits',
'solo_L0_phi-fdt-ilam_20200618T044804_V202009291424C_0066180300.fits']
files = [dir + s for s in files]
dark_file = '/Users/orozco/Dropbox_folder/SoPHI/PHI-COMMISSIONING/software-and-images/RSW1/solo_L0_phi-fdt-ilam_20200618T000547_V202006221044C_0066181001_dark.fits'
dark,dark_scale = phi_correct_dark(dark_file,files[0],0,0,verbose = False,get_dark = True)
# dark, _ = fits_get(dark_file)
# scaling_dark = fits_get(dark_file,scaling = True)
# scaling_flat = fits_get(files[0],scaling = True)
# dark = dark * scaling_flat / scaling_dark
wavelength = 0
npol = 0
allgain = []
norma = np.zeros((24))
for wavelength in range(6):
for npol in range(4):
print(wavelength,npol,'................')
gain, norma_out = fdt_flat(files, wavelength, npol, method = 'kll', dark = dark,read_shits = False,
shifts_file = 'shifts/shifts', correct_ghost = 0 , expand = 10, normalize = 0,
iter = 3,verbose=True)#,steps = -1)
#steps = 20)
# gain, norma_out = fdt_flat(files,wavelength,npol,'kll',dark=dark,read_shits = False, shifts_file = ' '
# correct_ghost=0 , expand = 10, normalize=0,thrd = 0.2, iter = 3, method = 'kll',verbose=0)
allgain.append(gain)
norma[wavelength*4+npol] = norma_out
hdu_list = pyfits.open(files[0])
hdu_list[0].data = allgain
    hdu_list.writeto('flats.fits', overwrite=True)
|
StarcoderdataPython
|
228979
|
from collections import Counter
from tool.runners.python import SubmissionPy
class ThoreSubmission(SubmissionPy):
def run(self, s):
"""
:param s: input in string format
:return: solution flag
"""
N_DAYS = 100
black_hexs = self.parse_start_grid(s)
for _ in range(N_DAYS):
n_neighbors = Counter(
neighbor
for hex_coord in black_hexs
for neighbor in self.get_neighbors(*hex_coord)
)
black_hexs = {
hex_coord
for hex_coord, n in n_neighbors.items()
if (hex_coord not in black_hexs and n == 2)
or (hex_coord in black_hexs and n in [1, 2])
}
return len(black_hexs)
@staticmethod
def get_neighbors(x, y):
yield x + 2, y
yield x - 2, y
yield x + 1, y + 1
yield x + 1, y - 1
yield x - 1, y + 1
yield x - 1, y - 1
@classmethod
def parse_start_grid(cls, s):
black_hexs = set()
for line in s.splitlines():
hex_coord = cls.parse_line(line)
if hex_coord in black_hexs:
black_hexs.remove(hex_coord)
else:
black_hexs.add(hex_coord)
return black_hexs
@staticmethod
def parse_line(line):
return (
2 * line.count("w")
- line.count("nw")
- line.count("sw")
- 2 * line.count("e")
+ line.count("ne")
+ line.count("se"),
line.count("s") - line.count("n"),
)
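# Note on the coordinate system used above: hexes are addressed in "doubled width"
# coordinates, so pure east/west moves change x by 2 while the four diagonal moves
# change x by 1 and y by 1 (see get_neighbors). parse_line exploits substring
# counting -- e.g. line.count("w") also counts the "w" in "nw"/"sw" -- so the
# corrections it applies reduce each line directly to the endpoint of the walk.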
def test_thore():
"""
Run `python -m pytest ./day-24/part-2/thore.py` to test the submission.
"""
assert (
ThoreSubmission().run(
"""sesenwnenenewseeswwswswwnenewsewsw
neeenesenwnwwswnenewnwwsewnenwseswesw
seswneswswsenwwnwse
nwnwneseeswswnenewneswwnewseswneseene
swweswneswnenwsewnwneneseenw
eesenwseswswnenwswnwnwsewwnwsene
sewnenenenesenwsewnenwwwse
wenwwweseeeweswwwnwwe
wsweesenenewnwwnwsenewsenwwsesesenwne
neeswseenwwswnwswswnw
nenwswwsewswnenenewsenwsenwnesesenew
enewnwewneswsewnwswenweswnenwsenwsw
sweneswneswneneenwnewenewwneswswnese
swwesenesewenwneswnwwneseswwne
enesenwswwswneneswsenwnewswseenwsese
wnwnesenesenenwwnenwsewesewsesesew
nenewswnwewswnenesenwnesewesw
eneswnwswnwsenenwnwnwwseeswneewsenese
neswnwewnwnwseenwseesewsenwsweewe
wseweeenwnesenwwwswnew
""".strip()
)
== 2208
)
|
StarcoderdataPython
|
252897
|
from org.sfu.billing.utils.configurations import SparkConfig
from org.sfu.billing.utils.dataLayer import dataLoader
from org.sfu.billing.devices.cdr import CallDetailRecord
from pyspark.sql import functions
from pyspark.sql.functions import split
class Controller:
"""
    Controller class controls the lifecycle of the entire application. It invokes all modules in a logical sequence.
    The execution pipeline is: Mediation module, Rating module, Persistence module.
"""
spark_config = SparkConfig()
    # This method starts streaming events from the topic registered in Kafka
def stream_rawCdr(self):
events = self.spark_config.get_events()
return events
    # This method determines the device type and returns its object.
    # The default (and currently only implemented) case is the call detail record.
    # Other IoT devices could be rated as well.
def device_type(self,events):
return CallDetailRecord()
#def process_row(self,row):
# # Write row to storage
# print("cdr: ", row)
# pass
# Main method which includes logic of Lifecycle of application
# 1.Starts Streaming process
# 2.Detect type of device
# 3.Map raw events into structured dataframe
# 4.Invoke Mediation process
# 5.Invoke Rating process
# 6.Check data in hdfs
# 7.Persist data on configured database
def process(self):
events = self.stream_rawCdr()
cdr = self.device_type(events)
mapped_df = cdr.map(events)
dl = dataLoader()
normalized_frame = cdr.invoke_mediation(mapped_df)
rated_frame = cdr.invoke_rating(normalized_frame)
stream = rated_frame.writeStream.foreachBatch(dl.save_batch).start()
#stream = normalized_frame.writeStream.foreachBatch(configurations.save_batch).start()
#stream = normalized_frame.writeStream.outputMode("append").format("console").start()
self.spark_config.stopStreaming(stream)
pass
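# Illustrative entry point (an assumption, not part of the original module): the
# file defines no main guard, so Controller is presumably driven roughly like this.
if __name__ == '__main__':
    Controller().process()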
|
StarcoderdataPython
|
34440
|
<reponame>BLSQ/iaso-copy<gh_stars>10-100
import re
from django.db.models import Q, Count, Sum, Case, When, IntegerField, Value
from iaso.models import OrgUnit, Instance, DataSource
def build_org_units_queryset(queryset, params, profile):
validation_status = params.get("validation_status", OrgUnit.VALIDATION_VALID)
has_instances = params.get("hasInstances", None)
date_from = params.get("dateFrom", None)
date_to = params.get("dateTo", None)
search = params.get("search", None)
org_unit_type_id = params.get("orgUnitTypeId", None)
source_id = params.get("sourceId", None)
with_shape = params.get("withShape", None)
with_location = params.get("withLocation", None)
geography = params.get("geography", None)
parent_id = params.get("parent_id", None)
source = params.get("source", None)
group = params.get("group", None)
version = params.get("version", None)
default_version = params.get("defaultVersion", None)
org_unit_parent_id = params.get("orgUnitParentId", None)
linked_to = params.get("linkedTo", None)
link_validated = params.get("linkValidated", True)
link_source = params.get("linkSource", None)
link_version = params.get("linkVersion", None)
roots_for_user = params.get("rootsForUser", None)
ignore_empty_names = params.get("ignoreEmptyNames", False)
org_unit_type_category = params.get("orgUnitTypeCategory", None)
if validation_status != "all":
queryset = queryset.filter(validation_status=validation_status)
if search:
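        # "ids:" and "refs:" prefixes switch the free-text search to lookups by
        # primary key or by source_ref; any other value matches on name or aliases.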
if search.startswith("ids:"):
s = search.replace("ids:", "")
try:
ids = re.findall("[A-Za-z0-9_-]+", s)
queryset = queryset.filter(id__in=ids)
except:
queryset = queryset.filter(id__in=[])
print("Failed parsing ids in search", search)
elif search.startswith("refs:"):
s = search.replace("refs:", "")
try:
refs = re.findall("[A-Za-z0-9_-]+", s)
queryset = queryset.filter(source_ref__in=refs)
except:
queryset = queryset.filter(source_ref__in=[])
print("Failed parsing refs in search", search)
else:
queryset = queryset.filter(Q(name__icontains=search) | Q(aliases__contains=[search]))
if group:
queryset = queryset.filter(groups__in=group.split(","))
if source:
source = DataSource.objects.get(id=source)
if source.default_version:
queryset = queryset.filter(version=source.default_version)
else:
queryset = queryset.filter(version__data_source_id=source)
if version:
queryset = queryset.filter(version=version)
if default_version == "true" and profile is not None:
queryset = queryset.filter(version=profile.account.default_version)
if date_from is not None and date_to is None:
queryset = queryset.filter(instance__created_at__gte=date_from)
if date_from is None and date_to is not None:
queryset = queryset.filter(instance__created_at__lte=date_to)
if date_from is not None and date_to is not None:
queryset = queryset.filter(instance__created_at__range=[date_from, date_to])
if has_instances is not None:
if has_instances == "true":
ids_with_instances = (
Instance.objects.filter(org_unit__isnull=False)
.exclude(file="")
.exclude(deleted=True)
.values_list("org_unit_id", flat=True)
)
queryset = queryset.filter(id__in=ids_with_instances)
if has_instances == "false":
ids_with_instances = (
Instance.objects.filter(org_unit__isnull=False)
.exclude(file="")
.exclude(deleted=True)
.values_list("org_unit_id", flat=True)
)
queryset = queryset.exclude(id__in=ids_with_instances)
if has_instances == "duplicates":
ids_with_duplicate_instances = (
Instance.objects.with_status()
.filter(org_unit__isnull=False, status=Instance.STATUS_DUPLICATED)
.exclude(file="")
.exclude(deleted=True)
.values_list("org_unit_id", flat=True)
)
queryset = queryset.filter(id__in=ids_with_duplicate_instances)
if org_unit_type_id:
queryset = queryset.filter(org_unit_type__id__in=org_unit_type_id.split(","))
if geography == "location":
queryset = queryset.filter(location__isnull=False)
if geography == "shape":
queryset = queryset.filter(simplified_geom__isnull=False)
if geography == "none":
queryset = queryset.filter(Q(location__isnull=True) & Q(simplified_geom__isnull=True))
if geography == "any":
queryset = queryset.filter(Q(location__isnull=False) | Q(simplified_geom__isnull=False))
if with_shape == "true":
queryset = queryset.filter(simplified_geom__isnull=False)
if with_shape == "false":
queryset = queryset.filter(simplified_geom__isnull=True)
if with_location == "true":
queryset = queryset.filter(Q(location__isnull=False))
if with_location == "false":
queryset = queryset.filter(Q(location__isnull=True))
if parent_id:
if parent_id == "0":
queryset = queryset.filter(parent__isnull=True)
else:
queryset = queryset.filter(parent__id=parent_id)
if roots_for_user:
if profile.org_units.all():
queryset = queryset.filter(id__in=profile.org_units.all())
else:
queryset = queryset.filter(parent__isnull=True)
if org_unit_parent_id:
parent = OrgUnit.objects.get(id=org_unit_parent_id)
queryset = queryset.hierarchy(parent)
if linked_to:
is_destination = Q(destination_set__destination_id=linked_to)
if link_validated != "all":
is_destination &= Q(destination_set__validated=link_validated)
is_source = Q(source_set__source_id=linked_to)
if link_validated != "all":
is_source &= Q(source_set__validated=link_validated)
queryset = queryset.filter(is_source | is_destination)
if link_source:
queryset = queryset.filter(version__data_source_id=link_source)
if link_version:
queryset = queryset.filter(version_id=link_version)
if source_id:
queryset = queryset.filter(sub_source=source_id)
if org_unit_type_category:
queryset = queryset.filter(org_unit_type__category=org_unit_type_category.upper())
if ignore_empty_names:
queryset = queryset.filter(~Q(name=""))
queryset = queryset.select_related("version__data_source")
queryset = queryset.select_related("org_unit_type")
queryset = queryset.prefetch_related("groups")
queryset = queryset.prefetch_related("parent")
queryset = queryset.prefetch_related("parent__parent")
queryset = queryset.prefetch_related("parent__parent__parent")
queryset = queryset.prefetch_related("parent__parent__parent__parent")
return queryset.distinct()
def annotate_query(queryset, count_instances, count_per_form, forms):
if count_instances:
queryset = queryset.annotate(
instances_count=Count(
"instance",
filter=(~Q(instance__file="") & ~Q(instance__device__test_device=True) & ~Q(instance__deleted=True)),
)
)
if count_per_form:
annotations = {
f"form_{frm.id}_instances": Sum(
Case(
When(
Q(instance__form_id=frm.id)
& ~Q(instance__file="")
& ~Q(instance__device__test_device=True)
& ~Q(instance__deleted=True),
then=1,
),
default=0,
output_field=IntegerField(),
)
)
for frm in forms
}
queryset = queryset.annotate(**annotations)
return queryset
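# Illustrative usage sketch: the two helpers above are typically chained from a
# view. Only the signatures come from this file; the call site itself is assumed.
#
#   queryset = build_org_units_queryset(OrgUnit.objects.all(), request.GET, profile)
#   queryset = annotate_query(queryset, count_instances=True, count_per_form=False, forms=[])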
|
StarcoderdataPython
|
11288399
|
import os
import yaml
import sys
import pandas as pd
import numpy as np
config = yaml.load(open(os.path.join(os.path.dirname(__file__),'config.yaml')), yaml.FullLoader)
SAMPLE_NUM = config['sample_number']
def refractorAppComputeInfo(computeInfoFile):
infoFile = open(computeInfoFile, "r")
computeInfo = infoFile.readlines()[1:]
prevName = None
appInfo = {}
chainLen = 0
for line in computeInfo:
splitted = line.rstrip().split(",")
appName = splitted[0]
mem = splitted[2]
execTime = splitted[3]
if appName != prevName:
chainLen = 0
appInfo[appName] = []
prevName = appName
chainLen += 1
appInfo[appName].append([mem, execTime])
return appInfo
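# Assumed input format: refractorAppComputeInfo skips one header row and reads
# comma-separated lines whose columns 0, 2 and 3 are the application name, the
# memory limit and the execution time, e.g.
#
#   appName,functionID,mem,execTime
#   app001,0,256,40
#
# The exact header wording is an assumption; only the column positions are taken
# from the parsing code above.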
# exec time: ms scale
# specify execTime in update phase
def actionWskGen(chainList, th):
for key, val in chainList.items():
appName = key
sequenceID = key[3:]
functionID = 0
funcChainStr = ""
for info in val:
mem = info[0]
if th == None:
execTime = info[1]
else:
execTime = th
if int(mem) < 128:
mem = "128"
if int(mem) > 512:
mem = "512"
# cmd = "./action_update.sh %s %s %s %s" % (sequenceID, functionID, execTime, mem)
cmd = "./action_update.sh %s %s %s %s" % (str(sequenceID).zfill(3), str(functionID).zfill(3), execTime, mem)
print(cmd)
r = os.popen(cmd)
r.read()
funcName = "func%s-%s" % (str(sequenceID).zfill(3), str(functionID).zfill(3))
funcChainStr = funcChainStr + funcName + ","
functionID += 1
funcChainStr = funcChainStr[:-1]
cmd = "wsk -i action update %s --sequence %s" % (appName, funcChainStr)
print(cmd)
r = os.popen(cmd)
r.read()
# func------- means the end of benchmark
cmd = "./action_update.sh %s %s %s %s" % ("---", "---", 1, 128)
r = os.popen(cmd)
r.read()
print("Workload creation complete")
def checkThOK(path, th):
mapFile = pd.read_csv(path)
newExecTime = mapFile["functionsPerApp"].multiply(th)
return np.all(mapFile["IAT"].astype(int) > newExecTime)
if __name__ == '__main__':
th = None
argument = sys.argv
del argument[0]
if len(argument) == 1:
# run successful workload
if "_" in argument[0]:
workloadDir = "../CSVs/success/%s" % argument[0]
else: # run new workload with same function runtime
workloadDir = "../CSVs/%i" % SAMPLE_NUM
th = int(argument[0])
elif len(argument) == 2: # run successful workload with same function runtime
workloadDir = "../CSVs/success/%s" % argument[0]
mapInfoFile = "%s/appandIATMap.csv" % workloadDir
th = int(argument[1])
isFine = checkThOK(mapInfoFile, th)
if isFine == False:
print("Threshold is not fit!")
sys.exit(1)
else:
workloadDir = "../CSVs/%i" % SAMPLE_NUM
computeInfoFile = "%s/appComputeInfo.csv" % workloadDir
seqInfo = refractorAppComputeInfo(computeInfoFile)
actionWskGen(seqInfo, th)
|
StarcoderdataPython
|