id (string, 1–265 chars) | text (string, 6–5.19M chars) | dataset_id (string, 7 classes)
---|---|---|
1757676 | <gh_stars>0
import time
import datetime
from pyoddgen.generators.basegen import BaseGenerator
from pyoddgen.config import ObjectDetectionConfiguration
from pyoddgen.tools.distribution import Distribution
from pyoddgen.datastructures.objectdetectionrecord import ObjectDetectionDataRecord
class ObjectDetectionGenerator(BaseGenerator):
def __init__(self, config, project_dir):
if not isinstance(config, ObjectDetectionConfiguration):
raise Exception("Configuration for generator must be of type '" + str(ObjectDetectionConfiguration) + "'! "
"Got '" + str(type(config)) + "' instead. ")
super(ObjectDetectionGenerator, self).__init__(config, project_dir)
self.background_distribution = Distribution(config.fields["background_distribution_method"])
self.class_distribution = Distribution(config.fields["class_distribution_method"])
self.position_distribution = Distribution(config.fields["position_distribution_method"])
self.paste_no_distribution = Distribution(config.fields["number_of_pastes_per_background_distribution_method"])
def get_log_header(self):
return ["Time"] + [ObjectDetectionDataRecord.mandatory_fields[i] for i in range(len(ObjectDetectionDataRecord.mandatory_fields))]
def log_generated_data(self, generated_data_record):
if not isinstance(generated_data_record, ObjectDetectionDataRecord):
raise Exception("Generated data record to log must be of type '" + str(ObjectDetectionDataRecord) + "'!")
ret, msg = generated_data_record.check_validity()
if not ret:
raise Exception("Generated data record is not valid: " + msg)
log_time = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
rows = [[log_time] + [generated_data_record.data_dict[field] for field in ObjectDetectionDataRecord.mandatory_fields]]
self.csv_log_file.write_rows(rows)
def generate_next(self):
return ObjectDetectionDataRecord()
def select_background(self):
pass
def select_classes(self):
pass
def select_positions(self, background_dims, num_classes):
pos_x_randoms = []
pos_y_randoms = []
bck_x, bck_y = background_dims
dist_cls_x, dist_cls_y = self.config.distance_between_pastes
num_of_retries = 10
while len(pos_x_randoms) < num_classes:
for i in range(num_of_retries):
# sample candidate paste positions from the configured position distribution
next_x = int(self.position_distribution.next(calc_distance=False)*bck_x)
next_y = int(self.position_distribution.next(calc_distance=False)*bck_y)
valid = True
for j in range(len(pos_x_randoms)):
# enforce a minimum absolute distance to every previously chosen position
valid = (abs(pos_x_randoms[j] - next_x) >= dist_cls_x) and (abs(pos_y_randoms[j] - next_y) >= dist_cls_y)
if not valid:
break
if valid or i == num_of_retries - 1:
pos_x_randoms.append(next_x)
pos_y_randoms.append(next_y)
return zip(pos_x_randoms, pos_y_randoms)
def apply_modifications_to_background(self, background):
pass
def apply_modifications_to_classes(self, classes):
pass
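# Illustrative usage sketch (added for clarity; `gen` is a hypothetical, fully
# configured ObjectDetectionGenerator). select_positions() samples (x, y) paste
# positions on a background of the given size, retrying up to 10 times to keep
# candidates at least config.distance_between_pastes apart:
#
#   positions = list(gen.select_positions(background_dims=(640, 480), num_classes=3))
#   # e.g. [(512, 87), (33, 401), (250, 199)]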
| StarcoderdataPython |
110366 | # -*- coding: UTF-8 -*-
# Copyright 2015 <NAME>
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
__author__ = 'drx'
def populate(p):
p.city("Giza", "الجيزة", "")
p.city("Al Haram", "الهرم", "")
p.city("King Faisel", "الملك فيصل", "")
p.city("Memphis", "ممفيس", "")
p.city("6th of October", "السادس من اكتوبر", "")
p.city("Sheikh Zayed", "الشيخ زايد", "")
p.city("Others", "اخرى", "") | StarcoderdataPython |
1748528 | <gh_stars>0
#
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
from numpy import zeros
# Enthought library imports.
from traits.api import Any, Bool, Float, Instance, Property, Tuple
# Local relative imports
from image_plot import ImagePlot
from abstract_colormap import AbstractColormap
from speedups import apply_selection_fade
class CMapImagePlot(ImagePlot):
""" Colormapped image plot. Takes a value data object whose elements are
scalars, and renders them as a colormapped image.
"""
# TODO: Modify ImageData to explicitly support scalar value arrays
#------------------------------------------------------------------------
# Data-related traits
#------------------------------------------------------------------------
# Maps from scalar data values in self.data.value to color tuples
value_mapper = Instance(AbstractColormap)
# Convenience property for value_mapper as color_mapper
color_mapper = Property
# Convenience property for accessing the data range of the mapper.
value_range = Property
# alpha value to use to fade out unselected data points when there is an
# active selection
fade_alpha = Float(0.3)
#fade_background = Tuple((255,255,255))
# RGB color to use to fade out unselected points.
fade_background = Tuple((0,0,0))
# whether to pre-compute the full colormapped RGB(A) image
cache_full_map = Bool(True)
#------------------------------------------------------------------------
# Private Traits
#------------------------------------------------------------------------
# Is the mapped image valid?
_mapped_image_cache_valid = Bool(False)
# Cache of the fully mapped RGB(A) image.
_cached_mapped_image = Any
#------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------
def __init__(self, **kwargs):
super(CMapImagePlot, self).__init__(**kwargs)
if self.value_mapper:
self.value_mapper.on_trait_change(self._update_value_mapper,
"updated")
if self.value:
self.value.on_trait_change(self._update_selections,
"metadata_changed")
def set_value_selection(self, val):
""" Sets a range of values in the value data source as selected.
"""
if val is not None:
low, high = val
data = self.value.get_data()
new_mask = (data>=low) & (data<=high)
self.value.metadata["selection_masks"] = [new_mask]
else:
del self.value.metadata["selection_masks"]
self._update_selections()
#------------------------------------------------------------------------
# Base2DPlot interface
#------------------------------------------------------------------------
def _render(self, gc):
""" Ensures that the cached image is valid.
Called before _render() is called. Implements the Base2DPlot interface.
"""
if not self._mapped_image_cache_valid:
if 'selection_masks' in self.value.metadata:
self._compute_cached_image(self.value.metadata['selection_masks'])
else:
self._compute_cached_image()
ImagePlot._render(self, gc)
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _cmap_values(self, data, selection_masks=None):
""" Maps the data to RGB(A) with optional selection masks overlayed
"""
# get the RGBA values from the color map as uint8
mapped_image = self.value_mapper.map_uint8(data)
if selection_masks is not None:
# construct a composite mask
if len(selection_masks) > 0:
mask = zeros(mapped_image.shape[:2], dtype=bool)
for m in selection_masks:
mask = mask | m
else:
mask = zeros(self._cached_mapped_image.shape[:2], dtype=bool)
# Apply the selection fade, from speedups.py
apply_selection_fade(mapped_image, mask,
self.fade_alpha, self.fade_background)
return mapped_image
def _compute_cached_image(self, selection_masks=None):
""" Updates the cached image.
"""
if self.cache_full_map:
if not self._mapped_image_cache_valid:
self._cached_mapped_image = self._cmap_values(self.value.data,
selection_masks)
self._mapped_image_cache_valid = True
mapped_value = self._cached_mapped_image
ImagePlot._compute_cached_image(self, mapped_value)
else:
self._mapped_image_cache_valid = True
ImagePlot._compute_cached_image(self, self.value.data, mapper=lambda data:
self._cmap_values(data))
def _update_value_mapper(self):
self._mapped_image_cache_valid = False
self._image_cache_valid = False
self.invalidate_draw()
def _update_selections(self):
self._mapped_image_cache_valid = False
self._image_cache_valid = False
self.invalidate_draw()
#------------------------------------------------------------------------
# Properties
#------------------------------------------------------------------------
def _get_value_range(self):
return self.value_mapper.range
def _set_value_range(self, val):
self.value_mapper.range = val
def _get_color_mapper(self):
return self.value_mapper
def _set_color_mapper(self, val):
self.value_mapper = val
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
def _value_mapper_changed(self, old, new):
if old is not None:
old.on_trait_change(self._update_value_mapper,
"updated", remove=True)
if new is not None:
new.on_trait_change(self._update_value_mapper, "updated")
if old and new:
if new.range is None and old.range is not None:
new.range = old.range
self._update_value_mapper()
def _value_data_changed_fired(self):
super(CMapImagePlot, self)._value_data_changed_fired()
self._mapped_image_cache_valid = False
return
def _index_data_changed_fired(self):
super(CMapImagePlot, self)._index_data_changed_fired()
self._mapped_image_cache_valid = False
return
def _cache_full_map_changed(self):
self._mapped_image_cache_valid = False
| StarcoderdataPython |
1667143 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 24 05:55:59 2021
@author: jeremiasendrinajr
"""
import streamlit as st
# A small object-oriented helper for building a multi-page Streamlit app
class MultiApp:
def __init__(self):
self.apps = []
# register a page with its title and render function
def add_app(self, title, func):
self.apps.append({
"title": title,
"function": func
})
# render a sidebar page selector on the main page and run the selected page
def run(self):
app = st.sidebar.radio(
'Click the page to display:',
self.apps,
format_func=lambda app: app['title'])
app['function']() | StarcoderdataPython |
99412 | # -*- coding: utf-8 -*-
"""Provides ``get_redis_client``, a redis client factory that can be used
directly, or in the context of a Pyramid application as a request method.
"""
__all__ = [
'GetRedisClient',
'get_redis_client'
]
import logging
logger = logging.getLogger(__name__)
from pyramid_redis import DEFAULT_SETTINGS
from pyramid_redis.hooks import RedisFactory
class GetRedisClient(object):
def __init__(self, **kwargs):
self.factory = kwargs.get('factory', RedisFactory())
self.settings = kwargs.get('settings', DEFAULT_SETTINGS)
def __call__(self, request=None):
if request is None:
registry = None
settings = self.settings
else:
registry = request.registry
settings = registry.settings
return self.factory(settings, registry=registry)
get_redis_client = GetRedisClient()
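# Illustrative usage sketch (added for clarity; the view function below is
# hypothetical). Called with no arguments, the factory falls back to
# DEFAULT_SETTINGS; called with a Pyramid request, it uses the application's
# registry settings:
#
#   redis = get_redis_client()            # standalone, default settings
#
#   def my_view(request):                 # hypothetical Pyramid view
#       redis = get_redis_client(request) # uses request.registry.settings
#       ...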
| StarcoderdataPython |
3240180 | <reponame>doubleblind148/IGCCF<filename>tests/datasets/implemented_datasets/test_lastfm.py
#!/usr/bin/env python
__author__ = "XXX"
__email__ = "XXX"
import pytest
import pandas as pd
def test_load():
from datasets.implemented_datasets.lastfm import LastFM
dataset = LastFM()
dataset.download()
dataset = dataset.load()
assert isinstance(dataset, pd.DataFrame) | StarcoderdataPython |
84623 | from typing import Optional, MutableMapping, List, Union
from datetime import datetime, timedelta
from fastapi.security import OAuth2PasswordBearer
from passlib.context import CryptContext
from sqlalchemy.orm.session import Session
from jose import jwt
from app.models import User
from app.config import settings
JWTPayloadMapping = MutableMapping[
str, Union[datetime, bool, str, List[str], List[int]]
]
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password: str, hashed_password: str) -> bool:
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(password: str) -> str:
return pwd_context.hash(password)
def authenticate(*, email: str, password: str, db: Session,) -> Optional[User]:
user = db.query(User).filter(User.email == email).first()
if not user:
return None
if not verify_password(password, user.hashed_password):
return None
return user
def get_user_jwt_payload(user: User,) -> JWTPayloadMapping:
payload: JWTPayloadMapping = {}
payload["aud"] = [settings.DOMAIN]
return payload
def create_access_token(sub: str, payload: JWTPayloadMapping,) -> str:
return _create_token(
token_type="access_token",
lifetime=timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES),
sub=sub,
payload=payload,
)
def _create_token(
token_type: str,
lifetime: timedelta,
sub: str,
payload: Optional[JWTPayloadMapping] = None,
) -> str:
payload = payload if payload else {}
expire = datetime.utcnow() + lifetime
payload["type"] = token_type
payload["exp"] = expire
payload["iat"] = datetime.utcnow()
payload["sub"] = str(sub)
return jwt.encode(payload, settings.JWT_SECRET, algorithm=settings.ALGORITHM)
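# Illustrative login-flow sketch (added for clarity; the `db` session and the
# User.id attribute are assumptions based on typical usage of this module):
#
#   user = authenticate(email="user@example.com", password="secret", db=db)
#   if user:
#       token = create_access_token(sub=str(user.id),
#                                   payload=get_user_jwt_payload(user))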
| StarcoderdataPython |
#!/usr/bin/env python3
def is_chinese(uchar):
"""判断一个unicode是否是汉字"""
if uchar >= '\u4e00' and uchar <= '\u9fa5':
return True
else:
return False
def is_number(uchar):
"""判断一个unicode是否是数字"""
if uchar >= '\u0030' and uchar <= '\u0039':
return True
else:
return False
def is_alphabet(uchar):
"""判断一个unicode是否是英文字母"""
if (uchar >= '\u0041' and uchar <= '\u005a') \
or (uchar >= '\u0061' and uchar <= '\u007a'):
return True
else:
return False
def is_other(uchar):
"""判断是否非汉字,数字和英文字符"""
if not (is_chinese(uchar) or is_number(uchar) or is_alphabet(uchar)):
return True
else:
return False
def B2Q(uchar):
"""半角转全角"""
inside_code = ord(uchar)
if inside_code < 0x0020 or inside_code > 0x7e:
# not a halfwidth character, so return the original character
return uchar
if inside_code == 0x0020:  # for everything except the space, the conversion formula is: halfwidth = fullwidth - 0xfee0
inside_code = 0x3000
else:
inside_code += 0xfee0
return chr(inside_code)
def Q2B(uchar):
"""全角转半角"""
inside_code = ord(uchar)
if inside_code == 0x3000:
inside_code = 0x0020
else:
inside_code -= 0xfee0
if inside_code < 0x0020 or inside_code > 0x7e:
# the converted value is not a halfwidth character, so return the original character
return uchar
return chr(inside_code)
def stringQ2B(ustring):
"""把字符串全角转半角"""
return "".join([Q2B(uchar) for uchar in ustring])
def uniform(ustring):
"""格式化字符串,完成全角转半角,大写转小写的工作"""
return stringQ2B(ustring).lower()
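# Illustrative examples (added for clarity; not part of the original module):
#
#   is_chinese('中')         # True
#   is_alphabet('Ａ')        # False -- a fullwidth letter is outside the ASCII ranges
#   Q2B('Ａ')                # 'A'  (fullwidth -> halfwidth)
#   uniform('ＡＢＣ１２３')   # 'abc123' (halfwidth conversion plus lowercasing)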
| StarcoderdataPython |
98073 | <gh_stars>0
# Importing Libraries
from requests import get
print('[Scripts.zAllSBStats] - Imported requests.get')
from pathlib import Path
print('[Scripts.zAllSBStats] - Imported pathlib.Path')
import os
print('[Scripts.zAllSBStats] - Imported os')
from dotenv import load_dotenv
print('[Scripts.zAllSBStats] - Imported dotenv.load_dotenv')
import zNumberFormat
print('[Scripts.zAllSBStats] - Imported Scripts.zNumberFormat')
import zSBStalk
print('[Scripts.zAllSBStats] - Imported Scripts.zSBStalk')
# Loading Data From .env File
load_dotenv()
env_path = Path('.') / '.env'
api_key = os.getenv("API_KEY")
print('[Scripts.zAllSBStats] - Read API_Key from ENV')
# Getting UUID Using Mojang API
def getUUID(username):
try:
playerdata_mojang = get("https://api.mojang.com/users/profiles/minecraft/%s" % (username)).json()
print('[Scripts.zAllSBStats.getUUID] - Mojang API Response Positive')
uuid = playerdata_mojang["id"]
print('[Scripts.zAllSBStats.getUUID] - Returning UUID')
return uuid
except:
print('[Scripts.zAllSBStats.getUUID] - Error, Can\'t Return UUID, Exiting')
return 'no'
# print('[Scripts.zAllSBStats] - ')
def getLatestProfile(username):
print('[Scripts.zAllSBStats.getLatestProfile] - Calling Scripts.zSBStalk.getLatestProfile')
print('[Scripts.zAllSBStats.getLatestProfile] - Returning Profile')
return zSBStalk.getLatestProfile(getUUID(username))
def getStats(username):
print('[Scripts.zAllSBStats.getStats] - Getting Profile Data')
latestProfile, latestProfileID = getLatestProfile(username)
print('[Scripts.zAllSBStats.getStats] - Received Profile Data')
data = get('https://sky.shiiyu.moe/api/v2/profile/%s' % (username)).json()['profiles'][latestProfileID]['data']['stats']
print('[Scripts.zAllSBStats.getStats] - Receiving Data from API with username: %s' % (username))
print('[Scripts.zAllSBStats.getStats] - Parsing JSON File')
health = zNumberFormat.comma(data['health'])
defence = zNumberFormat.comma(data['defense'])
effective_health = zNumberFormat.comma(data['effective_health'])
strength = zNumberFormat.comma(data['strength'])
speed = zNumberFormat.comma(data['speed'])
intelligence = zNumberFormat.comma(data['intelligence'])
sea_creature_chance = zNumberFormat.comma(data['sea_creature_chance'])
magic_find = zNumberFormat.comma(data['magic_find'])
pet_luck = zNumberFormat.comma(data['pet_luck'])
ferocity = zNumberFormat.comma(data['ferocity'])
ability_damage = zNumberFormat.comma(data['ability_damage'])
mining_speed = zNumberFormat.comma(data['mining_speed'])
mining_fortune = zNumberFormat.comma(data['mining_fortune'])
farming_fortune = zNumberFormat.comma(data['farming_fortune'])
foraging_fortune = zNumberFormat.comma(data['foraging_fortune'])
print('[Scripts.zAllSBStats.getStats] - JSON File Parsed, Values Stored')
talismans = get('https://sky.shiiyu.moe/api/v2/profile/%s' % (username)).json()['profiles'][latestProfileID]['data']['talismanCount']
print('[Scripts.zAllSBStats.getStats] - Receiving Talisman Data')
print('[Scripts.zAllSBStats.getStats] - Returning All Data')
return [health, defence, effective_health, strength, speed, intelligence, sea_creature_chance, magic_find, pet_luck, ferocity, ability_damage, mining_speed, mining_fortune, farming_fortune, foraging_fortune]
def getFairySouls(username):
print('[Scripts.zAllSBStats.getFairySouls] - Getting Profile Data')
latestProfile, latestProfileID = getLatestProfile(username)
print('[Scripts.zAllSBStats.getFairySouls] - Received Profile Data')
data = get('https://sky.shiiyu.moe/api/v2/profile/%s' % (username)).json()['profiles'][latestProfileID]['data']['fairy_souls']
print('[Scripts.zAllSBStats.getFairySouls] - Receiving Fairy Souls Data')
print('[Scripts.zAllSBStats.getFairySouls] - Parsing Data')
collected = data['collected']
total = data['total']
progress = round(data['progress'] * 100, 2)
print('[Scripts.zAllSBStats.getFairySouls] - Data Parsing Complete')
print('[Scripts.zAllSBStats.getFairySouls] - Returning Values')
return [collected, total, progress]
def maincommand(username):
stats = getStats(username)
fairy_souls = getFairySouls(username)
return stats + fairy_souls
maincommand('nottcurious') | StarcoderdataPython |
3285278 | # coding=utf-8
# Copyright 2022 DataLab Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import datalabs
from datalabs import get_task, TaskType
logger = datalabs.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{emerson-2005-second,
title = "The Second International {C}hinese Word Segmentation Bakeoff",
author = "<NAME>",
booktitle = "Proceedings of the Fourth {SIGHAN} Workshop on {C}hinese Language Processing",
year = "2005",
url = "https://aclanthology.org/I05-3017",
}
"""
_DESCRIPTION = """\
The Simplified Chinese corpora were provided by Beijing University and Microsoft Research Beijing.
"""
_HOMEPAGE = "http://sighan.cs.uchicago.edu/bakeoff2005/"
_LICENSE = "Available for research use"
_URL = "https://datalab-hub.s3.amazonaws.com/msr.zip"
class MSR(datalabs.GeneratorBasedBuilder):
VERSION = datalabs.Version("1.0.0")
def _info(self):
return datalabs.DatasetInfo(
description=_DESCRIPTION,
features=datalabs.Features(
{
"id": datalabs.Value("string"),
"tokens": datalabs.Sequence(datalabs.Value("string")),
"tags": datalabs.Sequence(
datalabs.features.ClassLabel(
names=["B","M","E","S"]
)
),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
languages=['zh'],
version=self.VERSION,
task_templates=[get_task(TaskType.word_segmentation)(
tokens_column="tokens",
tags_column="tags")],
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_dir = dl_manager.download_and_extract(_URL)
return [
datalabs.SplitGenerator(
name=datalabs.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir + "/msr/", "train-msr.tsv"),
},
),
datalabs.SplitGenerator(
name=datalabs.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir + "/msr/", "test-msr.tsv"),
},
),
datalabs.SplitGenerator(
name=datalabs.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir + "/msr/", "dev-msr.tsv"),
},
),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
current_tokens = []
current_labels = []
sentence_counter = 0
for row in f:
row = row.rstrip()
if row:
token, label = row.split("\t")
current_tokens.append(token)
current_labels.append(label)
else:
# New sentence
if not current_tokens:
# Consecutive empty lines will cause empty sentences
continue
assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
sentence = (
sentence_counter,
{
"id": str(sentence_counter),
"tokens": current_tokens,
"tags": current_labels,
},
)
sentence_counter += 1
current_tokens = []
current_labels = []
yield sentence
# Don't forget last sentence in dataset 🧐
if current_tokens:
yield sentence_counter, {
"id": str(sentence_counter),
"tokens": current_tokens,
"tags": current_labels,
}
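# Illustrative input layout assumed by _generate_examples above: one
# "token<TAB>tag" pair per line, with a blank line separating sentences.
# The rows below are a hypothetical example:
#
#   中	B
#   国	E
#   人	S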
| StarcoderdataPython |
24643 | #!/usr/bin/python3
import os
import click
import sys
import csv
import time
import pandas as pd
import country_converter as coco
import hashlib
import phonenumbers
from tqdm import tqdm
from uszipcode import SearchEngine
HEADER_TRANSLATIONS = {
"email1": "Email",
"phone1": "Phone",
"person_country": "Country",
}
REQUIRED_HEADERS = {"<NAME>", "<NAME>", "Phone", "Email", "Country", "Zip"}
OPTIONAL_HEADERS = set() # TODO: Add optional headers that can be uploaded.
# All headers that can be in a Customer Match CSV.
ALL_HEADERS = REQUIRED_HEADERS.union(OPTIONAL_HEADERS)
DO_NOT_HASH = {"Country", "Zip"}
# ANSI codes to color/format terminal prints.
ANSI = {
"YELLOW": "\u001b[33m",
"RED": "\u001b[31m",
"CYAN": "\u001b[36m",
"BOLD": "\u001b[1m",
"RESET": "\u001b[0m",
}
class Error(ValueError):
"""Base class for other custom exceptions"""
pass
class FormatError(Error):
"""Raised when a file is not in the correct format."""
pass
class NoZipError(FormatError):
"""Raised when a zip code is not found in a spreadsheet. Sometimes recoverable."""
pass
# ==========================
# Formatted console prints
# ==========================
def warn(message: str):
tqdm.write(f"{ANSI['BOLD'] + ANSI['YELLOW']}WARNING:{ANSI['RESET']} {message}")
def notify(message: str):
tqdm.write(f"{ANSI['BOLD'] + ANSI['CYAN']}INFO:{ANSI['RESET']} {message}")
def check_path(filepath: str):
"""Checks that the path to a file exists. To check if a path to the file and the file itself exists,
use check_csv
Args:
filepath (str): The path to the file
Raises:
ValueError: If the path to the file does not exist
"""
path = os.path.dirname(filepath)
if path.strip() and not os.path.exists(path):
raise ValueError(f"The path {path} does not exist.")
def check_csv(filepath: str) -> csv.Dialect:
"""Runs checks on a CSV file, such as whether it exists and if it can be parsed, and returns
its dialect object
Args:
filepath (str): Path to the CSV file
Raises:
ValueError: If the path does not exist, or the file cannot be read as a CSV
Returns:
csv.Dialect: Parsed CSV dialect from the file
"""
# Check that the file exists, and is a file.
basename = os.path.basename(filepath)
if not os.path.exists(filepath):
raise ValueError(f"The path {filepath} does not exist.")
if not os.path.isfile(filepath):
raise ValueError(f"{basename} is not a file.")
# Try to open the file and verify it can be read as a CSV.
try:
file = open(filepath, encoding="utf8")
dialect = csv.Sniffer().sniff(file.read(100000))
file.seek(0)
file.close()
return dialect
except csv.Error as e:
raise ValueError(
f"Could not get a CSV dialect for file {basename}. Is it a CSV file? Is it maybe too large?"
)
def parse_google_fields(filepath: str, ignore_zip: bool = False) -> dict:
"""Parse the header of the CSV to get the Google field names.
Args:
filepath (str): Path to the CSV file.
ignore_zip (bool): Flag to ignore the zip code column, and not throw an error if it is missing.
Raises:
ValueError: If not all required headers can be found
Returns:
dict: A map from the field name that was found in the CSV to Google's field name.
eg: "first_name": "<NAME>"
"""
field_map = {}
found_headers = []
with open(filepath, "r", encoding="utf8") as file:
reader = csv.DictReader(file)
field_names = reader.fieldnames
# For each field in the header column, try to translate
# them to a header recognized by Google.
for field in field_names:
header = None
# Check if there is a direct translation first:
if field in HEADER_TRANSLATIONS:
header = HEADER_TRANSLATIONS[field]
# Otherwise attempt to translate snake case:
elif (translated_field := field.replace("_", " ").title()) in ALL_HEADERS:
header = translated_field
# If we have not found this header yet, add it to the map.
# Otherwise, if we have found the header already, warn the user.
if header is not None and header not in found_headers:
notify(f"Detected header name '{header}' as '{field}' in CSV file")
field_map[field] = header
found_headers.append(header)
elif header in found_headers:
# look up which CSV field was already kept for this Google header
kept_field = next(k for k, v in field_map.items() if v == header)
warn(
f"Duplicate header name '{header}' was extracted as '{field}'. Keeping column with header '{kept_field}'"
)
# Check if we have all required headers.
# All required headers are found if the required headers set is a subset of the headers found.
if not REQUIRED_HEADERS.issubset(field_map.values()):
missing_headers = REQUIRED_HEADERS.difference(field_map.values())
if len(missing_headers) == 1 and list(missing_headers)[0] == "Zip":
if not ignore_zip:
raise NoZipError(field_map)
else:
raise FormatError(
f"Not all required headers found. Missing: {', '.join(missing_headers)}"
)
return field_map
def parse_location_fields(filepath: str) -> dict:
"""Parse a header of a CSV file to get the country and city.
Args:
filepath (str): Path to the CSV file
Raises:
FormatError: If the city, country or both columns cannot be found.
Returns:
dict: A map from the field name that was found in the CSV to the standardized name.
eg: "person_city": "City"
"""
WANTED_FIELDS = {"state", "city"}
found_translations = []
field_map = {}
with open(filepath, "r", encoding="utf8") as file:
reader = csv.DictReader(file)
field_names = reader.fieldnames
for field in field_names:
# Salesql CSVs prefix state and city by person_.
field = field.lower()
salesql_field = field.replace("person_", "")
possible_fields = {field, salesql_field}
if found_set := WANTED_FIELDS.intersection(possible_fields):
translation = list(found_set)[0]
notify(f"Detected header name '{translation}' as '{field}' in CSV file")
found_translations.append(translation)
field_map[field] = translation
if not WANTED_FIELDS.issubset(field_map.values()):
missing_fields = WANTED_FIELDS.difference(field_map.values())
raise FormatError(
f"Could not find state and city columns. Missing: {', '.join(missing_fields)}"
)
return field_map
def hash_element(element: any) -> str:
"""Produces a sha256 hash of an element of data.
Args:
element (any): The data to be hashed
Returns:
str: The sha256 hash hex digest
"""
element = str(element).encode("utf-8")
return hashlib.sha256(element).hexdigest()
def hash_series(series: pd.Series):
"""Hashes a series, usually represnting columns in a CSV.
Args:
series (pd.Series): [description]
Returns:
[type]: [description]
"""
# If the name of the series is a field
# that shouldn't be hashed (eg: Zip), don't hash it.
if series.name in DO_NOT_HASH:
return series
else:
return series.map(hash_element)
def hash_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Hashes all elements in a Pandas dataframe.
Args:
dataframe (pd.DataFrame): The dataframe to be hashed
Returns:
pd.DataFrame: The dataframe with all elements hashed
"""
notify(f"Hashing {dataframe.size} elements...")
start = time.time()
dataframe = dataframe.apply(hash_series, axis=0)
notify(
f"Finished hashing {dataframe.size} elements in {time.time() - start} seconds."
)
return dataframe
def get_dataframe(filepath: str) -> pd.DataFrame:
"""Gets a dataframe for a given CSV file.
Args:
filepath (str): Path to the CSV file.
Returns:
pd.DataFrame: The parsed CSV as a dataframe of strings
"""
dialect = check_csv(filepath)
return pd.read_csv(
filepath,
warn_bad_lines=False,
error_bad_lines=False,
sep=dialect.delimiter,
low_memory=False,
dtype=str,
)
def translate_dataframe(dataframe: pd.DataFrame, field_map: dict) -> pd.DataFrame:
"""Translates a CSV file to use Google's desired field names in the header.
Any columns with field names that are not recognized by the Customer Match
specification are removed.
Args:
dataframe (pd.DataFrame): The DataFrame of the CSV file.
Returns:
pd.DataFrame: The pandas dataframe that was translated.
Can be exported to a CSV with the save_csv function.
"""
# Parse the headers into a field_map.
# Keep only the columns that have matching headers.
dataframe = dataframe[field_map.keys()]
# Reverse the map to rename columns to Google's expectation.
dataframe = dataframe.rename(columns=field_map)
return dataframe
def save_csv(dataframe: pd.DataFrame, output: str):
"""Saves a dataframe to a CSV file.
Args:
dataframe (pd.DataFrame): The dataframe to be saved
output (str): The filepath to be saved to
"""
dataframe.to_csv(output, index=False, encoding="utf-8")
notify(f"Succesfully saved Customer Match data file to {os.path.abspath(output)}.")
def get_zip(row: pd.Series, search: SearchEngine) -> str:
"""Get the zip code for a row in a dataframe with the city and state.
Args:
row (pd.Series): A series containing a city and state field.
search (SearchEngine): The search engine object to lookup the zipcode.
Returns:
str: The zipcode if found. None otherwise.
"""
try:
if row.count() == 2:
res = search.by_city_and_state(city=row["city"], state=row["state"])
return res[0].zipcode
else:
warn(f"NaN detected for {row['city']}, {row['state']}.")
return ""
except (AttributeError, IndexError):
warn(f"Zip lookup for {row['city']}, {row['state']} failed.")
return ""
def get_zips(dataframe: pd.DataFrame) -> pd.Series:
"""Gets the zips for a dataframe with city and state columns.
Args:
dataframe (pd.DataFrame): The dataframe, must have city and state columns.
Returns:
pd.Series: A series of zip codes corresponding to each city and state.
"""
search = SearchEngine()
tqdm.pandas(desc="Getting zipcodes")
zips = dataframe.progress_apply(lambda row: get_zip(row, search), axis=1)
zips = zips.rename("Zip")
return zips
def convert_to_iso(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Converts a dataframe's Country column to ISO2 format (United States => US)
Args:
dataframe (pd.DataFrame): A dataframe with a Country column.
Returns:
pd.DataFrame: The dataframe with the Country column in ISO2 format.
"""
notify(f"Converting {len(dataframe.index)} countries to ISO2 format...")
start = time.time()
iso2_names = coco.convert(names=dataframe["Country"], to="ISO2", not_found=None)
dataframe["Country"] = pd.Series(iso2_names)
notify(
f"Finished converting countries to ISO2 format in {time.time() - start} seconds."
)
return dataframe
def normalize_series(column: pd.Series) -> pd.Series:
"""Formats a series (usually a column) of strings to be all lowercase and without whitespace.
Args:
column (pd.Series): The series of strings to be normalized
Returns:
pd.Series: The same series, with normalized strings.
"""
def format(el: str) -> str:
el = el.strip()
el = el.lower()
return el
return column.map(format)
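# Illustrative example (added for clarity): normalize_series strips whitespace
# and lowercases every element of a column, e.g.
#
#   normalize_series(pd.Series([" Alice ", "BOB"]))   # -> ["alice", "bob"]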
def get_e164(row: pd.Series) -> str:
"""Takes a series containing a Phone and Country column and returns the
phone number in E.164 format.
Args:
row (pd.Series): A series containing at least a Phone and Country column.
Returns:
str: The phone number in E.164 format, if it could be formatted.
None otherwise.
"""
if row.count() == 2:
try:
number = phonenumbers.parse(row["Phone"], row["Country"])
return phonenumbers.format_number(
number, phonenumbers.PhoneNumberFormat.E164
)
except phonenumbers.NumberParseException:
warn(
f"Can't parse phone number {row['Phone']} for country {row['Country']}. It is not recognized as a valid number."
)
return None
else:
# warn(
# f"Can't convert phone number {row['Phone']} for country {row['Country']} due to missing data."
# )
return None
def convert_to_e164(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Converts a dataframe's Phone column to E.164. Requires a Country column.
Args:
dataframe (pd.DataFrame): A dataframe with a Phone and Country column
Returns:
pd.DataFrame: The same dataframe with the Phone column reformatted to E.164.
"""
tqdm.pandas(desc="Converting phone numbers to E.164 format")
numbers = dataframe[["Country", "Phone"]].progress_apply(get_e164, axis=1)
dataframe["Phone"] = numbers
return dataframe
def format_for_hashing(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Performs formatting on a dataframe necessary for accurate hashing.
Will convert the Country column to ISO, normalize all strings, and convert
the phone number column to E.164 format.
Args:
dataframe (pd.DataFrame): A dataframe to be formatted
Returns:
pd.DataFrame: The same dataframe formatted. May have many NaN values!
"""
notify("Formatting file for hashing...")
dataframe = dataframe.apply(normalize_series, axis=0)
dataframe = convert_to_iso(dataframe)
dataframe = convert_to_e164(dataframe)
notify("Done formatting file.")
return dataframe
def prune(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Drops any rows in a dataframe that contain NaN, and prints
how many rows were affected.
Args:
dataframe (pd.DataFrame): Dataframe to be pruned
Returns:
pd.DataFrame: Same dataframe without rows that have NaN.
"""
total_rows = len(dataframe.index)
notify(f"Removing rows with empty values...")
dataframe = dataframe.dropna()
pruned_rows = len(dataframe.index)
notify(f"Removed {total_rows - pruned_rows} rows with empty values.")
return dataframe
@click.command(
help="Generates a Google Ads Customer Match compliant CSV file from a (potentially large) CSV file in another format."
)
@click.option("-o", "--output", default="result.csv", help="Path to output file.")
@click.option(
"--hash",
"do_hash",
help="SHA256 hash each element in the resulting CSV.",
is_flag=True,
)
@click.option(
"--ignore-empty",
help="Don't remove rows with empty elements.",
is_flag=True,
)
@click.option(
"--format",
help="Format the document as it would before hashing with E.164 phone numbers and lowercase names. Will remove a significant amount of rows.",
is_flag=True,
)
@click.argument("filepath")
def main(
filepath: str, output: str, do_hash: bool, ignore_empty: bool, format: bool
):
try:
file = None
# Attempt to translate to Google's standard.
try:
check_path(output)
file = get_dataframe(filepath)
field_map = parse_google_fields(filepath)
file = translate_dataframe(file, field_map)
# If the no zip is found, it is possible to lookup zip
# codes. Ask the user if they want to try.
except NoZipError:
warn(
"A zip code column could not be found in the CSV file. If there is a state and city column, the zip codes may be able to be automatically detected. This may take hours, depending on your file size."
)
if click.confirm("Would you like to try to detect zip codes?"):
field_map = parse_location_fields(filepath)
states_and_cities = translate_dataframe(file, field_map)
zip_codes = get_zips(states_and_cities)
field_map = parse_google_fields(filepath, ignore_zip=True)
translated = translate_dataframe(file, field_map)
file = pd.concat([translated, zip_codes], axis=1)
else:
sys.exit()
if not ignore_empty:
file = prune(file)
# Format the file for hashing if we are going to hash.
# Country codes are converted to ISO as a step in hashing, so
# we only have to convert if we are not hashing.
if do_hash or format:
file = format_for_hashing(file)
else:
file = convert_to_iso(file)
# Check again for empty values, if phone numbers can't be formatted
# or ISO formats can't be found.
if not ignore_empty:
file = prune(file)
# Hashing must be the last step, or else NaN will be hashed.
if do_hash:
file = hash_dataframe(file)
save_csv(file, output)
return 0
except ValueError as e:
sys.exit(f"{ANSI['BOLD'] + ANSI['RED']}ERROR:{ANSI['RESET']} {e}")
if __name__ == "__main__":
main() | StarcoderdataPython |
21755 | import rpyc
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
#############
## CLIENT  ##
#############
def generiraj_kljuceve():
key = RSA.generate(2048)
# create the private key and save it to a file
file_out = open("private_key.pem", "wb")
file_out.write(key.export_key())
file_out.close()
# create the public key and save it to a file
file_out = open("public_key.pem", "wb")
file_out.write(key.publickey().export_key())
file_out.close()
return True
flag = True
try:
# the client loads its public and private keys from the previously created files
prKey = RSA.import_key(open('private_key.pem').read())
puKey = RSA.import_key(open('public_key.pem').read())
except FileNotFoundError:
# if the key files are not found, proceed to generate new ones
print("Nije pronađena adresa pridružena klijentu!")
odabir = input("Generirati novu adresu?[D/N]: ")
odabir = odabir.lower()
if odabir == 'd':
if generiraj_kljuceve():
print("Stvaranje ključeva uspjelo")
prKey = RSA.import_key(open('private_key.pem').read())
puKey = RSA.import_key(open('public_key.pem').read())
else:
print('Prekid programa!')
flag=False
if flag:
c = rpyc.connect("127.0.0.1", 25555)
# after connecting to the server, enter the user-interface loop
while True:
opcija = int(input(
""" 1-Pošaljite transakciju na odabranu adresu
2-Provjerite stanje svoje adrese
3-Provjerite stanje tuđe adrese
4-Prijavi svoju adresu na mrežu
5-Odustani
Odabir[1-5]: """))
if opcija == 1:
###############################################
# implement input of the destination address and the amount
# -> the user is asked to enter these two values
###############################################
adresa_primatelja = input('Unesite adresu primatelja: ')
iznos = input('Unesite iznos transakcije: ')
# message contains a string with the transaction information in the form:
# adresa_posiljatelja#adresa_primatelja#iznos (sender_address#recipient_address#amount)
# the character # is the delimiter between the individual values
adresa_posiljatelja = str(puKey.n)
##################################################################
# compose the string to be sent to the server as described above
# and store it in the variable message
##################################################################
message = '#'.join([adresa_primatelja, adresa_posiljatelja, iznos])
# hacked system
#message = '#'.join([adresa_primatelja, adresa_posiljatelja, iznos])
# before creating the signature, the regular string must be converted to a byte string
message = message.encode()
# compute the hash of the message
h = SHA256.new(message)
# the hash is encrypted with the client's private key, which produces the signature.
# the server can decrypt the signature with the client's public key and recover the hash from it
# the server can determine the client's public key from the client's address
signature = pkcs1_15.new(prKey).sign(h)
print(c.root.transakcija(message,signature))
# the line above sends the transaction with a digital signature, while the line below sends it without a signature
##print(c.root.transakcija(message))
elif opcija == 2:
print('Adresa: ')
print(str(puKey.n))
print('Stanje: ')
# send the client's address
# the address is taken from the public key via its attribute n
# the address is an integer, so it has to be converted to a string
print(c.root.provjeri_adresu(str(puKey.n)))
elif opcija == 3:
add = str(input('Unesi adresu za provjeru: '))
print('Stanje: ')
print(c.root.provjeri_adresu(add))
elif opcija == 4:
print(c.root.registriraj_adresu(str(puKey.n)))
else:
break
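# Illustrative server-side sketch (added for clarity; not part of this client):
# the signature created above can be verified with the client's public key, e.g.
#
#   h = SHA256.new(message)
#   pkcs1_15.new(puKey).verify(h, signature)   # raises ValueError if invalid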
| StarcoderdataPython |
117252 | '''
MIT License
Optimal Testing and Containment Strategies for Universities in Mexico amid COVID-19
Copyright © 2021 Test and Contain. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,and <NAME>. https://www.testandcontain.com/
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import dash
import time, random, pandas as pd, json
from dash.dash import no_update
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from dash.dependencies import Input, Output, State, MATCH, ALL
from app import dash_app
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from preprocess import blank_fig, health_label
from preprocess import population, _, campuses
from layout import get_layout
import flask
import numpy as np
import os
from dash.exceptions import PreventUpdate
dash_app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
@dash_app.callback(
Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
print ("Displaying", pathname)
if pathname == '/':
dash.callback_context.response.set_cookie('campus_cookie', "/campus1")
return get_layout(len(campuses["campus1"]["categories"]), campuses["campus1"]["no_solutions"], campuses["campus1"]["budget"], campuses["campus1"]["buckets"], campuses["campus1"]["d"], campuses["campus1"]["pi"], campuses["campus1"]["p"], campuses["campus1"]["categories"])
else:
dash.callback_context.response.set_cookie('campus_cookie', pathname)
return get_layout(len(campuses[pathname[1:]]["categories"]), campuses[pathname[1:]]["no_solutions"], campuses[pathname[1:]]["budget"], campuses[pathname[1:]]["buckets"], campuses[pathname[1:]]["d"], campuses[pathname[1:]]["pi"], campuses[pathname[1:]]["p"], campuses[pathname[1:]]["categories"])
@dash_app.callback(
[Output(component_id='location-label', component_property='children'),
Output('campus_id', 'data')
],
[
Input('page-content', 'children')
]
)
def update_campus(page_content):
allcookies=dict(flask.request.cookies)
if 'campus_cookie' in allcookies:
campus_id_prev = allcookies['campus_cookie']
if (campus_id_prev is None):
return campuses['campus1']['label'],"campus1"
return campuses[campus_id_prev[1:]]['label'],campus_id_prev[1:]
def get_fig(solution, campus_id):
"""Generates figure from a solution row."""
categories = campuses[campus_id]['categories']
k = len(categories)
fig = make_subplots(rows=2, cols=1,
subplot_titles=[_("# Unnecessarily self-isolating individuals"),
_("# Prevented critical infections")],
specs=[[{}], [{}]], shared_xaxes=False,
shared_yaxes=False, vertical_spacing=0.25, row_heights=[k, 1])
# Create subfigures
contain_labels = [f'Containment {i}' for i in range(1,k+1)]
y_prev=[cat['name'] for cat in categories]
x_prev=solution[contain_labels]
x_prev=np.trunc(x_prev).astype(int).tolist()
contain_fig = go.Bar(
x=x_prev,
y=[y_prev[i]+"<br>("+str(x_prev[i])+") " for i in range(k)],
marker=dict(color='purple',
line=dict(color='black', width=0)),
orientation='h')
x_prev = -solution[health_label]
x_prev=np.trunc(x_prev).astype(int).tolist()
y_prev = [population['name']]
health_fig = go.Bar(
x=x_prev,
y=[y_prev[i]+"<br>("+str(x_prev[i])+") " for i in range(len(y_prev))],
marker=dict(color=['orange'],
line=dict(color='black', width=0)),
orientation='h')
# Add subfigures to fig
fig.append_trace(contain_fig, 1, 1)
fig.append_trace(health_fig, 2, 1)
# Fix the x-axis of health bar subplot
fig.update_xaxes(range=[0, sum(cat['size'] for cat in categories)], row=2, col=1)
fig.layout.update(margin=dict(l=0, r=10, t=20, b=0),
paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)',
showlegend=False)
fig.update_yaxes(autorange='reversed')
fig.layout.height = 300
return fig
@dash_app.callback(
[Output({'type': 'threshold', 'index': MATCH}, 'value'),
Output({'type': 'threshold', 'index': MATCH}, 'max'),
Output({'type': 'categories_size', 'index': MATCH}, 'children')],
Input('campus_id','data'),
State({'type': 'threshold', 'index': MATCH}, 'id')
)
def update_size_treshold(campus_id, id):
"""Update the size"""
print("Running 'update_size_threshold'.")
categories = campuses[campus_id]['categories']
i = int(id['index'])
if campus_id is not None:
thres_size_string = int(categories[i]['size'])
else:
thres_size_string = None
return thres_size_string, thres_size_string, thres_size_string
@dash_app.callback(
[Output({'type': 'threshold_h'}, 'value'),
Output({'type': 'threshold_h'}, 'max'),
Output({'type': 'population_size'},'children')],
Input('campus_id','data')
)
def update_size_threshold_Healt(campus_id):
print("Running 'update_size_threshold_Healt'.", campus_id)
if campus_id is not None:
population = campuses[campus_id]['population']
return 0, population, population
@dash_app.callback(
Output({'type': 'categories_name', 'index': MATCH}, 'children'),
Output({'type': 'sol_name', 'index': MATCH}, 'children'),
Input('campus_id','data'),
State({'type': 'threshold', 'index': MATCH}, 'id')
)
def update_names(campus_id, id):
"""Update the names"""
print("Running 'update_names'.", campus_id)
categories = campuses[campus_id]['categories']
if campus_id is not None:
i = int(id['index'])
return f"{categories[i]['name']}",f"{categories[i]['name']}"
else:
return None
@dash_app.callback(
Output({'type': 'percent', 'index': MATCH}, 'children'),
Input({'type': 'threshold', 'index': MATCH}, 'value'),
Input('campus_id','data'),
State({'type': 'threshold', 'index': MATCH}, 'id')
)
def update_percentage(threshold, campus_id, id):
"""Update the percentage box corresponding to the threshold value that was updated."""
print("Running 'update_percentage'.")
categories = campuses[campus_id]['categories']
i = int(id['index'])
if threshold is not None:
div = int(categories[i]['size'])
percentage = 0 if (div == 0) else (int(threshold) * 100 / div)
return f"{round(percentage, 2)}%"
else:
return f"100%"
@dash_app.callback(
Output({'type': 'percent_h'}, 'children'),
Input({'type': 'threshold_h'}, 'value'),
Input('campus_id','data')
)
def update_percentage_Healt(threshold, campus_id):
"""Update the percentage box corresponding to the threshold value that was updated."""
print("Running 'update_percentage_Health'.")
population = campuses[campus_id]['population']
if threshold is not None:
percentage = int(threshold) * 100 / int(population)
return f"{round(percentage, 2)}%"
else:
return f"0.0%"
@dash_app.callback(
Output("asked_no_solutions_store","data"),
Output("loading-output", "children"),
Input("asked_no_solutions_button", "n_clicks"),
State("asked_no_solutions", "value"),
State('campus_id','data')
)
def update_asked_solutions(n_clicks,asked_no_solutions, campus_id):
print("Running 'update_asked_solutions'.")
if campus_id is None:
print ("Default campus")
campus_id = "campus1"
return "done",""
print (campus_id)
os.system('julia pareto/pareto.jl data/'+ campus_id +'.json ' + str(asked_no_solutions) + ' data/'+ campus_id +'.csv')
print("From method")
return "done",""
## This method has problems when there are different numbers of categories
@dash_app.callback(
Output("bar-chart", "figure"),
Output({'type': 'allocation', 'index': ALL}, 'children'),
Output({'type': 'groupsize', 'index': ALL}, 'children'),
Input("jsoned_solutions", "data"),
Input("current-solution", "value"),
State('campus_id','data'),
State("solutions", "data"),
)
def update_displayed_solution(jsoned_solutions, sol_index, campus_id, solutions):
"""Updates the figure and the allocation/group size boxes when current_solution is modified."""
print("Running 'update_displayed_solution'.")
k = len (campuses[campus_id]['categories'])
# If sol_index is None, return None
if sol_index is None:
return blank_fig, (None,)*k, (None,)*k
# If sol_index is not an int, do nothing.
elif not isinstance(sol_index, int):
return no_update, [no_update]*k, [no_update]*k
# Load the solution from dataframe
row_index = solutions[sol_index-1]
jsoned_solutions = json.loads(jsoned_solutions)
specific = jsoned_solutions['data'][row_index]
specific2 = pd.DataFrame(specific, jsoned_solutions['columns'])
# Get updated bar chart
fig = get_fig(specific2[0], campus_id)
# Get allocation and group sizes
g_labels = [f'g{i}' for i in range(1,k+1)]
t_labels = [f't{i}' for i in range(1,k+1)]
t = list(specific2[0][t_labels])
g = list(specific2[0][g_labels])
# Return figure, allocation, and group sizes
return fig, t, g
@dash_app.callback(
Output("solutions", "data"),
Output("threshold_vals", "data"),
Output("threshold_h_val", "data"),
Output("solution-num-sentence", "children"),
Output("current-solution", "value"),
Output("current-solution", "max"),
Output("jsoned_solutions", "data"),
Input({'type': 'threshold', 'index': ALL}, 'value'),
Input({'type': 'threshold_h'}, 'value'),
Input("asked_no_solutions_store", "data"),
State('campus_id', 'data'),
State("current-solution", "value")
)
def update_solution_set(thresholds, threshold_h, asked_no_solutions, campus_id, current_sol):
"""Updates the set of solutions stored when one of the thresholds changes."""
print("Running 'update_solution_set'.")
# Check that all thresholds are integers, otherwise do nothing.
if not all(map(lambda x: isinstance(x, int), thresholds)):
return (no_update,)*6
sols, jsoned_solutions = get_solutions(thresholds, threshold_h, campus_id)
num_sols = len(sols)
if current_sol is not None and current_sol < num_sols:
picked_sol = current_sol
elif num_sols > 0:
picked_sol = random.randint(1, num_sols)
else:
picked_sol = None
if num_sols != 1:
solutions_sentence = _("There are {} solutions that satisfy the thresholds.").format(num_sols)
else:
solutions_sentence = _("There is one solution that satisfies the thresholds.")
return sols, thresholds, threshold_h, solutions_sentence, picked_sol, num_sols, jsoned_solutions
def get_solutions(thresholds, threshold_h, campus_id):
if campus_id is not None:
print ("Reading file", campuses[campus_id]['file'])
df = pd.read_csv(campuses[campus_id]['file'])
k = len(campuses[campus_id]['categories'])
if df.columns.size != 3*k+1:
raise Exception("Data input has inconsistent number of categories!")
g_labels = [f'g{i}' for i in range(1,k+1)]
t_labels = [f't{i}' for i in range(1,k+1)]
contain_labels = [f'Containment {i}' for i in range(1,k+1)]
health_label = ['Health']
obj_labels = health_label + contain_labels
col_labels = g_labels + t_labels + obj_labels
df.columns = col_labels
"""Return list of solutions (=indices of dataframe) that are not filtered out by thresholds."""
df = df.sort_values(by=['Health'], ignore_index=True)
contain_mask = (df[contain_labels] <= thresholds[:]).all(axis=1)
health_mask = (-df[health_label] >= threshold_h).all(axis=1)
mask = contain_mask & health_mask
return list(mask[mask].index), df.to_json(orient="split")
@dash_app.callback(
Output("solutions-row", "children"),
Input('save-button', 'n_clicks'),
State('campus_id','data'),
State("current-solution", "value"),
State("solutions", "data"),
State("jsoned_solutions", "data"),
State("solutions-row", "children")
)
def save_solution(n_clicks, campus_id, sol_index, solutions, jsoned_solutions, saved_solutions):
"""Saves the current figure and the allocations / group sizes when the save button is clicked."""
print("Running 'save_solution'.")
# If sol_index is not an int, do nothing.
if not isinstance(sol_index, int):
return no_update
row_index = solutions[sol_index-1]
jsoned_solutions = json.loads(jsoned_solutions)
specific = jsoned_solutions['data'][row_index]
specific2 = pd.DataFrame(specific, jsoned_solutions['columns'])
k = len(campuses[campus_id]['categories'])
# Get updated box-graph
fig = get_fig(specific2[0], campus_id)
# Get allocation and group sizes
g_labels = [f'g{i}' for i in range(1,k+1)]
t_labels = [f't{i}' for i in range(1,k+1)]
t = list(specific2[0][t_labels])
g = list(specific2[0][g_labels])
# Get time at which solution is saved, to use as index
timestamp = time.time()
column = dbc.Col([
dbc.Card([
dcc.Graph(id={'type': 'saved-graph', 'index': sol_index},
figure=fig, config={'staticPlot': True}, className="mb-1"),
html.Span(_("Allocation: {}.").format(t)),
html.Span(_("Group sizes: {}.").format(g)),
], id={'type': 'saved_solution', 'index': timestamp}, className="p-3 mb-3"),
], width=6)
saved_solutions.append(column)
# Return solution column
return saved_solutions | StarcoderdataPython |
1620797 | import os
import json
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from pydatamail_google.base import GoogleDriveBase, GoogleMailBase
class Drive(GoogleDriveBase):
def __init__(self, client_service_file=None, config_folder="~/.pydatamail"):
"""
Google Drive class to manage files via the Google drive API directly from Python
Args:
client_service_file (str/ None): path to the credentials.json file
typically "~/.pydatamail_google/credentials.json"
config_folder (str): the folder for the configuration, typically "~/.pydatamail_google"
"""
connect_dict = {
"api_name": "drive",
"api_version": "v3",
"scopes": ["https://www.googleapis.com/auth/drive"],
}
self._config_path = _create_config_folder(config_folder=config_folder)
if client_service_file is None:
client_service_file = os.path.join(self._config_path, "credentials.json")
super().__init__(
_create_service(
client_secret_file=client_service_file,
api_name=connect_dict["api_name"],
api_version=connect_dict["api_version"],
scopes=connect_dict["scopes"],
prefix="",
working_dir=self._config_path,
)
)
class Gmail(GoogleMailBase):
def __init__(
self,
client_service_file=None,
user_id="me",
config_folder="~/.pydatamail",
enable_google_drive=True,
db_user_id=1,
):
"""
Gmail class to manage Emails via the Gmail API directly from Python
Args:
client_service_file (str/ None): path to the credentials.json file
typically "~/.pydatamail/credentials.json"
userid (str): in most cases this should be simply "me"
config_folder (str): the folder for the configuration, typically "~/.pydatamail"
"""
connect_dict = {
"api_name": "gmail",
"api_version": "v1",
"scopes": ["https://mail.google.com/"],
}
# Create config directory
self._config_path = _create_config_folder(config_folder=config_folder)
if client_service_file is None:
client_service_file = os.path.join(self._config_path, "credentials.json")
self._client_service_file = client_service_file
# Read config file
config_file = os.path.join(self._config_path, "config.json")
if os.path.exists(config_file):
with open(config_file) as f:
self._config_dict = json.load(f)
else:
self._config_dict = {}
# Initialise service
google_mail_service = _create_service(
client_secret_file=self._client_service_file,
api_name=connect_dict["api_name"],
api_version=connect_dict["api_version"],
scopes=connect_dict["scopes"],
prefix="",
working_dir=self._config_path,
)
# Google drive
if enable_google_drive:
google_drive_service = Drive(client_service_file=self._client_service_file)
else:
google_drive_service = None
# Initialize database
if "database" in self._config_dict.keys():
database_email, database_ml = self.create_database(
connection_str=self._config_dict["database"]
)
else:
database_email, database_ml = None, None
super().__init__(
google_mail_service=google_mail_service,
database_email=database_email,
database_ml=database_ml,
google_drive_service=google_drive_service,
user_id=user_id,
db_user_id=db_user_id,
)
def _create_service(
client_secret_file, api_name, api_version, scopes, prefix="", working_dir=None
):
cred = None
if working_dir is None:
working_dir = os.getcwd()
token_dir = "token_files"
json_file = f"token_{api_name}_{api_version}{prefix}.json"
os.makedirs(os.path.join(working_dir, token_dir), exist_ok=True)
token_file = os.path.join(working_dir, token_dir, json_file)
if os.path.exists(token_file):
cred = Credentials.from_authorized_user_file(token_file, scopes)
if not cred or not cred.valid:
if cred and cred.expired and cred.refresh_token:
cred.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(client_secret_file, scopes)
cred = flow.run_local_server()
with open(os.path.join(working_dir, token_dir, json_file), "w") as token:
token.write(cred.to_json())
return build(api_name, api_version, credentials=cred)
def _create_config_folder(config_folder="~/.pydatamail_google"):
config_path = os.path.abspath(os.path.expanduser(config_folder))
os.makedirs(config_path, exist_ok=True)
return config_path
| StarcoderdataPython |
1706467 | from typing import Optional
from uuid import UUID
import aiohttp
import sqlalchemy.sql as sa
from sqlalchemy.engine import RowMapping
from vocabulary.common import database, settings
from vocabulary.common.log import logger
from vocabulary.models import models
async def _get_json(url: str):
async with aiohttp.ClientSession() as ses:
async with ses.get(url) as resp:
try:
json = await resp.json()
except Exception as e:
logger.exception(e)
return {}
return json
async def get_linked_words(word: str) -> list[str]:
word = word.lower().strip()
url = settings.SYNONYMS_SEARCH_URL.format(word=word)
resp = await _get_json(url)
try:
words = list(list(resp.values())[0].values())[0].keys()
words = [
i.replace('_X', ' sth/sb').replace('_', ' ')
for i in words
]
except Exception:
return []
else:
return words
async def get_words_to_learn(*,
limit: Optional[int] = None,
offset: Optional[int] = None) -> list[RowMapping]:
stmt = sa.select(models.WordToLearn)\
.order_by(models.WordToLearn.c.added_at)\
.limit(limit).offset(offset)
async with database.session() as ses:
return (await ses.execute(stmt)).mappings().all()
async def delete_word_to_learn(*,
word_id: UUID) -> Optional[RowMapping]:
stmt = sa.delete(models.WordToLearn)\
.returning(models.WordToLearn)\
.where(models.WordToLearn.c.word_id == str(word_id))
async with database.session() as ses:
return (await ses.execute(stmt)).mappings().one_or_none()
async def add_word_to_learn(*,
word: str) -> None:
stmt = models.WordToLearn.insert()\
.values(word=word)
async with database.session() as ses:
await ses.execute(stmt)
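# Hedged usage sketch (not part of the original module): the coroutines above
# must be awaited inside a running event loop; "vocabulary" is a sample word.
async def _example_usage() -> list[str]:
    words = await get_linked_words("vocabulary")
    await add_word_to_learn(word="vocabulary")
    return words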
| StarcoderdataPython |
3214211 | <reponame>waleko/libreta
import logging
from telegram.ext import Updater
import handlers.handlers
class Bot:
def __init__(self, token):
self.updater = Updater(token)
for handler in handlers.handlers:
self.updater.dispatcher.add_handler(handler)
logging.info(f"Registered a total of {len(handlers.handlers)} handlers.")
| StarcoderdataPython |
1603059 | from __future__ import annotations
import logging
from abc import ABCMeta
from dataclasses import dataclass
from typing import Any, Iterable
from pants.core.goals.publish import (
NoApplicableTargetsBehavior,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
)
from pants.engine.console import Console
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.fs import EMPTY_DIGEST, Digest
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, collect_rules, goal_rule
from pants.engine.target import FieldSet
from pants.engine.unions import union
from pants.util.logging import LogLevel
from pants.util.memo import memoized_property
from pants.util.meta import frozen_after_init
from pants.util.strutil import strip_v2_chroot_path
logger = logging.getLogger(__name__)
# TODO: Copied from LintResult/CheckResult - can this be inherited or composed?
@dataclass(frozen=True)
class DeployResult:
exit_code: int
stdout: str
stderr: str
partition_description: str | None = None
report: Digest = EMPTY_DIGEST
@classmethod
def from_fallible_process_result(
cls,
process_result: FallibleProcessResult,
*,
partition_description: str | None = None,
strip_chroot_path: bool = False,
report: Digest = EMPTY_DIGEST,
) -> DeployResult:
def prep_output(s: bytes) -> str:
return strip_v2_chroot_path(s) if strip_chroot_path else s.decode()
return cls(
exit_code=process_result.exit_code,
stdout=prep_output(process_result.stdout),
stderr=prep_output(process_result.stderr),
partition_description=partition_description,
report=report,
)
def metadata(self) -> dict[str, Any]:
return {"partition": self.partition_description}
# TODO: Copied from LintResult/CheckResult - can this be inherited or composed?
@frozen_after_init
@dataclass(unsafe_hash=True)
class DeployResults(EngineAwareReturnType):
results: tuple[DeployResult, ...]
deployer_name: str
def __init__(self, results: Iterable[DeployResult], *, deployer_name: str) -> None:
self.results = tuple(results)
self.deployer_name = deployer_name
@property
def skipped(self) -> bool:
return bool(self.results) is False
@memoized_property
def exit_code(self) -> int:
return next(
(result.exit_code for result in self.results if result.exit_code != 0), 0
)
def level(self) -> LogLevel | None:
if self.skipped:
return LogLevel.DEBUG
return LogLevel.ERROR if self.exit_code != 0 else LogLevel.INFO
def message(self) -> str | None:
if self.skipped:
return f"{self.deployer_name} skipped."
message = self.deployer_name
message += (
" succeeded."
if self.exit_code == 0
else f" failed (exit code {self.exit_code})."
)
def msg_for_result(result: DeployResult) -> str:
msg = ""
if result.stdout:
msg += f"\n{result.stdout}"
if result.stderr:
msg += f"\n{result.stderr}"
if msg:
msg = f"{msg.rstrip()}\n\n"
return msg
if len(self.results) == 1:
results_msg = msg_for_result(self.results[0])
else:
results_msg = "\n"
for i, result in enumerate(self.results):
msg = f"Partition #{i + 1}"
msg += (
f" - {result.partition_description}:"
if result.partition_description
else ":"
)
msg += msg_for_result(result) or "\n\n"
results_msg += msg
message += results_msg
return message
def cacheable(self) -> bool:
"""Is marked uncacheable to ensure that it always renders."""
return False
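# Hedged illustration (not part of the original rules file): shows how the
# summary message is assembled from hand-built results; real DeployResult
# values normally come from from_fallible_process_result().
def _example_deploy_summary() -> str:
    results = DeployResults(
        [DeployResult(exit_code=0, stdout="pushed release", stderr="")],
        deployer_name="example-deployer",
    )
    return results.message() or ""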
@union
class DeploymentFieldSet(FieldSet, metaclass=ABCMeta):
"""The fields necessary to deploy an asset."""
class DeploySubsystem(GoalSubsystem):
name = "deploy"
help = "Deploy packages to a remote."
required_union_implementations = (DeploymentFieldSet,)
class Deploy(Goal):
subsystem_cls = DeploySubsystem
@goal_rule
async def deploy(
console: Console,
deploy: DeploySubsystem,
) -> Deploy:
target_roots_to_deployment_field_sets = await Get(
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest(
DeploymentFieldSet,
goal_description="",
no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
expect_single_field_set=True,
),
)
field_set = target_roots_to_deployment_field_sets.field_sets[0]
_ = await Get(
DeployResults, DeploymentFieldSet, field_set
) # TODO: Make Flake happy
# request = await Get(DeployResults, DeploymentFieldSet, field_set)
# TODO: Do something with the result
return Deploy(exit_code=0)
def rules():
return collect_rules()
| StarcoderdataPython |
1665199 | from typing import List
class Solution:
    def numSimilarGroups(self, A: List[str]) -> int:
        return num_similar_groups_sketch(A)  # delegate to the hedged helper below
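# Hedged sketch (not from the original stub): a common approach treats two of
# the anagram strings as "similar" when they differ in at most two positions
# and counts connected components with union-find over all word pairs.
def num_similar_groups_sketch(A: List[str]) -> int:
    parent = list(range(len(A)))
    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps the trees shallow
            x = parent[x]
        return x
    def similar(a: str, b: str) -> bool:
        # Similar anagrams differ in exactly 0 or 2 positions.
        return sum(c1 != c2 for c1, c2 in zip(a, b)) <= 2
    groups = len(A)
    for i in range(len(A)):
        for j in range(i + 1, len(A)):
            if similar(A[i], A[j]):
                ri, rj = find(i), find(j)
                if ri != rj:
                    parent[ri] = rj
                    groups -= 1
    return groups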
| StarcoderdataPython |
122115 | import copy
from typing import Callable, Tuple
import numpy as np
from odyssey.distribution import Distribution
from iliad.integrators.fields import softabs
from iliad.integrators.info import CoupledInfo
from iliad.integrators.terminal import cond
from iliad.integrators.states.coupled_state import CoupledState
from iliad.linalg import solve_psd, sqrtm
def phi_a(qn: np.ndarray, xn: np.ndarray, pn: np.ndarray, yn: np.ndarray, step_size: float, vector_field: Callable) -> Tuple[np.ndarray]:
vel, force = vector_field(qn, yn)
dxn = step_size * vel
dpn = step_size * force
return qn, xn + dxn, pn + dpn, yn
def phi_b(qn: np.ndarray, xn: np.ndarray, pn: np.ndarray, yn: np.ndarray, step_size: float, vector_field: Callable) -> Tuple[np.ndarray]:
vel, force = vector_field(xn, pn)
dqn = step_size * vel
dyn = step_size * force
return qn + dqn, xn, pn, yn + dyn
def phi_c(qn: np.ndarray, xn: np.ndarray, pn: np.ndarray, yn: np.ndarray, step_size: float, omega: float) -> Tuple[np.ndarray]:
cos = np.cos(2*omega*step_size)
sin = np.sin(2*omega*step_size)
add = np.vstack([qn + xn, pn + yn])
qnmxn, pnmyn = qn - xn, pn - yn
Rsub = np.vstack((
np.hstack((cos*qnmxn + sin*pnmyn)),
np.hstack((-sin*qnmxn + cos*pnmyn))))
qnpn = 0.5*(add + Rsub).ravel()
xnyn = 0.5*(add - Rsub).ravel()
(qn, pn), (xn, yn) = np.split(qnpn, 2), np.split(xnyn, 2)
return qn, xn, pn, yn
def coupled_integrate(
vector_field: Callable,
zo: Tuple[np.ndarray],
step_size: float,
omega: float
) -> Tuple[np.ndarray]:
"""Implements the explicit integrator for non-separable Hamiltonian dynamics.
The coupled explicit integrator is composed of three component integration
steps.
Args:
vector_field: A function returning the time derivatives of position and
momentum.
zo: Tuple containing the position and momentum variables in the expanded
phase space.
step_size: Integration step_size.
omega: Binding strength between the two approximate solutions.
Returns:
qn: Terminal state of the original position variable.
xn: Terminal state of the expanded position variable.
pn: Terminal state of the original momentum variable.
yn: Terminal state of the expanded momentum variable.
"""
# Compute prerequisite quantities for the explicit integrator.
half_step = step_size / 2.0
# Apply the explicit integrator to the input.
qo, xo, po, yo = zo
qn, xn, pn, yn = phi_a(qo, xo, po, yo, half_step, vector_field)
if omega > 0:
qn, xn, pn, yn = phi_b(qn, xn, pn, yn, half_step, vector_field)
qn, xn, pn, yn = phi_c(qn, xn, pn, yn, step_size, omega)
qn, xn, pn, yn = phi_b(qn, xn, pn, yn, half_step, vector_field)
else:
qn, xn, pn, yn = phi_b(qn, xn, pn, yn, step_size, vector_field)
qn, xn, pn, yn = phi_a(qn, xn, pn, yn, half_step, vector_field)
return qn, xn, pn, yn
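# Hedged illustration (not part of the original module): for the separable
# Hamiltonian H(q, p) = 0.5 * (q @ q + p @ p) the vector field returns
# (dq/dt, dp/dt) = (p, -q), so a small explicit step should approximately
# preserve H.
def _example_coupled_step():
    def vector_field(q, p):
        return p, -q
    q0 = np.array([1.0, 0.0])
    p0 = np.array([0.0, 1.0])
    qn, xn, pn, yn = coupled_integrate(vector_field, (q0, q0, p0, p0), 0.01, 10.0)
    return 0.5 * (qn @ qn + pn @ pn)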
def constraint(q: np.ndarray, x: np.ndarray) -> np.ndarray:
"""The holonomic constraint function with which to equip the Lagrange
multiplier augmented explicit integrator. The constraint states that the
position variables in the expanded phase space must be equal.
Args:
q: Original position variable.
x: Expanded position variable.
Returns:
out: The element-wise difference between the original and expanded
position variables.
"""
return q - x
def loss(
vector_field: Callable,
zo: Tuple[np.ndarray],
step_size: float,
omega: float,
mu: np.ndarray
) -> np.ndarray:
"""A loss function representing violation of the constraint function with
respect to the inputs. In practice, one will want to identify the Lagrange
multipliers that cause the constraint to be satisfied.
Args:
vector_field: A function returning the time derivatives of position and
momentum.
zo: Tuple containing the position and momentum variables in the expanded
phase space.
step_size: Integration step_size.
omega: Binding strength between the two approximate solutions.
Returns:
c: The element-wise difference between the original and expanded
position variables.
zn: The output of the explicit integrator.
"""
qo, xo, po, yo = zo
zn = coupled_integrate(
vector_field,
(qo, xo, po + mu, yo - mu),
step_size,
omega
)
c = constraint(zn[0], zn[1])
return c, zn
def step(val, vector_field, zo, step_size, omega):
"""Single step of a Newton iteration to identify constraint-preserving Lagrange
multipliers.
"""
# Broyden's method.
mup, _, J, Jinv, cp, num_iters = val
Dx = -Jinv@cp
mun = mup + Dx
cn, aux = loss(vector_field, zo, step_size, omega, mun)
Df = cn - cp
# Update inverse using Sherman–Morrison formula.
u = (Df - J@Dx) / (Dx@Dx)
v = Dx
J += np.outer(u, v)
div = 1. + v@Jinv@u
if np.abs(div) > 1e-10:
Jinv -= ([email protected](u, v)@Jinv) / div
else:
num_mu = len(mun)
J = np.eye(num_mu)
Jinv = np.eye(num_mu)
num_iters += 1
return mun, aux, J, Jinv, cn, num_iters
def single_step(
vector_field: Callable,
state: CoupledState,
info: CoupledInfo,
step_size: float,
omega: float,
thresh: float,
max_iters: int
) -> Tuple:
"""Use the explicit integrator in combination with Lagrange multipliers in
order to satisfy the constraints that the position and momentum variables
in the expanded phase space are equal along trajectories.
Args:
vector_field: A function returning the time derivatives of position and
momentum.
state: An object containing the position and momentum variables of the
state in phase space.
info: An object that keeps track of the number of fixed point iterations
and whether or not integration has been successful.
step_size: Integration step_size.
omega: Binding strength between the two approximate solutions.
thresh: Convergence tolerance for Newton's method to find Lagrange
multipliers.
max_iters: Maximum number of iterations.
Returns:
state: An augmented state object with the updated position and momentum
and values for the log-posterior and metric and their gradients.
info: An information object with the updated number of fixed point
iterations and boolean indicator for successful integration.
"""
qo, po = state.position, state.momentum
zo = (qo, qo, po, po)
mu = np.zeros_like(qo)
# Decide whether or not to initialize the estimate of the Jacobian with the
# identity matrix or with a finite-difference approximation of the
# Jacobian.
num_mu = len(mu)
J = np.eye(num_mu)
Jinv = np.eye(num_mu)
# I think the correct course is to provide the auxiliary data. If the code
# doesn't complete a single iteration, then the auxiliary data will
# remain a vector of zeros, which is clearly incorrect.
cn, aux = loss(vector_field, zo, step_size, omega, mu)
val = (mu, aux, J, Jinv, cn, 1)
while cond(val, thresh, max_iters):
val = step(val, vector_field, zo, step_size, omega)
mu, (qn, xn, pn, yn), J, Jinv, cn, num_iters = val
# Compute whether or not integration was successful.
success = np.max(np.abs(cn)) < thresh
# Averaging the momentum variables is the projection to the cotangent
# bundle of the manifold. The averaging of the position variables is not
# necessary; they are equal under the constraint. However, averaging has a
# nicer aesthetic when only approximate constraint satisfaction is
# required.
qm = 0.5*(qn + xn)
pm = 0.5*(pn + yn)
state.position, state.momentum = qm, pm
info.num_iters += num_iters
info.success &= success
return state, info
def coupled_leapfrog(
state: CoupledState,
step_size: float,
num_steps: int,
distr: Distribution,
vector_field: Callable,
omega: float,
thresh: float,
max_iters: int
) -> Tuple[CoupledState, CoupledInfo]:
"""Implements the coupled explicit integrator where Lagrange multipliers are
used to satisfy reversibility and volume preservation.
Args:
state: An object containing the position and momentum variables of the
state in phase space.
step_size: Integration step_size.
num_steps: Number of integration steps.
        num_steps: Number of integration steps.
        distr: Distribution object whose log-density and metric are used to
            update the state after each integration step.
vector_field: A function returning the time derivatives of position and
momentum.
omega: Binding strength between the two approximate solutions.
thresh: Convergence tolerance for Newton's method to find Lagrange
multipliers.
max_iters: Maximum number of iterations.
Returns:
state: An augmented state object with the updated position and momentum
and values for the log-posterior and metric and their gradients.
info: An information object with the updated number of fixed point
iterations and boolean indicator for successful integration.
"""
state = copy.copy(state)
info = CoupledInfo()
for i in range(num_steps):
state, info = single_step(
vector_field,
state,
info,
step_size,
omega,
thresh,
max_iters
)
state.update(distr)
return state, info
| StarcoderdataPython |
127898 | #!python
import os
import psycopg2
class Database():
""" Handles interaction with the Postgres database """
def __init__(self):
self.port = 5432
self.host = 'localhost'
self.database = 'postgis'
self.user = 'postgis'
self.password = 'password'
self.getParamsFromEnv()
self.c = self.createConnection()
def __del__(self):
try:
self.closeConnection()
except:
pass
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.closeConnection()
def getParamsFromEnv(self):
"""Extracts connection parameters from the environment"""
self.port = os.getenv('PGPORT', self.port)
self.host = os.getenv('PGHOST', self.host)
self.database = os.getenv('PGDATABASE', self.database)
self.user = os.getenv('PGUSER', self.user)
self.password = os.getenv('PGPASSWORD', self.password)
def createConnection(self):
"""Initiates a connection to the PostGIS server"""
conn_string = "host='{}' dbname='{}' user='{}' password='{}' port={}".format(
self.host, self.database, self.user, self.password, self.port)
return psycopg2.connect(conn_string)
def encodeLiteral(self, string):
"""Encodes a string literal"""
return string.replace("'","''")
def encodeTableName(self, schema, table):
"""Encodes a table name to a safe string to use in a query"""
return '"{}"."{}"'.format(schema, table)
def encodeSchemaName(self, schema):
"""Encodes a schema name to a safe string to use in a query"""
return '"{}"'.format(schema)
def encodeColumnName(self, column):
"""Encodes a column name to a safe string to use in a query"""
return '"{}"'.format(column)
def fetchSqlRecords(self, sql):
"""Executes a SQL query and returns the result rows"""
cursor = self.c.cursor()
cursor.execute(sql)
r = cursor.fetchall()
cursor.close()
return r
def runSql(self, sql):
"""Executes a SQL query"""
cursor = self.c.cursor()
cursor.execute(sql)
self.c.commit()
cursor.close()
return True
def runSqlNoTransaction(self, sql):
"""Executes a SQL query outside of a transaction block"""
self.c.autocommit = True
cursor = self.c.cursor()
cursor.execute(sql)
self.c.commit()
cursor.close()
self.c.autocommit = False
return True
def tableExists(self, schema, table):
"""Tests whether the specified table exists in the database"""
r = self.fetchSqlRecords(
"SELECT to_regclass('{}.{}')".format(schema, table))
return r[0][0]
def tableHasColumn(self, schema, table, column):
"""Tests whether a table has a specified column"""
res = self.fetchSqlRecords(
"select count(*) from information_schema.columns c where c.table_schema = '{}' and c.table_name='{}' and c.column_name='{}'".format(schema, table, column))
return res[0][0] > 0
def createTable(self, schema, table, cols):
"""Creates a new table in the database, with specified columns.
param cols is an array of [name, definition, extra defs (eg PRIMARY KEY)]
"""
col_definition = ','.join(
['"{}" {} {}'.format(c[0], c[1], c[2]) for c in cols])
return self.runSql('CREATE TABLE {} ({})'.format(self.encodeTableName(schema, table), col_definition))
def setTableComment(self, schema, table, comment):
"""Sets the comment for the specified table
"""
return self.runSql('COMMENT ON TABLE {} IS \'{}\''.format(self.encodeTableName(schema, table), self.encodeLiteral(comment)))
def dropTable(self, schema, table, cascade=False):
""" Drops a table from the database """
if cascade:
return self.runSql('DROP TABLE IF EXISTS {} CASCADE'.format(self.encodeTableName(schema, table)))
else:
return self.runSql('DROP TABLE IF EXISTS {}'.format(self.encodeTableName(schema, table)))
def truncateTable(self, schema, table):
""" Truncates a table from the database """
return self.runSql('TRUNCATE TABLE {}'.format(self.encodeTableName(schema, table)))
def getTableColumnDefs(self, schema, table):
""" Gets the column definitions for the specified table """
src_columns = self.fetchSqlRecords(
"select c.column_name, data_type, character_maximum_length, numeric_precision, numeric_scale from information_schema.columns c where c.table_schema = '{}' and c.table_name='{}'".format(schema, table))
return [dict(zip(('name', 'type', 'max_length', 'precision', 'scale'), c)) for c in src_columns]
def getGeometryColumnDef(self, schema, table, column):
""" Returns the definition of a geometry column """
defs = self.fetchSqlRecords(
"select type, srid from geometry_columns where f_table_schema='{}' and f_table_name='{}' and f_geometry_column='{}'".format(schema, table, column))
if not len(defs) == 1:
return None
return 'geometry({},{})'.format(defs[0][0], defs[0][1])
def recordCount(self, schema, table):
""" Returns the number of rows in a table """
r = self.fetchSqlRecords(
"SELECT count(*) FROM {}".format(self.encodeTableName(schema, table)))
return r[0][0]
def copyData(self, src_schema, src_table, src_columns, dest_schema, dest_table, dest_columns):
"""Copies data from one table to another"""
sql = 'INSERT INTO {} ( {} ) SELECT {} FROM {}'.format(self.encodeTableName(dest_schema, dest_table), ','.join(dest_columns),
','.join(src_columns), self.encodeTableName(src_schema, src_table))
return self.runSql(sql)
def schemaExists(self, schema):
"""Tests whether the specified schema exists in the database"""
r = self.fetchSqlRecords(
"SELECT count(*) FROM information_schema.schemata WHERE schema_name = '{}'".format(schema))
return r[0][0] > 0
def createSchema(self, schema):
"""Creates a schema"""
return self.runSql('CREATE SCHEMA IF NOT EXISTS {}'.format(self.encodeSchemaName(schema)))
def createSpatialIndex(self, schema, table, column):
"""Creates a spatial index on a geometry column"""
index_name = '{}_{}_idx'.format(table, column)
return self.runSql('CREATE INDEX {} ON {} USING gist ({})'.format(index_name, self.encodeTableName(schema, table), self.encodeColumnName(column)))
def vacuum(self, schema, table):
"""Vacuums a table"""
return self.runSqlNoTransaction('VACUUM ANALYSE {}'.format(self.encodeTableName(schema, table)))
def closeConnection(self):
self.c.close()
def ogrString(self):
"""Returns an OGR format string which can be used to connect to the database"""
return 'host={} user={} port={} dbname={} password={}'.format(self.host, self.user, self.port, self.database, self.password)
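# Hedged usage sketch (not part of the original module): connection settings
# come from the PG* environment variables or the defaults above.
def _example_usage():
    with Database() as db:
        if not db.schemaExists("staging"):
            db.createSchema("staging")
        return db.fetchSqlRecords("SELECT version()")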
| StarcoderdataPython |
109529 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 16:23:57 2019
@author: hitansh
"""
import numpy as np
import os
import sys
# import tarfile
import tensorflow as tf
# import zipfile
# from distutils.version import StrictVersion
# from collections import defaultdict
# from io import StringIO
# from matplotlib import pyplot as plt
from PIL import Image
import cv2
# from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
import imutils
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# print('pwd: ' + os.getcwd())
'''
## Variables
Any model exported using the `export_inference_graph.py` tool can be loaded here simply
by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
By default we use an "SSD with Mobilenet" model here.
See the [detection model zoo]
(https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md)
for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
'''
print('Have you already satisfied the protobuf requirement? Check Wiki.')
os.chdir(r'../../../data/TensorFlow/workspace/training_demo/')
# print('Changed to: ' + os.getcwd())
# Path to frozen detection graph. This is the actual model that is used for the object detection.
# PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
PATH_TO_FROZEN_GRAPH = 'trained-inference-graphs/output_inference_graph_v1.pb/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
# PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
PATH_TO_LABELS = 'annotations/label_map.pbtxt'
# Loading graph into memory
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
'''
rh: right hand
lh: left hand
ll: left label
rl: right label
'''
# Detection
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
# PATH_TO_TEST_IMAGES_DIR = 'test_images'
PATH_TO_TEST_IMAGES_DIR = '../../../train/XR_HAND'
TEST_IMAGE_PATHS = []
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# loading files list
for r, d, f in os.walk(PATH_TO_TEST_IMAGES_DIR):
for file in f:
if '.png' in file:
# os.remove(os.path.join(r,file))
TEST_IMAGE_PATHS.append(os.path.join(r, file))
total_files = len(TEST_IMAGE_PATHS)
# This function runs detection on a single image (hands, labels, etc.) and returns a result dictionary.
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.int64)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
return output_dict
# Looping through all images
log = open('../../../../../hand_detection_script_log.txt', 'w')
# this file is one folder behind x_ray folder
print('Starting Script. Check log file at one directory behind your git folder.')
log.write('Starting script.\n')
j = 0
count = 0
SAVE_PATH = r'../../../train/XR_HAND_CENTRED_NEW_2'
if not os.path.exists(SAVE_PATH):
os.mkdir(SAVE_PATH)
for image_path in TEST_IMAGE_PATHS:
count += 1
# print(count,end='\r')
log.write(str(count) + ' ')
image = Image.open(image_path)
image_rotate = image.transpose(Image.ROTATE_270)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = cv2.imread(image_path, 1)
image_np_rotate = imutils.rotate_bound(image_np, 90)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_np_rotate_expanded = np.expand_dims(image_np_rotate, axis=0)
# Actual detection.
output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
output_dict_rotate = run_inference_for_single_image(image_np_rotate_expanded, detection_graph)
# Visualization of the results of a detection.
boxes = output_dict['detection_boxes']
boxes_rotate = output_dict_rotate['detection_boxes']
if output_dict_rotate['num_detections'] > output_dict['num_detections']:
image = image_rotate
image_np = image_np_rotate
boxes = boxes_rotate
bool_anything_found = 0
detection_number = 0
for i in range(output_dict['num_detections']):
if(output_dict['detection_scores'][i] > 0.70):
j += 1
detection_number += 1
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
im_width, im_height = image_pil.size
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.plot([left,right],[bottom,top],linewidth=1.0)
# plt.imshow(image_np)
# check if it is a label
if(output_dict['detection_classes'][i] == 3 or output_dict['detection_classes'][i] == 4):
'''
This code can be used to paint labels, however, it is not implemented
mask=np.zeros(image_np.shape,dtype='uint8')
mask[int(top):int(bottom+top), int(left):int(left+right)]=image_np[int(top):int(bottom+top), int(left):int(left+right)]
mask[:int(top)]
'''
# j+=1
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.imshow(mask)
# inpainted_image=cv2.inpaint(image_np,mask,3,cv2.INPAINT_TELEA)
# cv2.imshow(inpainted_image)
# print('Label', end='\r')
pass
# if it is not a label
# will only come here if score>70% and not a label
else:
bool_anything_found = 1
j = j + 1
                crop_img = image_np[int(top):int(bottom), int(left):int(right)]
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.imshow(crop_img)
IMAGE_PATH_DIR = os.path.join(SAVE_PATH, image_path.split('/')[-3], image_path.split('/')[-2])
if not os.path.exists(IMAGE_PATH_DIR):
os.makedirs(IMAGE_PATH_DIR)
IMAGE_PATH_NEW = IMAGE_PATH_DIR + '/' + image_path.split('/')[-1][:-4] + r'_cropped_' + str(detection_number) + '.png'
cv2.imwrite(IMAGE_PATH_NEW, crop_img)
log.flush()
if(not bool_anything_found):
# print('Nothing found in this image')
# save the image as it is
IMAGE_PATH_DIR = os.path.join(SAVE_PATH, image_path.split('/')[-3], image_path.split('/')[-2])
if not os.path.exists(IMAGE_PATH_DIR):
os.makedirs(IMAGE_PATH_DIR)
IMAGE_PATH_NEW = IMAGE_PATH_DIR + '/' + image_path.split('/')[-1][:-4] + r'_undetected.png'
cv2.imwrite(IMAGE_PATH_NEW, image_np)
# plt.figure(j,figsize=IMAGE_SIZE)
# plt.imshow(image_np)
pass
log.write('\nFertig.')
log.close()
| StarcoderdataPython |
4803488 | # ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the reproman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# NOTE: The singularity classes SingularitySession and PTYSingularitySession
# are tested in test_session.test_session_abstract_methods()
import os.path as op
import pytest
import logging
import uuid
import re
from ..singularity import Singularity, SingularitySession
from ...cmd import Runner
from ...tests.skip import mark
from ...utils import swallow_logs
def test_singularity_resource_image_required():
with pytest.raises(TypeError):
Singularity(name='foo')
@pytest.mark.xfail(reason="Singularity Hub is down", run=False)
@mark.skipif_no_network
@mark.skipif_no_singularity
def test_singularity_resource_class(tmpdir):
tmpdir = str(tmpdir)
with swallow_logs(new_level=logging.DEBUG) as log:
Runner(cwd=tmpdir).run(
['singularity', 'pull', '--name', 'img',
'shub://truatpasteurdotfr/singularity-alpine'])
# ATTN: Apparently an instance name can't contain a hyphen.
name = "reproman_test_{}".format(str(uuid.uuid4())[:4])
image = op.join(tmpdir, 'img')
# Test creating a new singularity container instance.
resource = Singularity(name=name, image=image)
assert resource.name == name
assert resource.image == image
resource.connect()
assert resource.id is None
assert resource.status is None
list(resource.create())
to_delete = [resource]
try:
assert resource.id.startswith(name + "-")
assert resource.status == 'running'
# Test trying to create an already running instance.
resource_duplicate = Singularity(name=name, image=image)
resource_duplicate.connect()
assert resource_duplicate.id.startswith(name + "-")
assert resource_duplicate.status == 'running'
list(resource_duplicate.create())
assert "Resource '{}' already exists".format(name) in log.out
# But using a different name with the same image would work.
resource_nondup = Singularity(name=name + "_nondup", image=image)
resource_nondup.connect()
resource_nondup.name = name + "_nondup"
to_delete.append(resource_nondup)
# Test retrieving instance info.
info = resource.get_instance_info()
assert info['name'] == name
assert re.match(r'^\d+$', info['pid'])
info["image"] = image
# Test starting an instance.
with pytest.raises(NotImplementedError):
resource.start()
# Test stopping an instance.
with pytest.raises(NotImplementedError):
resource.stop()
# Test getting a resource session.
session = resource.get_session()
assert isinstance(session, SingularitySession)
finally:
# Test deleting an instance, but do it here to try to
# unregister the test instance even if a check above fails.
for res in to_delete:
res.delete()
assert resource.id is None
assert resource.status is None
# Test retrieving info from a non-existent instance.
info = resource.get_instance_info()
assert info is None
| StarcoderdataPython |
194722 | <reponame>zopefoundation/grokcore.view<filename>src/grokcore/view/tests/base/view/templatedirectory_with_path_sep_fixture.py
"""
This should fail because you cannot use a path separator in the templatedir
directive.
"""
import grokcore.view as grok
import os.path
grok.templatedir('templatedirectoryname' + os.path.sep + 'subdirname')
| StarcoderdataPython |
1702798 | <filename>aydin/nn/pytorch/lucyrichardson/lucyrichardson.py<gh_stars>10-100
import numpy
import torch
import torch.nn.functional as F
def richardson_lucy_pytorch(image, psf, iterations=50, clip=True, donut=False):
use_cuda = True
device_index = 0
device = torch.device(f"cuda:{device_index}" if use_cuda else "cpu")
# print(f"Using device: {device}")
    # Work in float64 precision.
    image = image.astype(float)
    psf = psf.astype(float)
im_deconv = numpy.full(image.shape, image.mean())
psf_mirror = psf[::-1, ::-1].copy()
psf_size = psf_mirror.shape[0]
image = (
torch.from_numpy(image[numpy.newaxis, numpy.newaxis, ...]).float().to(device)
)
psf = torch.from_numpy(psf[numpy.newaxis, numpy.newaxis, ...]).float().to(device)
psf_mirror = (
torch.from_numpy(psf_mirror[numpy.newaxis, numpy.newaxis, ...])
.float()
.to(device)
)
im_deconv = (
torch.from_numpy(im_deconv[numpy.newaxis, numpy.newaxis, ...])
.float()
.to(device)
)
for _ in range(iterations):
pad = (psf_size - 1) // 2
convolved = F.conv2d(F.pad(im_deconv, (pad, pad, pad, pad), "reflect"), psf)
relative_blur = image / convolved
im_deconv *= F.conv2d(
F.pad(relative_blur, (pad, pad, pad, pad), "reflect"), psf_mirror
)
if clip:
im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < 0] = 0
return im_deconv.detach().cpu().numpy().squeeze()
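# Hedged usage sketch (not part of the original module): deconvolves a random
# image with a small box PSF; note the function above unconditionally selects
# cuda:0, so this requires a CUDA-capable GPU.
def _example_deconvolution():
    rng = numpy.random.default_rng(0)
    psf = numpy.ones((5, 5)) / 25.0
    image = rng.random((64, 64))
    return richardson_lucy_pytorch(image, psf, iterations=10)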
| StarcoderdataPython |
3252352 | <reponame>DOAJ/doaj
from copy import deepcopy
from portality.lib import swagger
from portality.lib.seamless import SeamlessMixin
from portality.models.v2.shared_structs import JOURNAL_BIBJSON
_SHARED_STRUCT = JOURNAL_BIBJSON
class OutgoingCommonJournalApplication(SeamlessMixin, swagger.SwaggerSupport):
"""
~~APIOutgoingCommonJournalApplication:Model->Seamless:Library~~
"""
@classmethod
def from_model(cls, journal_or_app):
d = deepcopy(journal_or_app.data)
# Prevent the field from appearing in the outgoing API models
if d.get("bibjson", {}).get("oa_start"):
del d["bibjson"]["oa_start"]
return cls(d)
| StarcoderdataPython |
152551 | <gh_stars>0
import re
import os
import sys
import pprint
import itertools
from os.path import splitext
from collections import Counter, defaultdict, namedtuple
from multiprocessing import Pool, Process, JoinableQueue
from code_clippy_dataset.utils import infer_source_from_data_dir, load_dataset_infer
import tqdm
import humanize
import numpy as np
from datasets import load_dataset, load_from_disk
from transformers import GPT2TokenizerFast, PreTrainedTokenizerFast
from tokenizers import ByteLevelBPETokenizer
import sentencepiece
from hacky_linguist import COMMON_LANGUAGES, EXTENSION_TO_LANGUAGE
# HF: Whether or not to add an initial space to the input. This allows to treat
# the leading word just as any other word. (GPT2 tokenizer detect beginning of
# words by the preceding space).
ADD_PREFIX_SPACE = False
#LANGUAGE_SOURCES = ['repo_language', 'guesslang', 'filename_extension']
#LANGUAGE_SOURCES = ['repo_language', 'filename_extension', 'linguist']
LANGUAGE_SOURCES = ['linguist']
POSSIBLE_TOKENIZERS = ["gpt2", "codet5", "bpe", "bpe_psno-False", "bpe_psno-True", "bpe_rn", "sentencepiece", "sentencepiece_rn"]
from train_multicorp_tokenizer import NEWLINE_REP
class Worker(Process):
def __init__(self, index, input_queue, output_queue, progress_bar=None, tokenizer_names=POSSIBLE_TOKENIZERS, **kwargs):
super().__init__(**kwargs)
self.index = index
self.input_queue = input_queue
self.output_queue = output_queue
self.progress_bar = progress_bar
self.tokenizer_names = tokenizer_names[:]
for tn in tokenizer_names:
assert tn in POSSIBLE_TOKENIZERS, f"invalid tokenizer {tn}"
def run(self):
#print(f"worker {self.index} starting")
if 'guesslang' in LANGUAGE_SOURCES:
from guesslang import Guess
guess = Guess()
else:
guess = None
# guess = None
tokenizers = {}
if "gpt2" in self.tokenizer_names:
tokenizers["gpt2"] = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=ADD_PREFIX_SPACE)
if "codet5" in self.tokenizer_names:
codet5_tokenizer_model = ByteLevelBPETokenizer.from_file(
"../CodeT5/tokenizer/salesforce/codet5-vocab.json",
"../CodeT5/tokenizer/salesforce/codet5-merges.txt"
)
codet5_tokenizer_model.add_special_tokens([
"<pad>",
"<s>",
"</s>",
"<unk>",
"<mask>"
])
tokenizers["codet5"] = PreTrainedTokenizerFast(tokenizer_object=codet5_tokenizer_model)
if "bpe" in self.tokenizer_names:
our_tokenizer_model = ByteLevelBPETokenizer.from_file(
"tokenizers/github-py+so_bpe_rn-False/vocab.json",
"tokenizers/github-py+so_bpe_rn-False/merges.txt",
pretokenizer_split_newlines_only=True,
)
tokenizers["bpe"] = PreTrainedTokenizerFast(tokenizer_object=our_tokenizer_model)
if "bpe_psno-False" in self.tokenizer_names:
our_tokenizer_model = ByteLevelBPETokenizer.from_file(
"tokenizers/github-py+so_psno-False/vocab.json",
"tokenizers/github-py+so_psno-False/merges.txt",
pretokenizer_split_newlines_only=False,
)
tokenizers["bpe_psno-False"] = PreTrainedTokenizerFast(tokenizer_object=our_tokenizer_model)
if "bpe_psno-True" in self.tokenizer_names:
our_tokenizer_model = ByteLevelBPETokenizer.from_file(
"tokenizers/github-py+so_psno-True/vocab.json",
"tokenizers/github-py+so_psno-True/merges.txt",
pretokenizer_split_newlines_only=True,
)
tokenizers["bpe_psno-True"] = PreTrainedTokenizerFast(tokenizer_object=our_tokenizer_model)
if "bpe_rn" in self.tokenizer_names:
our_tokenizer_model_rn = ByteLevelBPETokenizer.from_file(
"tokenizers/github-py+so_bpe_rn-True/vocab.json",
"tokenizers/github-py+so_bpe_rn-True/merges.txt",
)
our_tokenizer_rn = PreTrainedTokenizerFast(tokenizer_object=our_tokenizer_model_rn)
def bpe_rn_tokenize(text):
text = text.replace("\n", NEWLINE_REP)
return our_tokenizer_rn(text)
tokenizers["bpe_rn"] = bpe_rn_tokenize
if "sentencepiece" in self.tokenizer_names:
sp_tokenizer = sentencepiece.SentencePieceProcessor()
SPLIT_LINES = re.compile(f'.*[\r\n]+')
sp_tokenizer.Load("tokenizers/github-py+so_spm-rn-False.model")
def sp_tokenize(text):
pieces = re.findall(SPLIT_LINES, text)
if not pieces:
pieces = [text]
return {
'input_ids': [token for piece in pieces for token in sp_tokenizer.EncodeAsIds(piece)]
}
tokenizers["sentencepiece"] = sp_tokenize
if "sentencepiece_rn" in self.tokenizer_names:
sp_tokenizer_rn = sentencepiece.SentencePieceProcessor()
sp_tokenizer.Load("tokenizers/github-py+so_spm-rn-True.model")
def sp_tokenize_rn(text):
text = text.replace("\n", NEWLINE_REP)
return {
'input_ids': sp_tokenizer_rn.EncodeAsIds(text)
}
tokenizers["sentencepiece_rn"] = sp_tokenize_rn
while True:
x = self.input_queue.get()
if self.progress_bar:
with self.progress_bar.get_lock():
self.progress_bar.update(1)
if x is None:
self.output_queue.put(None)
self.input_queue.task_done()
break
tabulated = self.tabulate_single(guess, tokenizers, x)
self.output_queue.put((x, tabulated))
self.input_queue.task_done()
print(f"worker {self.index} ending")
@staticmethod
def tabulate_single(guess, tokenizers, x):
d = {}
for ls in LANGUAGE_SOURCES:
if ls == 'repo_language':
found_language = False
for key in ['repo_language', 'main_language']:
if not found_language and key in x:
d[ls] = x[key]
found_language = True
assert found_language
elif ls == 'guesslang':
d[ls] = guess.language_name(x['text'])
elif ls == 'filename_extension' or ls == 'linguist':
fname_toks = splitext(x['file_name'])
if len(fname_toks) > 1:
if ls == 'filename_extension':
d[ls] = fname_toks[-1]
elif ls == 'linguist':
d[ls] = EXTENSION_TO_LANGUAGE[fname_toks[-1]]
else:
d[ls] = None
d['file_size'] = len(x['text'])
for tokenizer_name, tokenizer in tokenizers.items():
tokens = tokenizer(x['text'])['input_ids']
token_count = len(tokens)
d[f'{tokenizer_name}_token_count'] = token_count
# d[f'{tokenizer_name}_above_1024'] = 1.0 if token_count > 1024 else 0.0
# d[f'{tokenizer_name}_above_2048'] = 1.0 if token_count > 2048 else 0.0
# d[f'{tokenizer_name}_lost_1024'] = max(token_count - 1024, 0)
# d[f'{tokenizer_name}_lost_2048'] = max(token_count - 2048, 0)
return d
def foo(train):
data = []
for x in train:
data.append(x)
if len(data) >= 100:
break
    return tabulate(data, tokenizer_names=POSSIBLE_TOKENIZERS, n_procs=4)
def readable(x, is_size=False):
if isinstance(x, float):
return f"{x:.2f}"
else:
if is_size:
return humanize.naturalsize(x)
else:
return f"{x:_}"
class WeightedSum:
def __init__(self):
self.sum = 0
self.total_weight = 0
def add(self, value, weight=1.0):
self.sum += value
self.total_weight += weight
@property
def mean(self):
if self.total_weight > 0:
return float(self.sum) / self.total_weight
else:
return 0.0
def agg_token_counts(token_counts, agg_fn=np.sum, human_readable=True, limit=None, is_size=False):
def inner_display(d):
d['total'] = sum(d.values())
if human_readable:
return {k: readable(v, is_size=is_size) for k, v in Counter(d).most_common(limit)}
else:
return d
return {
language_source: inner_display({
language: agg_fn(values)
for language, values in inner.items()
})
for language_source, inner in token_counts.items()
}
def size_counter_to_human_readable(f_counter, limit=None):
return {k: humanize.naturalsize(v) for k, v in f_counter.most_common(limit)}
def display_counts(file_counts, tabulated, sum_stats, mean_stats):
pprint.pprint(file_counts)
for stat in sum_stats:
print(f"{stat} sum:")
pprint.pprint(agg_token_counts(tabulated[stat], lambda ws: ws.sum, is_size='size' in stat))
for stat in mean_stats:
print(f"{stat} mean:")
pprint.pprint(agg_token_counts(tabulated[stat], lambda ws: ws.mean, is_size='size' in stat))
def tabulate(data, tokenizer_names, n_procs=10, max_items=None):
file_counts = defaultdict(Counter)
# tokenizer, language source, language
tabulated = defaultdict(lambda: defaultdict(lambda: defaultdict(WeightedSum)))
sum_stats = ['file_size']
mean_stats = ['file_size']
if tokenizer_names:
for tokenizer in tokenizer_names:
sum_stats.append(f'{tokenizer}_token_count')
mean_stats.append(f'{tokenizer}_token_count')
# sum_stats.append(f'{tokenizer}_lost_1024')
# sum_stats.append(f'{tokenizer}_lost_2048')
# mean_stats.append(f'{tokenizer}_above_1024')
# mean_stats.append(f'{tokenizer}_above_2048')
all_stats = list(set(sum_stats) | set(mean_stats))
in_queue, out_queue = JoinableQueue(), JoinableQueue()
workers = []
running_count = 0
for i in range(n_procs):
# signal to stop
worker = Worker(i, in_queue, out_queue, tokenizer_names=tokenizer_names)
worker.start()
running_count += 1
workers.append(worker)
if max_items is not None:
data = itertools.islice(data, max_items)
num_items = max_items
else:
num_items = len(data)
num_jobs = 0
for x in tqdm.tqdm(data, ncols=80, desc="loading queue"):
in_queue.put(x)
num_jobs += 1
for i in range(n_procs):
in_queue.put(None)
file_count = 0
with tqdm.tqdm(total=num_jobs, ncols=80, desc="processing") as progress_bar:
while num_jobs > 0:
r = out_queue.get()
if r is None:
running_count -= 1
#print(f"running count: {running_count}")
out_queue.task_done()
continue
datum, this_tabulated = r
num_jobs -= 1
for language_source in LANGUAGE_SOURCES:
language = this_tabulated[language_source]
file_counts[language_source][language] += 1
#for tokenizer in ['gpt2', 'codet5', 'ours']:
for stat in all_stats:
tabulated[stat][language_source][language].add(this_tabulated[stat], 1)
progress_bar.update(1)
out_queue.task_done()
file_count += 1
if file_count % 100000 == 0:
print(f"total files: {file_count}")
display_counts(file_counts, tabulated, sum_stats, mean_stats)
print()
#[worker.join() for worker in workers]
# print("calling inqueue.join")
# in_queue.join()
# print("calling outqueue.join")
# out_queue.join()
print("done")
return file_counts, tabulated, sum_stats, mean_stats
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("data_dir")
#parser.add_argument("--source", default='github')
parser.add_argument("--num_items", type=int)
parser.add_argument("--n_procs", type=int, default=10)
parser.add_argument("--tokenizer_names", nargs='*', choices=POSSIBLE_TOKENIZERS, default=POSSIBLE_TOKENIZERS)
args = parser.parse_args()
print(' '.join(sys.argv))
data_dir = args.data_dir
# datasets are hashed based on arguments, which are sensitive to trailing dashes (even though loading is not)
data_dir = data_dir.rstrip("/")
print(data_dir)
dataset = load_dataset_infer(data_dir)
file_counts, tabulated, sum_stats, mean_stats = tabulate(
dataset, tokenizer_names=args.tokenizer_names, n_procs=args.n_procs, max_items=args.num_items
)
display_counts(file_counts, tabulated, sum_stats, mean_stats)
| StarcoderdataPython |
1696279 | <gh_stars>0
#!/usr/bin/env python
# coding=utf8
from copy import deepcopy
"""
Important: the Stack class was copy-pasted here because it is used in a study
exercise. The normal use case would be to import the class instead.
"""
class Stack:
def __init__(self):
self.stack = []
def size(self):
return len(self.stack)
def pop(self):
if not self.stack:
return None
else:
value = deepcopy(self.stack[0])
del self.stack[0]
return value
def push(self, value):
self.stack.insert(0, value)
def peek(self):
if not self.stack:
return None
else:
return self.stack[0]
def check_brackets_in_string(check_str):
    # Create a stack to validate bracket nesting in the string
    stack = Stack()
for element in check_str:
if element == "(":
stack.push(1)
elif element == ")" and stack.size() > 0:
stack.pop()
        else:
            # An unmatched ")" or any non-parenthesis character fails validation.
            return False
return stack.size() == 0
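# Hedged usage sketch (not part of the original file).
if __name__ == "__main__":
    print(check_brackets_in_string("(())"))  # True
    print(check_brackets_in_string("(()"))   # False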
| StarcoderdataPython |
1761372 | # coding: utf-8
import numpy
from plyfile import PlyData, PlyElement
def ply_autocut(inputfile, outputfile, percent):
plydata = PlyData.read(inputfile)
print (plydata['vertex'].dtype)
x = plydata['vertex']['x'].copy()
y = plydata['vertex']['y'].copy()
z = plydata['vertex']['z'].copy()
nx = plydata['vertex']['nx'].copy()
ny = plydata['vertex']['ny'].copy()
nz = plydata['vertex']['nz'].copy()
r = plydata['vertex']['red'].copy()
g = plydata['vertex']['green'].copy()
b = plydata['vertex']['blue'].copy()
a = plydata['vertex']['alpha'].copy()
# rmat = [[1, 0, 0], [0, 0, -1], [0, 1, 0]]
xyz = numpy.array([x,y,z])
xarr = numpy.array([x])
yarr = numpy.array([y])
zarr = numpy.array([z])
nxarr = numpy.array([nx])
nyarr = numpy.array([ny])
nzarr = numpy.array([nz])
normal = numpy.array([nx,ny,nz])
xmax = numpy.percentile(xarr, 100-percent)
xmin = numpy.percentile(xarr, percent)
ymax = numpy.percentile(yarr, 100-percent)
ymin = numpy.percentile(yarr, percent)
zmax = numpy.percentile(zarr, 100-percent)
zmin = numpy.percentile(zarr, percent)
idx = numpy.where(\
(xarr >= xmin) & (xarr <= xmax) & \
(yarr >= ymin) & (yarr <= ymax) & \
(zarr >= zmin) & (zarr <= zmax))
idx = idx[1]
# print(idx)
# res_xyz = numpy.array([xarr[idx],yarr[idx],zarr[idx]])
# res_normal = numpy.array([nxarr[idx],nyarr[idx],nzarr[idx]])
vertex_all = numpy.zeros(idx.shape[0], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'uint8'), ('green', 'uint8'), ('blue', 'uint8'), ('alpha', 'uint8')])
# vertex = numpy.array(res_xyz.shape[0], dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
# vertex = numpy.zeros(res_xyz.shape[1], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
# normply = numpy.zeros(res_xyz.shape[1], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
for i in range(idx.shape[0]):
vertex_all[i] = plydata['vertex'][idx[i]]
# vertex_all[i] = (res_xyz[0][i], res_xyz[1][i], res_xyz[2][i], res_normal[0][i], res_normal[1][i], res_normal[2][i], r[i], g[i], b[i], a[i])
# normply[i] = (res_normal[0][i], res_normal[1][i], res_normal[2][i])
# el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
points = PlyElement.describe(vertex_all, 'vertex', comments=['vertices']) #PlyElement.describe(np.array(res_xyz,dtype=[......]),'vertex')
# normals = PlyElement.describe(normply,'normals')
# plydata['vertex']['x'] = res_xyz[0,:]
# plydata['vertex']['y'] = res_xyz[1,:]
# plydata['vertex']['z'] = res_xyz[2,:]
# plydata['vertex']['nx'] = res_normal[0,:]
# plydata['vertex']['ny'] = res_normal[1,:]
# plydata['vertex']['nz'] = res_normal[2,:]
print("Write to ", outputfile, "\nwith rotation matrix")
# points.write(outputfile)
PlyData([points]).write(outputfile)
# PlyData.write(plydata, outputfile)
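# Hedged usage sketch (not part of the original script): trims the outermost
# `percent` of points along every axis; the file paths are placeholders.
def _example_autocut():
    ply_autocut("input_dense.ply", "input_dense_autocut.ply", percent=5)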
def cut_ply(inputfile, outputfile, axis, lower, upper):
plydata = PlyData.read(inputfile)
print (plydata['vertex'].dtype)
x = plydata['vertex']['x'].copy()
y = plydata['vertex']['y'].copy()
z = plydata['vertex']['z'].copy()
nx = plydata['vertex']['nx'].copy()
ny = plydata['vertex']['ny'].copy()
nz = plydata['vertex']['nz'].copy()
r = plydata['vertex']['red'].copy()
g = plydata['vertex']['green'].copy()
b = plydata['vertex']['blue'].copy()
a = plydata['vertex']['alpha'].copy()
# rmat = [[1, 0, 0], [0, 0, -1], [0, 1, 0]]
xyz = numpy.array([x,y,z])
xarr = numpy.array([x])
yarr = numpy.array([y])
zarr = numpy.array([z])
nxarr = numpy.array([nx])
nyarr = numpy.array([ny])
nzarr = numpy.array([nz])
normal = numpy.array([nx,ny,nz])
    # Only the z coordinate is filtered here; the `axis` argument is currently unused.
    idx = numpy.where((zarr >= lower) & (zarr <= upper))
idx = idx[1]
# print(idx)
# res_xyz = numpy.array([xarr[idx],yarr[idx],zarr[idx]])
# res_normal = numpy.array([nxarr[idx],nyarr[idx],nzarr[idx]])
vertex_all = numpy.zeros(idx.shape[0], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'uint8'), ('green', 'uint8'), ('blue', 'uint8'), ('alpha', 'uint8')])
# vertex = numpy.array(res_xyz.shape[0], dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
# vertex = numpy.zeros(res_xyz.shape[1], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
# normply = numpy.zeros(res_xyz.shape[1], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
for i in range(idx.shape[0]):
vertex_all[i] = plydata['vertex'][idx[i]]
# vertex_all[i] = (res_xyz[0][i], res_xyz[1][i], res_xyz[2][i], res_normal[0][i], res_normal[1][i], res_normal[2][i], r[i], g[i], b[i], a[i])
# normply[i] = (res_normal[0][i], res_normal[1][i], res_normal[2][i])
# el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
points = PlyElement.describe(vertex_all, 'vertex', comments=['vertices']) #PlyElement.describe(np.array(res_xyz,dtype=[......]),'vertex')
# normals = PlyElement.describe(normply,'normals')
# plydata['vertex']['x'] = res_xyz[0,:]
# plydata['vertex']['y'] = res_xyz[1,:]
# plydata['vertex']['z'] = res_xyz[2,:]
# plydata['vertex']['nx'] = res_normal[0,:]
# plydata['vertex']['ny'] = res_normal[1,:]
# plydata['vertex']['nz'] = res_normal[2,:]
print("Write to ", outputfile, "\nwith rotation matrix")
# points.write(outputfile)
PlyData([points]).write(outputfile)
# PlyData.write(plydata, outputfile)
if __name__ == "__main__":
cut_ply("/media/netease/Dataset/LargeScene/Scene/Wanxiangcheng/pointclouds/F2_dense_gz_f2_cut.ply",
"/media/netease/Dataset/LargeScene/Scene/Wanxiangcheng/pointclouds/F2_dense_gz_f2_cut_2.ply", "z", 3, 8.0) | StarcoderdataPython |
47638 | import olll
import numpy as np
test1 = [[1,0,0,1,1,0,1],[0,1,0,5,0,0,0],[0,0,1,0,5,0,5]]
test2 = [[1,0,0,2,-1,1],[0,1,0,3,-4,-2],[0,0,1,5,-10,-8]]
test3 = [[1,0,0,1,1,0,1], [0,1,0,4,-1,0,-1], [0,0,1,1,1,0,1]]
test4 = [[1,0,0,2,5,3],[0,1,0,1,1,1,],[0,0,1,4,-2,0]]
test5 = [[1,0,0,0,0,0,2,1,1,2],[0,1,0,0,0,0,1,1,-1,-1],[0,0,1,0,0,0,-1,0,-2,-3],[0,0,0,1,0,0,1,-1,1,-1],[0,0,0,0,1,0,-1,2,-4,-3],[0,0,0,0,0,1,1,0,0,1]]
test6 = [[1, 0, 0, 5, 0, 0, 0],[0, 1, 0, 0, 5, 0, 5],[0, 0, 1, 1, 1, 0, 1]]
test7 = [[1, 0, 0, 20, 0, 0, 0],[0, 1, 0, 0, 20, 0, 20],[0, 0, 1, 4, 4, 0, 4]]
test8 = [[1, 0, 0, 10, 0, 0, 0],[0, 1, 0, 0, 10, 0, 10],[0, 0, 1, 2, 2, 0, 2]]
n = input("Please enter n: \n")
n = int(n)
k = input("Please enter k: \n")
k = int(k)
p = input("Please enter p: \n")
p = int(p)
id = np.identity(k)
A = [[]] * k
print("Please enter the generating set:\n")
for i in range(k):
print("\nEnter the generator a[",i,"]: ")
a = list(map(int,input().strip().split()))[:n]
#print(i, a)
a = [x * (2**p) for x in a]
y = list(id[i])
print(y[i],type(y[i]))
A[i] = y+a
print(A[i], type(A[i]))
print(A, type(A))
print(test7, type(test7))
rb = olll.reduction(A, 0.75)
print("Basis: ", rb)
| StarcoderdataPython |
3247888 | import os
import numpy as np
import collections.abc
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
def __init__(self, data_set="train"):
"""
Args:
root_dir (string): Directory with all the spectrograms.
"""
self.metas = self.load_metas(hp.dataset_root, data_set)
def load_metas(self, root, data_set): # fix a bug
metas = []
with open(os.path.join(root, f"{data_set}.txt")) as f:
for line in f.readlines():
info = line.split("|")
metas.append(
{
"mel_path": os.path.join(root, info[0]),
"frames": info[1],
"token_ids_str": info[2],
"speaker": info[3],
}
)
return metas
def __len__(self):
return len(self.metas)
def __getitem__(self, idx):
meta = self.metas[idx]
token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
text = np.array(token_ids, dtype=np.int32)
mel = np.load(meta["mel_path"])
text_input = text[:-1]
text_output = text[1:]
text_length = text_input.shape[0]
pos_text = np.arange(1, text_length + 1)
pos_mel = np.arange(1, mel.shape[0] + 1)
return {
"text": text,
"text_input": text_input,
"text_output": text_output,
"text_length": text_length,
"mel": mel,
"pos_mel": pos_mel,
"pos_text": pos_text,
}
class AsrCollator(Collator):
def __init__(self, pad_value: float = 0.0):
super().__init__()
self.pad_value = pad_value
def apply(self, batch):
# Puts each data field into a tensor with outer dimension batch size
        if isinstance(batch[0], collections.abc.Mapping):
text = [d["text"] for d in batch]
text_input = [d["text_input"] for d in batch]
text_output = [d["text_output"] for d in batch]
text_length = [d["text_length"] for d in batch]
mel = [d["mel"] for d in batch]
mel_length = [d["mel"].shape[0] for d in batch]
pos_mel = [d["pos_mel"] for d in batch]
pos_text = [d["pos_text"] for d in batch]
text = [
i
for i, _ in sorted(
zip(text, mel_length), key=lambda x: x[1], reverse=True
)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [
i
for i, _ in sorted(
zip(mel, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_text = [
i
for i, _ in sorted(
zip(pos_text, mel_length), key=lambda x: x[1], reverse=True
)
]
pos_mel = [
i
for i, _ in sorted(
zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True
)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel)
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
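# Hedged usage sketch (not part of the original module): wires the dataset and
# collator into a megengine DataLoader; RandomSampler and the batch size are
# illustrative choices.
def _example_loader(batch_size: int = 8) -> DataLoader:
    from megengine.data import RandomSampler
    dataset = AsrDataset(data_set="train")
    sampler = RandomSampler(dataset, batch_size=batch_size)
    return DataLoader(dataset, sampler=sampler, collator=AsrCollator())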
def collate_fn_transformer_test(batch):
# Puts each data field into a tensor with outer dimension batch size
# if isinstance(batch[0], collections.Mapping):
text = [batch["text"]] # for d in batch]
text_input = batch["text_input"]
text_output = batch["text_output"]
text_length = batch["text_length"]
mel = [batch["mel"]]
mel_length = [batch["mel"].shape[1]]
pos_mel = batch["pos_mel"]
pos_text = batch["pos_text"]
text = [
i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
]
text_input = [
i
for i, _ in sorted(
zip(text_input, mel_length), key=lambda x: x[1], reverse=True
)
]
text_output = [
i
for i, _ in sorted(
zip(text_output, mel_length), key=lambda x: x[1], reverse=True
)
]
text_length = [
i
for i, _ in sorted(
zip(text_length, mel_length), key=lambda x: x[1], reverse=True
)
]
mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
pos_text = [
i
for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
]
pos_mel = [
i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
]
mel_length = sorted(mel_length, reverse=True)
# PAD sequences with largest length of the batch
text_input = _prepare_data(text_input).astype(np.int32)
text_output = _prepare_data(text_output).astype(np.int32)
mel = _pad_mel(mel[0])
pos_mel = _prepare_data(pos_mel).astype(np.int32)
pos_text = _prepare_data(pos_text).astype(np.int32)
return (
mge.Tensor(text_input),
mge.Tensor(text_output),
mge.Tensor(mel),
mge.Tensor(pos_text),
mge.Tensor(pos_mel),
mge.Tensor(text_length),
mge.Tensor(mel_length),
)
raise TypeError(
(
"batch must contain tensors, numbers, dicts or lists; found {}".format(
type(batch[0])
)
)
)
############################ Utils ###################################
def _pad_data(x, length):
_pad = 0
return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=_pad)
def _prepare_data(inputs):
max_len = max((len(x) for x in inputs))
return np.stack([_pad_data(x, max_len) for x in inputs])
def _pad_mel(inputs):
_pad = 0
def _pad_one(x, max_len):
mel_len = x.shape[0]
return np.pad(
x, [[0, max_len - mel_len], [0, 0]], mode="constant", constant_values=_pad
)
max_len = max((x.shape[0] for x in inputs))
return np.stack([_pad_one(x, max_len) for x in inputs])
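# Minimal usage sketch of the padding helpers (shapes are hypothetical):
#
#   texts = [np.array([1, 2, 3]), np.array([4, 5])]
#   _prepare_data(texts)   # -> shape (2, 3), shorter entry zero-padded
#
#   mels = [np.zeros((80, 40)), np.zeros((65, 40))]
#   _pad_mel(mels)         # -> shape (2, 80, 40), padded along the time axis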
# pypy/lang/smalltalk/test/test_shadow.py
import random
from pypy.lang.smalltalk import model, shadow, constants
from pypy.lang.smalltalk import objspace
space = objspace.ObjSpace()
w_Object = space.classtable['w_Object']
w_Metaclass = space.classtable['w_Metaclass']
w_MethodDict = space.classtable['w_MethodDict']
w_Array = space.classtable['w_Array']
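# Builds a Smalltalk MethodDictionary for the given selector -> compiled-method
# mapping: the dictionary is sized at ~1.5x the number of methods and entries
# are written into randomly shuffled slots.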
def build_methoddict(methods):
size = int(len(methods) * 1.5)
w_methoddict = w_MethodDict.as_class_get_shadow(space).new(size)
w_array = w_Array.as_class_get_shadow(space).new(size)
for i in range(size):
w_array.store(space, i, space.w_nil)
w_methoddict.store(space, constants.METHODDICT_NAMES_INDEX+i, space.w_nil)
w_tally = space.wrap_int(len(methods))
w_methoddict.store(space, constants.METHODDICT_TALLY_INDEX, w_tally)
w_methoddict.store(space, constants.METHODDICT_VALUES_INDEX, w_array)
positions = range(size)
random.shuffle(positions)
for selector, w_compiledmethod in methods.items():
pos = positions.pop()
w_selector = space.wrap_string(selector)
w_methoddict.store(space, constants.METHODDICT_NAMES_INDEX+pos, w_selector)
w_array.store(space, pos, w_compiledmethod)
return w_methoddict
def build_smalltalk_class(name, format, w_superclass=w_Object,
w_classofclass=None, methods={}):
if w_classofclass is None:
w_classofclass = build_smalltalk_class(None, 0x94,
w_superclass.w_class,
w_Metaclass)
w_methoddict = build_methoddict(methods)
size = constants.CLASS_NAME_INDEX + 1
w_class = model.W_PointersObject(w_classofclass, size)
w_class.store(space, constants.CLASS_SUPERCLASS_INDEX, w_superclass)
w_class.store(space, constants.CLASS_METHODDICT_INDEX, w_methoddict)
w_class.store(space, constants.CLASS_FORMAT_INDEX, space.wrap_int(format))
if name is not None:
w_class.store(space, constants.CLASS_NAME_INDEX, space.wrap_string(name))
return w_class
def basicshape(name, format, kind, varsized, instsize):
w_class = build_smalltalk_class(name, format)
classshadow = w_class.as_class_get_shadow(space)
assert classshadow.instance_kind == kind
assert classshadow.isvariable() == varsized
assert classshadow.instsize() == instsize
assert classshadow.name == name
assert classshadow.s_superclass() is w_Object.as_class_get_shadow(space)
def test_basic_shape():
yield basicshape, "Empty", 0x02, shadow.POINTERS, False, 0
yield basicshape, "Seven", 0x90, shadow.POINTERS, False, 7
yield basicshape, "Seventyseven", 0x1009C, shadow.POINTERS, False, 77
yield basicshape, "EmptyVar", 0x102, shadow.POINTERS, True, 0
yield basicshape, "VarTwo", 0x3986, shadow.POINTERS, True, 2
yield basicshape, "VarSeven", 0x190, shadow.POINTERS, True, 7
yield basicshape, "Bytes", 0x402, shadow.BYTES, True, 0
yield basicshape, "Words", 0x302, shadow.WORDS, True, 0
yield basicshape, "CompiledMeth", 0xE02, shadow.COMPILED_METHOD, True, 0
def test_methoddict():
methods = {'foo': model.W_CompiledMethod(0),
'bar': model.W_CompiledMethod(0)}
w_class = build_smalltalk_class("Demo", 0x90, methods=methods)
classshadow = w_class.as_class_get_shadow(space)
assert classshadow.s_methoddict().methoddict == methods
def method(tempsize=3,argsize=2, bytes="abcde"):
w_m = model.W_CompiledMethod()
w_m.bytes = bytes
w_m.tempsize = tempsize
w_m.argsize = argsize
w_m.literalsize = 2
return w_m
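# The helpers below build throwaway MethodContext / BlockContext objects with
# just enough slots filled in (sender, pc, stack pointer, placeholder receiver
# and temps) for the shadow tests that follow.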
def methodcontext(w_sender=space.w_nil, pc=1, stackpointer=0, stacksize=5,
method=method()):
w_object = model.W_PointersObject(space.w_MethodContext, constants.MTHDCTX_TEMP_FRAME_START+method.tempsize+stacksize)
w_object.store(space, constants.CTXPART_SENDER_INDEX, w_sender)
w_object.store(space, constants.CTXPART_PC_INDEX, space.wrap_int(pc))
w_object.store(space, constants.CTXPART_STACKP_INDEX, space.wrap_int(method.tempsize+stackpointer))
w_object.store(space, constants.MTHDCTX_METHOD, method)
# XXX
w_object.store(space, constants.MTHDCTX_RECEIVER_MAP, '???')
w_object.store(space, constants.MTHDCTX_RECEIVER, 'receiver')
w_object.store(space, constants.MTHDCTX_TEMP_FRAME_START, 'el')
return w_object
def blockcontext(w_sender=space.w_nil, pc=1, stackpointer=1, stacksize=5,
home=methodcontext()):
w_object = model.W_PointersObject(space.w_MethodContext, constants.MTHDCTX_TEMP_FRAME_START+stacksize)
w_object.store(space, constants.CTXPART_SENDER_INDEX, w_sender)
w_object.store(space, constants.CTXPART_PC_INDEX, space.wrap_int(pc))
w_object.store(space, constants.CTXPART_STACKP_INDEX, space.wrap_int(stackpointer))
w_object.store(space, constants.BLKCTX_BLOCK_ARGUMENT_COUNT_INDEX, space.wrap_int(54))
w_object.store(space, constants.BLKCTX_INITIAL_IP_INDEX, space.wrap_int(17))
w_object.store(space, constants.BLKCTX_HOME_INDEX, home)
w_object.store(space, constants.BLKCTX_STACK_START, 'el')
return w_object
def test_context():
w_m = method()
w_object = methodcontext(stackpointer=3, method=w_m)
w_object2 = methodcontext(w_sender=w_object)
s_object = w_object.as_methodcontext_get_shadow(space)
assert len(s_object.stack()) == 3
s_object2 = w_object2.as_methodcontext_get_shadow(space)
assert w_object2.fetch(space, constants.CTXPART_SENDER_INDEX) == w_object
assert s_object.w_self() == w_object
assert s_object2.w_self() == w_object2
assert s_object.s_sender() == None
assert s_object2.s_sender() == s_object
assert s_object.w_receiver() == 'receiver'
s_object2.settemp(0, 'a')
s_object2.settemp(1, 'b')
assert s_object2.gettemp(1) == 'b'
assert s_object2.gettemp(0) == 'a'
assert s_object.w_method() == w_m
idx = s_object.stackstart()
w_object.store(space, idx, 'f')
w_object.store(space, idx + 1, 'g')
w_object.store(space, idx + 2, 'h')
assert s_object.stack() == ['f', 'g', 'h' ]
assert s_object.top() == 'h'
s_object.push('i')
assert s_object.top() == 'i'
assert s_object.peek(1) == 'h'
assert s_object.pop() == 'i'
assert s_object.pop_and_return_n(2) == ['g', 'h']
assert s_object.pop() == 'f'
assert s_object.external_stackpointer() == s_object.stackstart()
def test_methodcontext():
w_m = method()
# Point over 2 literals of size 4
w_object = methodcontext(pc=13,method=w_m)
s_object = w_object.as_methodcontext_get_shadow(space)
assert s_object.getbytecode() == 97
assert s_object.getbytecode() == 98
assert s_object.getbytecode() == 99
assert s_object.getbytecode() == 100
assert s_object.getbytecode() == 101
assert s_object.s_home() == s_object
def test_attach_detach_mc():
w_m = method()
w_object = methodcontext(pc=13, method=w_m)
old_vars = w_object._vars
s_object = w_object.as_methodcontext_get_shadow(space)
assert w_object._vars is None
s_object.detach_shadow()
assert w_object._vars == old_vars
assert w_object._vars is not old_vars
def test_attach_detach_bc():
w_object = blockcontext(pc=13)
old_vars = w_object._vars
s_object = w_object.as_blockcontext_get_shadow(space)
assert w_object._vars is None
s_object.detach_shadow()
assert w_object._vars == old_vars
assert w_object._vars is not old_vars
def test_replace_to_bc():
w_object = blockcontext(pc=13)
old_vars = w_object._vars
s_object = w_object.as_blockcontext_get_shadow(space)
s_object.detach_shadow()
s_classshadow = shadow.ClassShadow(space, w_object)
w_object._shadow = s_classshadow
s_classshadow.invalid = False
s_newobject = w_object.as_blockcontext_get_shadow(space)
assert s_classshadow.invalid
assert ([s_newobject.fetch(i) for i in range(s_newobject.size())] ==
[s_object.fetch(i) for i in range(s_newobject.size())])
assert w_object._shadow is s_newobject
import pandas as pd
import numpy as np
import time
from scipy.sparse import csr_matrix
# TODO: make channel column optional
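# Converts an edge-list DataFrame (src, dst[, channel]) into one CSR adjacency
# matrix per channel: duplicate rows become edge counts and node labels are
# mapped to contiguous integer indices (taken from `nodelist` when given).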
def edgelist_to_adjs(edgelist, nodelist=None):
edgecounts = edgelist.groupby(by=edgelist.columns.tolist(),
as_index=False).size().reset_index(name="count")
if "channel" in edgelist.columns:
channels = edgecounts.channel.unique()
if nodelist is None:
nodes = pd.concat([edgecounts.src, edgecounts.dst]).unique()
else:
nodes = nodelist.node
node_to_idx = {node: idx for idx, node in enumerate(nodes)}
edgecounts[["src", "dst"]] = edgecounts[["src", "dst"]].applymap(node_to_idx.get)
if "channel" in edgelist.columns:
adjs = []
for channel in channels:
ch_ec = edgecounts[edgecounts.channel==channel]
adjs.append(csr_matrix((ch_ec["count"], (ch_ec["src"], ch_ec["dst"])),
shape=(len(node_to_idx), len(node_to_idx))))
else:
# [None] is used since there are no channels
channels = [None]
# Short alias for edgecounts df for ease of typing
ec = edgecounts
adjs = [csr_matrix((ec["count"], (ec["src"], ec["dst"])),
shape=(len(node_to_idx), len(node_to_idx)))]
return nodelist, channels, adjs
def load_combo(filepath, *,
node_vs_edge_col=0,
node_str="v",
node_col=1,
label_col=None,
src_col=1,
dst_col=2,
channel_col=3,
**kwargs):
if label_col is None:
usecols = [node_vs_edge_col, node_col]
names = ["vs", "node"]
else:
usecols = [node_vs_edge_col, node_col, label_col]
names = ["vs", "node", "label"]
nodelist = pd.read_csv(filepath,
usecols=usecols,
names=names,
engine='python',
**kwargs)
# Get rid of the "vs" column
nodelist = nodelist[nodelist.vs == node_str][names[1:]]
if channel_col is None:
usecols = [node_vs_edge_col, src_col, dst_col]
names = ["vs", "src", "dst"]
else:
usecols = [node_vs_edge_col, src_col, dst_col, channel_col]
names = ["vs", "src", "dst", "channel"]
edgelist = pd.read_csv(filepath,
usecols=usecols,
names=names,
engine='python',
**kwargs)
edgelist = edgelist[edgelist.vs != node_str][names[1:]]
return edgelist_to_adjs(edgelist, nodelist)
def load_nodelist(filepath, *,
node_col=0,
label_col=None,
**kwargs):
if label_col is None:
usecols = [node_col]
names = ["node"]
else:
usecols = [node_col, label_col]
names = ["node", "label"]
return pd.read_csv(filepath,
usecols=usecols,
names=names,
engine='python',
**kwargs)
def load_edgelist(filepath, *,
nodelist=None,
src_col=0,
dst_col=1,
channel_col=2,
**kwargs):
if channel_col is None:
usecols = [src_col, dst_col]
names = ["src", "dst"]
else:
usecols = [src_col, dst_col, channel_col]
names = ["src", "dst", "channel"]
edgelist = pd.read_csv(filepath,
usecols=usecols,
names=names,
engine='python',
**kwargs)
return edgelist_to_adjs(edgelist, nodelist)
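# Minimal usage sketch (file name and column layout are assumptions):
#
#   nodelist, channels, adjs = load_edgelist(
#       "edges.csv", src_col=0, dst_col=1, channel_col=None, header=None
#   )
#   # adjs[0] is a scipy.sparse.csr_matrix of edge counts; `nodelist` is the
#   # nodelist that was passed in (None here), and channels == [None].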
# ecosystemai/ecosystem-notebooks
import dash
# import dash_table
# import dash_core_components as dcc
# import dash_html_components as html
from dash import dcc
from dash import html
from dash import dash_table
from dash.dependencies import State
import plotly.graph_objects as go
from datetime import date
from datetime import timedelta
import flask
import json
import base64
import os
import pandas as pd
import dash_bootstrap_components as dbc
import dash_trich_components as dtc
import dash_pivottable
import logging
import dateutil
import ecosystem_scoring_pdash
from dashboard import gv
from dashboard import functions
graphing_adv_refresh = False
continuous_empty = html.Div([
html.Label("Predictor type not acceptable.")
],
style={"height": "650px", "display": "none"},
id="continuous_empty_div"
)
continuous_wellness = html.Div([
html.Br(),
html.Div([
html.Label("Customer Data"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="wellness_upload_customer_data",
disabled=True,
style={"width": "60%"}
),
dbc.InputGroupAddon(
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="wellness_customer_upload_picker", style={"display": "inline-block"}),
addon_type="append",
),
]
),
html.Br(),
],
style={"border": "1px solid #dee2e6", "padding": "5px"}
),
html.Br(),
dbc.Button("Process Uploads",
outline=True,
color="primary",
className="mr-1",
id="wellness_process_uploads_button"
)
],
style={"height": "650px", "display": "none"},
id="continuous_wellness_div"
)
continuous_spend_personality = html.Div([
html.Br(),
html.Div([
html.Label("Customer Data"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="spend_personality_upload_customer_data",
disabled=True,
style={"width": "60%"}
),
dbc.InputGroupAddon(
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="spend_personality_customer_upload_picker", style={"display": "inline-block"}),
addon_type="append",
),
]
),
html.Br(),
html.Label("Transaction Data"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="spend_personality_upload_transaction_data",
disabled=True,
style={"width": "60%"}
),
dbc.InputGroupAddon(
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="spend_personality_transaction_upload_picker", style={"display": "inline-block"}),
addon_type="append",
),
]
),
html.Br(),
html.Label("CTO Data"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="spend_personality_upload_cto_data",
disabled=True,
style={"width": "60%"}
),
dbc.InputGroupAddon(
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="spend_personality_cto_upload_picker", style={"display": "inline-block"}),
addon_type="append",
),
]
),
html.Br(),
],
style={"border": "1px solid #dee2e6", "padding": "5px"}
),
html.Br(),
dbc.Button("Process Uploads",
outline=True,
color="primary",
className="mr-1",
id="spend_personality_process_uploads_button"
)
],
style={"height": "650px", "display": "none"},
id="continuous_spend_personality_div"
)
explore_component = html.Div([
html.Div([
dbc.Row(
[
dbc.Col(
html.Div([
dbc.Card(
dbc.CardBody([
html.Label(html.B("Use Case Details"), style={"margin-bottom": "0rem"}),
],
style={"padding": "0.75rem"}
),
),
html.Br(),
dbc.Card(
dbc.CardBody([
html.Div([
html.Label("Use Case"),
dcc.Dropdown(
id="usecase_dropdown",
clearable=False,
),
html.Br(),
dbc.Button("Test Connection", outline=True, color="primary", id="test_conn_button"),
html.Br(),
html.Br(),
html.Label("Find Filter"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="find_filter_input",
value="{}"
),
dbc.InputGroupAddon(
dbc.Button("Filter", outline=True, color="primary", id="filter_button"),
addon_type="append",
),
]
),
html.Br(),
html.Label("Customer"),
html.Div([
dbc.Checklist(
id="customer_list",
options = [],
style={"padding-left": "5px"}
),
],
style={"overflow-y": "scroll", "height": "180px", "border": "1px solid grey"}
),
html.Br(),
html.Label("Score Value"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="score_value_input",
value=""
),
dbc.InputGroupAddon(
dbc.Button("Score", outline=True, color="primary", id="score_button"),
addon_type="append",
),
]
),
html.Br(),
html.Label("", id="score_buffer", style={"display": "none"}),
dcc.Upload(
dbc.Button("Batch Score", outline=True, color="primary", className="mr-1"),
id="batch_score_picker",
style={"display": "inline-block"}
)
],
style={"height": "693px"}
)
]
)
)
],
# style={"border": "5px solid grey", "width": "100%"}
),
md=3
),
dbc.Col(
html.Div(
[
dbc.Card(
dbc.CardBody([
html.Label(html.B("Transaction Details"), style={"margin-bottom": "0rem"}),
],
style={"padding": "0.75rem"}
),
),
html.Br(),
dbc.Card(
dbc.CardBody([
html.Div([
html.Div([
dbc.ListGroup([], id="table_div", style={"overflow-y": "scroll", "max-height": "600px"})
],
)
],
style={"height": "693px"}
)
]
)
)
],
# style={"border": "5px solid grey", "width": "100%"}
),
md=5
),
dbc.Col(
html.Div(
[
dbc.Card(
dbc.CardBody([
html.Label(html.B("Upload Data"), style={"margin-bottom": "0rem"}),
],
style={"padding": "0.75rem"}
),
),
html.Br(),
dbc.Card(
dbc.CardBody(
dbc.Tabs([
dbc.Tab(
html.Div([
html.Br(),
html.Div([
html.Label("Use Case Name"),
html.Br(),
dcc.Input(
id="usecase_name",
style={"width": "100%"}
),
html.Br(),
html.Br(),
dbc.Row([
dbc.Col([
html.Label("Runtime URL"),
html.Br(),
dcc.Input(id="usecase_runtime_url")
]
),
html.Br(),
dbc.Col([
html.Label("API Endpoint"),
html.Br(),
dcc.Input(id="usecase_api_endpoint", style={"width": "100%"})
]
)
]
),
html.Br(),
html.Br(),
html.Label("Properties"),
dcc.Textarea(
id="properties_textarea",
style={"width": "100%", "height": "200px"},
),
html.Br(),
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="upload_properties_picker", style={"display": "inline-block"}),
],
style={"border": "1px solid #dee2e6", "padding": "5px"}
),
html.Br(),
dbc.Button("Upload",
outline=True,
color="primary",
className="mr-1",
id="properties_button"
),
],
style={"height": "650px"}
),
label="Use Case",
tab_id="setup_properties"
),
dbc.Tab(
html.Div([
html.Br(),
html.Div([
html.Label("Database"),
html.Br(),
dcc.Input(id="upload_database"),
],
style={"border": "1px solid #dee2e6", "padding": "5px"}
),
html.Br(),
html.Div([
html.Label("Model"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="upload_model",
disabled=True,
style={"width": "60%"}
),
dbc.InputGroupAddon(
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="upload_model_picker", style={"display": "inline-block"}),
addon_type="append",
),
]
),
],
style={"border": "1px solid #dee2e6", "padding": "5px"}
),
html.Br(),
html.Div([
html.Label("Target Feature Store"),
html.Br(),
dcc.Input(id="upload_target_fs", style={"width": "60%"}),
html.Br(),
html.Br(),
html.Label("Feature Store"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="upload_fs",
disabled=True,
style={"width": "60%"}
),
dbc.InputGroupAddon(
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="upload_fs_picker",),
addon_type="append",
),
]
),
html.Br(),
html.Label("Target Additional File"),
html.Br(),
dcc.Input(id="upload_target_ad", style={"width": "60%"}),
html.Br(),
html.Br(),
html.Label("Additional File"),
html.Br(),
dbc.InputGroup(
[
dcc.Input(
id="upload_ad",
disabled=True,
style={"width": "60%"}
),
dbc.InputGroupAddon(
dcc.Upload(dbc.Button(html.I(className="fas fa-upload"), outline=True, color="primary"), id="upload_ad_picker", style={"display": "inline-block"}),
addon_type="append",
),
]
),
],
style={"border": "1px solid #dee2e6", "padding": "5px"}
),
html.Br(),
dbc.Button("Upload",
outline=True,
color="primary",
className="mr-1",
id="files_button"
)
],
style={"height": "650px"}
),
label="Files",
tab_id="setup_files"
),
dbc.Tab(
html.Div([continuous_spend_personality, continuous_wellness, continuous_empty],
id="continuous_div"
),
label="Continuous",
tab_id="upload_files"
)
]
)
)
)
],
# style={"border": "5px solid grey", "width": "100%"}
),
md=4
)
],
),
html.Br(),
html.Div([
html.Div(
[
dbc.Card(
dbc.CardBody([
html.Label(html.B("Scoring Output"), style={"margin-bottom": "0rem"}),
],
style={"padding": "0.75rem"}
),
),
html.Br(),
dbc.Card(
dbc.CardBody([
dbc.Tabs([
dbc.Tab(
html.Div(
id="scoring_div",
style={"overflow-y": "scroll", "max-height": "580px"}
),
label="Scoring",
tab_id="scoring_table"
),
dbc.Tab(
# html.Div([],
dbc.Textarea(
id = "scoring_text_area",
# className="tree",
style= {"width": "100%", "height": "495px"}
),
label="Scoring Result",
tab_id="scoring"
),
dbc.Tab(
html.Div([
dcc.Dropdown(
id="graph_dropdown",
options=[],
clearable=False
),
dcc.Graph(
id="graphing",
figure={}
)
],
id="graphing_div",
style= {"width": "100%", "height": "580px"}
),
label="Graph",
tab_id="graph"
),
dbc.Tab(
html.Div([
dbc.Card(
dbc.CardHeader(
dbc.Button("Advanced Options", outline=True, color="link", id="graphing_adv_collapse_button", style={"height": "100%", "width": "100%"}),
)
),
dbc.Collapse(
html.Div([
html.Br(),
html.Label("Add Field to Graph"),
dcc.Dropdown(
id="graph_adv_dropdown",
options=[],
multi=True
),
],
),
id="graphing_adv_collapse"
),
html.Div([],
id="graphing_adv_div",
style= {"width": "100%"}
)
],
),
label="Advanced Graph",
tab_id="graph_adv"
),
],
id="tabs_scoring",
active_tab="scoring_table",
)
]
)
)
]
# style={"width": "100%"}
)
],
# style={"border": "5px solid grey", "width": "100%"}
)
],
style={"padding-left": "30px", "padding-right": "30px", "padding-top": "30px", "padding-bottom": "30px"}
)
],
id="explore_component",
style={"display": "none"}
)
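# Registers every Dash callback used by the explore tab: customer filtering and
# selection, single and batch scoring, use-case / file uploads, the continuous
# (wellness and spend-personality) upload processing, and the scoring views.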
def register_callbacks(app):
@app.callback(
dash.dependencies.Output("table_div", "children"),
[
dash.dependencies.Input("customer_list", "value"),
dash.dependencies.Input("usecase_dropdown", "value")
],
prevent_initial_call=True)
def callback_customer_list(customer_list, usecase):
ctx = dash.callback_context
trigger_id = ctx.triggered[0]["prop_id"].split(".")[0]
if trigger_id == "customer_list":
if len(customer_list) < 1:
return [[]]
customer = customer_list[-1]
data = gv.sd.dropdown_customer_eventhandler(customer, usecase)
if len(data) > 1:
df = pd.DataFrame(data)
return dbc.Table.from_dataframe(df, striped=True, bordered=True, hover=True)
else:
group_items = []
for entry in data:
for key in entry:
# group_items.append(dbc.ListGroupItem("{}: {}".format(key, entry[key])))
group_items.append(dbc.ListGroupItem(
[
dbc.ListGroupItemHeading(key),
dbc.ListGroupItemText(entry[key]),
]
))
return dbc.ListGroup(group_items)
if trigger_id == "usecase_dropdown":
field_data = gv.sd.read_field_from_properties(usecase, "predictor.param.lookup")
return [[]]
@app.callback(
dash.dependencies.Output("find_filter_input", "value"),
[
dash.dependencies.Input("usecase_dropdown", "value")
],
prevent_initial_call=True)
def clear_find_filter(usecase):
return "{}"
@app.callback(
[
dash.dependencies.Output("customer_list", "options"),
dash.dependencies.Output("filter_toast_div", "children"),
],
[
dash.dependencies.Input("filter_button", "n_clicks"),
dash.dependencies.Input("usecase_dropdown", "value"),
],
State(component_id="find_filter_input", component_property="value"),
prevent_initial_call=True)
def callback_find_filter_button(n_clicks, usecase, find_filter):
ctx = dash.callback_context
trigger_id = ctx.triggered[0]["prop_id"].split(".")[0]
if trigger_id == "filter_button":
if usecase == "" or usecase == None:
return [], functions.generate_toast("Error: Could not filter, usecase is not selected.", "Error", "danger")
try:
opts = gv.sd.find_btn_eventhandler(usecase, find_filter)
return opts, []
except Exception as e:
print(e)
return [], functions.generate_toast("Error: Could not filter: {}".format(e), "Error", "danger")
if trigger_id == "usecase_dropdown":
return [], []
@app.callback(
[
dash.dependencies.Output("score_buffer", "children"),
dash.dependencies.Output("score_toast_div", "children")
],
[dash.dependencies.Input("score_button", "n_clicks")],
State(component_id="usecase_dropdown", component_property="value"),
State(component_id="score_value_input", component_property="value"),
prevent_initial_call=True)
def callback_score_button(n_clicks, usecase, score_value):
if score_value == "":
return None, functions.generate_toast("Error: Could not score: Score Value field is empty.", "Error", "danger")
try:
acts = gv.sd.get_active_states()
acts.exp_score_button_busy = True
acts.exp_score_button_busy_changed = True
outputs = gv.sd.score_btn_eventhandler(usecase, score_value)
global graphing_adv_refresh
graphing_adv_refresh = True
acts.exp_score_button_busy = False
acts.exp_score_button_busy_changed = True
return outputs, []
except Exception as e:
print(e)
return None, functions.generate_toast("Error: Could not score: {}".format(e), "Error", "danger")
@app.callback(
dash.dependencies.Output("score_value_input", "value"),
[
dash.dependencies.Input("batch_score_picker", "contents"),
dash.dependencies.Input("customer_list", "value"),
dash.dependencies.Input("usecase_dropdown", "value")
],
State(component_id="score_value_input", component_property="value"),
prevent_initial_call=True)
def batch_uploader(contents, customer_list, usecase, input_contents):
ctx = dash.callback_context
trigger_id = ctx.triggered[0]["prop_id"].split(".")[0]
if trigger_id == "batch_score_picker":
content_type, content_string = contents.split(",")
decoded = base64.b64decode(content_string)
text = decoded.decode("utf-8")
custs = text.split("\n")
return ",".join(custs)
if trigger_id == "customer_list":
return ",".join(customer_list)
if trigger_id == "usecase_dropdown":
return ""
@app.callback(
dash.dependencies.Output("customer_list", "value"),
[
dash.dependencies.Input("usecase_dropdown", "value")
],
prevent_initial_call=True)
def refresh_customer_list(usecase):
return []
@app.callback(
dash.dependencies.Output("properties_textarea", "value"),
[
dash.dependencies.Input("upload_properties_picker", "contents"),
dash.dependencies.Input("usecase_dropdown", "value")
],
prevent_initial_call=True)
def upload_properties(contents, usecase):
ctx = dash.callback_context
trigger_id = ctx.triggered[0]["prop_id"].split(".")[0]
if trigger_id == "upload_properties_picker":
return ecosystem_scoring_pdash.decode_text(contents)
if trigger_id == "usecase_dropdown":
return gv.sd.get_properties(usecase)
@app.callback(
dash.dependencies.Output("wellness_continuous_toast_div", "children"),
[dash.dependencies.Input("wellness_process_uploads_button", "n_clicks")],
State(component_id="usecase_dropdown", component_property="value"),
State(component_id="wellness_customer_upload_picker", component_property="filename"),
State(component_id="wellness_customer_upload_picker", component_property="contents"),
prevent_initial_call=True)
def callback_process_uploads(clicks, usecase, c_filename, c_content):
try:
gv.sd.wellness_process_uploads(usecase, gv.tmp_dir, c_filename, c_content)
return functions.generate_toast("Successfully processed new uploads.", "Success", "success")
except Exception as e:
print(e)
return functions.generate_toast("Error: Could not process new uploads.", "Error", "danger")
@app.callback(
dash.dependencies.Output("spend_personality_continuous_toast_div", "children"),
[dash.dependencies.Input("spend_personality_process_uploads_button", "n_clicks")],
State(component_id="usecase_dropdown", component_property="value"),
State(component_id="spend_personality_customer_upload_picker", component_property="filename"),
State(component_id="spend_personality_customer_upload_picker", component_property="contents"),
State(component_id="spend_personality_transaction_upload_picker", component_property="filename"),
State(component_id="spend_personality_transaction_upload_picker", component_property="contents"),
State(component_id="spend_personality_cto_upload_picker", component_property="filename"),
State(component_id="spend_personality_cto_upload_picker", component_property="contents"),
prevent_initial_call=True)
def callback_process_uploads(clicks, usecase, c_filename, c_content, t_filename, t_content, cto_filename, cto_content):
try:
gv.sd.spend_personality_process_uploads(usecase, gv.tmp_dir + "to_upload.csv", gv.tmp_dir, c_filename, c_content, gv.tmp_dir, t_filename, t_content, gv.tmp_dir, cto_filename, cto_content)
return functions.generate_toast("Successfully processed new uploads.", "Success", "success")
except Exception as e:
print(e)
return functions.generate_toast("Error: Could not process new uploads.", "Error", "danger")
@app.callback(
dash.dependencies.Output("connection_test_toast_div", "children"),
[dash.dependencies.Input("test_conn_button", "n_clicks")],
State(component_id="usecase_dropdown", component_property="value"),
prevent_initial_call=True)
def test_connection(n_clicks, usecase_name):
if usecase_name == "" or usecase_name == None:
return functions.generate_toast("Error: No usecase selected.", "Error", "danger")
if gv.sd.test_connection(usecase_name):
return functions.generate_toast("Connection Successful.", "Success", "success")
return functions.generate_toast("Error: Could not connected to '{}' runtime server.".format(usecase_name), "Error", "danger")
@app.callback(
dash.dependencies.Output("usecase_name", "value"),
[dash.dependencies.Input("usecase_dropdown", "value")],
prevent_initial_call=True)
def display_usecase_name(usecase):
return usecase
@app.callback(
dash.dependencies.Output("usecase_runtime_url", "value"),
[dash.dependencies.Input("usecase_dropdown", "value")],
prevent_initial_call=True)
def display_runtime_url(usecase):
return gv.sd.get_runtime_url(usecase)
@app.callback(
dash.dependencies.Output("usecase_api_endpoint", "value"),
[dash.dependencies.Input("usecase_dropdown", "value")],
prevent_initial_call=True)
def display_api_endpoint(usecase):
return gv.sd.get_api_endpoint(usecase)
@app.callback(
dash.dependencies.Output("files_toast_div", "children"),
[dash.dependencies.Input("files_button", "n_clicks")],
State(component_id="usecase_dropdown", component_property="value"),
State(component_id="upload_database", component_property="value"),
State(component_id="upload_target_fs", component_property="value"),
State(component_id="upload_target_ad", component_property="value"),
State(component_id="upload_model_picker", component_property="filename"),
State(component_id="upload_model_picker", component_property="contents"),
State(component_id="upload_fs_picker", component_property="filename"),
State(component_id="upload_fs_picker", component_property="contents"),
State(component_id="upload_ad_picker", component_property="filename"),
State(component_id="upload_ad_picker", component_property="contents"),
prevent_initial_call=True)
def upload_files(n_clicks, usecase, database, target_fs, target_ad, model_name, model_content, fs_name, fs_content, ad_name, ad_content):
if usecase == "" or usecase == None:
return functions.generate_toast("Error: Could not upload files: Use case is not selected.", "Error", "danger")
if database == None or database == "":
return functions.generate_toast("Error: Could not upload files: Database field is empty.", "Error", "danger")
if (target_fs == None or target_fs == "") and (fs_name != None and fs_name != ""):
return functions.generate_toast("Error: Could not upload files: Feature Store field is empty.", "Error", "danger")
if (target_fs != None and target_fs != "") and (fs_name == None or fs_name == ""):
return functions.generate_toast("Error: Could not upload files: Target Feature Store field is empty.", "Error", "danger")
if (target_ad == None or target_ad == "") and (ad_name != None and ad_name != ""):
return functions.generate_toast("Error: Could not upload files: Additional File field is empty.", "Error", "danger")
if (target_ad != None and target_ad != "") and (ad_name == None or ad_name == ""):
return functions.generate_toast("Error: Could not upload files: Target Additional File field is empty.", "Error", "danger")
model_path = gv.tmp_dir + str(model_name)
fs_path = gv.tmp_dir + str(fs_name)
ad_path = gv.tmp_dir + str(ad_name)
try:
gv.sd.upload_use_case_files(usecase, database, model_path, model_content, fs_path, fs_content, target_fs, ad_path=ad_path, ad_content=ad_content, additional=target_ad)
return functions.generate_toast("Successfully uploaded files.", "Success", "success")
except Exception as e:
print(e)
return functions.generate_toast("Error: Could not upload files. {}".format(e), "Error", "danger")
@app.callback(
dash.dependencies.Output("continuous_wellness_div", "style"),
[
dash.dependencies.Input("usecase_dropdown", "value")
],
prevent_initial_call=True
)
def toggle_continuous(dropdown_value):
predictor = gv.sd.get_predictor_type(dropdown_value)
if predictor == "wellness_score":
return {"height": "650px"}
return {"display": "none"}
@app.callback(
dash.dependencies.Output("continuous_spend_personality_div", "style"),
[
dash.dependencies.Input("usecase_dropdown", "value")
],
prevent_initial_call=True
)
def toggle_continuous(dropdown_value):
predictor = gv.sd.get_predictor_type(dropdown_value)
if predictor == "spending_personality":
return {"height": "650px"}
return {"display": "none"}
@app.callback(
dash.dependencies.Output("continuous_empty_div", "style"),
[
dash.dependencies.Input("usecase_dropdown", "value")
],
prevent_initial_call=True
)
def toggle_continuous(dropdown_value):
predictor = gv.sd.get_predictor_type(dropdown_value)
if predictor == "wellness_score":
return {"display": "none"}
if predictor == "spending_personality":
return {"display": "none"}
return {"height": "650px"}
@app.callback(
[
dash.dependencies.Output("score_button", "children"),
dash.dependencies.Output("score_button", "disabled")
],
[dash.dependencies.Input("interval", "n_intervals")],
prevent_initial_call=True)
def callback_score_button_busy(intervals):
try:
acts = gv.sd.get_active_states()
if acts.exp_score_button_busy_changed:
acts.exp_score_button_busy_changed = False
if acts.score_button_busy:
return [dbc.Spinner(size="sm"), " Scoring..."], True
else:
return "Score", False
else:
return dash.no_update, dash.no_update
except:
return dash.no_update, dash.no_update
# app.clientside_callback(
# dash.dependencies.ClientsideFunction(
# namespace="clientside",
# function_name="json_viewer"
# ),
# dash.dependencies.Output("scoring_text_area", "style"),
# [dash.dependencies.Input("score_buffer", "children")],
# prevent_initial_call=True)
@app.callback(
dash.dependencies.Output("scoring_text_area", "value"),
[
dash.dependencies.Input("score_buffer", "children")
],
prevent_initial_call=True)
def callback_score_buffer(score_b):
try:
j = json.loads(score_b)
pj = json.dumps(j, indent=4, sort_keys=True)
return pj
except Exception as e:
print(e)
return None
@app.callback(
dash.dependencies.Output("graph_dropdown", "options"),
[dash.dependencies.Input("score_buffer", "children")],
prevent_initial_call=True)
def tabs_content_graphing2(children):
try:
jstr = json.loads(children)
value = jstr[0]
flat = functions.json_flatten(value, "")
l = list(flat.keys())
return functions.convert_list(l)
except Exception as e:
print(e)
return None
@app.callback(
dash.dependencies.Output("graphing", "figure"),
[dash.dependencies.Input("graph_dropdown", "value")],
State(component_id="score_buffer", component_property="children"),
prevent_initial_call=True)
def tabs_content_graphing3(graph_dropdown_value, children):
jstr = json.loads(children)
data_points = []
for value in jstr:
flat = functions.json_flatten(value, "")
data_points.append(flat)
xs = []
ys = []
y_string = graph_dropdown_value
for value in data_points:
xs.append("Customer: " + str(value["customer"]))
ys.append(value[y_string])
figure=dict(
data=[
dict(
x=xs,
y=ys,
name=y_string,
marker=dict(
color="rgb(26, 118, 255)"
)
)
],
layout=dict(
title="{} for Customers".format(y_string),
xaxes={"type": "category"}
)
)
return figure
@app.callback(
[
dash.dependencies.Output("graphing_adv_div", "children"),
dash.dependencies.Output("graph_adv_dropdown", "options"),
],
[
dash.dependencies.Input("interval", "n_intervals"),
dash.dependencies.Input("graph_adv_dropdown", "value"),
],
State(component_id="score_buffer", component_property="children"),
State(component_id="graphing_adv_div", component_property="children"),
prevent_initial_call=True)
def tabs_content_graphing4(interval, dropdown_values, scoring_results, children):
global graphing_adv_refresh
if graphing_adv_refresh:
graphing_adv_refresh = False
return "Loading", []
ctx = dash.callback_context
trigger_id = ctx.triggered[0]["prop_id"].split(".")[0]
if dropdown_values == None:
dropdown_values = []
if trigger_id == "graph_adv_dropdown":
graphing_adv_refresh = True
return dash.no_update, dash.no_update
if trigger_id == "interval":
try:
if children == "Loading":
jstr = json.loads(scoring_results)
data_points = []
for value in jstr:
flat = functions.json_flatten(value, "")
data_points.append(flat)
df = pd.DataFrame(data_points)
columns = list(df.columns)
l = [columns]
l.extend(df.values.tolist())
return dash_pivottable.PivotTable(
id="graphing_adv_table",
data=l,
cols=["customer"],
colOrder="key_a_to_z",
rows=[],
rowOrder="key_a_to_z",
rendererName="Line Chart",
aggregatorName="List Unique Values",
vals=[],
unusedOrientationCutoff="Infinity",
hiddenAttributes=dropdown_values
), functions.convert_list(columns)
return dash.no_update, dash.no_update
except Exception as e:
print(e)
return dash.no_update, dash.no_update
@app.callback(
dash.dependencies.Output("scoring_div", "children"),
[dash.dependencies.Input("score_buffer", "children")],
prevent_initial_call=True)
def tabs_content_scoring_tab(children):
try:
jstr = json.loads(children)
data_points = []
for value in jstr:
flat = functions.json_flatten(value, "")
data_points.append(flat)
df = pd.DataFrame(data_points)
return dbc.Table.from_dataframe(df, striped=True, bordered=True, hover=True)
except Exception as e:
print(e)
return None
@app.callback(
dash.dependencies.Output("usecase_toast_div", "children"),
[dash.dependencies.Input("properties_button", "n_clicks")],
State(component_id="usecase_name", component_property="value"),
State(component_id="usecase_runtime_url", component_property="value"),
State(component_id="usecase_api_endpoint", component_property="value"),
State(component_id="properties_textarea", component_property="value"),
prevent_initial_call=True)
def process_properties(n_clicks, usecase_name, runtime_url, api_endpoint, properties):
# try:
gv.sd.preprocess_properties(usecase_name, runtime_url, api_endpoint, properties)
return functions.generate_toast("Successfully uploaded usecase: {}.".format(usecase_name), "Success", "success")
# except Exception as e:
# print(e)
# return functions.generate_toast("Error: Could not upload usecase: {}.".format(e), "Error", "danger")
@app.callback(
dash.dependencies.Output("upload_model", "value"),
[dash.dependencies.Input("upload_model_picker", "contents")],
State(component_id="upload_model_picker", component_property="filename"),
prevent_initial_call=True)
def upload_prep_model(contents, filename):
return filename
@app.callback(
dash.dependencies.Output("upload_fs", "value"),
[dash.dependencies.Input("upload_fs_picker", "contents")],
State(component_id="upload_fs_picker", component_property="filename"),
prevent_initial_call=True)
def upload_prep_fs(contents, filename):
return filename
@app.callback(
dash.dependencies.Output("upload_ad", "value"),
[dash.dependencies.Input("upload_ad_picker", "contents")],
State(component_id="upload_ad_picker", component_property="filename"),
prevent_initial_call=True)
def upload_prep_af(contents, filename):
return filename
@app.callback(
dash.dependencies.Output("wellness_upload_customer_data", "value"),
[dash.dependencies.Input("wellness_customer_upload_picker", "contents")],
State(component_id="wellness_customer_upload_picker", component_property="filename"),
prevent_initial_call=True)
def upload_prep_customers(contents, filename):
return filename
@app.callback(
dash.dependencies.Output("spend_personality_upload_customer_data", "value"),
[dash.dependencies.Input("spend_personality_customer_upload_picker", "contents")],
State(component_id="spend_personality_customer_upload_picker", component_property="filename"),
prevent_initial_call=True)
def upload_prep_customers(contents, filename):
return filename
@app.callback(
dash.dependencies.Output("spend_personality_upload_transaction_data", "value"),
[dash.dependencies.Input("spend_personality_transaction_upload_picker", "contents")],
State(component_id="spend_personality_transaction_upload_picker", component_property="filename"),
prevent_initial_call=True)
def upload_prep_transactions(contents, filename):
return filename
@app.callback(
dash.dependencies.Output("spend_personality_upload_cto_data", "value"),
[dash.dependencies.Input("spend_personality_cto_upload_picker", "contents")],
State(component_id="spend_personality_cto_upload_picker", component_property="filename"),
prevent_initial_call=True)
def upload_prep_cto(contents, filename):
        return filename
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from hrl4in.utils.distributions import DiagGaussianNet, CategoricalNet
from hrl4in.utils.networks import Net
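# High-level (meta) policy: a recurrent Net backbone feeds a diagonal Gaussian
# head that samples subgoals, plus an optional categorical head that selects an
# action mask (e.g. base-only vs. base + arm).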
class MetaPolicy(nn.Module):
def __init__(self,
observation_space,
subgoal_space,
use_action_masks=False,
action_masks_dim=3,
hidden_size=512,
cnn_layers_params=None,
initial_stddev=1.0 / 3.0,
min_stddev=0.0,
stddev_transform=torch.nn.functional.softplus):
super().__init__()
self.net = Net(
observation_space=observation_space,
hidden_size=hidden_size,
cnn_layers_params=cnn_layers_params,
)
assert len(subgoal_space.shape) == 1, 'only supports one dimensional subgoal space'
self.subgoal_distribution = DiagGaussianNet(self.net.output_size,
subgoal_space.shape[0],
subgoal_space,
squash_mean=True,
squash_distribution=False,
initial_stddev=initial_stddev,
min_stddev=min_stddev,
stddev_transform=stddev_transform)
self.use_action_masks = use_action_masks
# base + arm or base-only
if self.use_action_masks:
self.action_mask_distribution = CategoricalNet(self.net.output_size, action_masks_dim)
def forward(self, *x):
raise NotImplementedError
def act(self, observations, rnn_hidden_states, masks, deterministic=False):
value, actor_features, rnn_hidden_states = self.net(observations, rnn_hidden_states, masks)
subgoal_distribution = self.subgoal_distribution(actor_features)
if deterministic:
subgoals = subgoal_distribution.mode()
else:
subgoals = subgoal_distribution.sample()
subgoal_log_probs = subgoal_distribution.log_probs(subgoals)
# print("mean", subgoal_distribution.loc)
# print("std", subgoal_distribution.scale)
if self.use_action_masks:
action_mask_distribution = self.action_mask_distribution(actor_features)
if deterministic:
action_mask_indices = action_mask_distribution.mode()
else:
action_mask_indices = action_mask_distribution.sample()
action_mask_log_probs = action_mask_distribution.log_probs(action_mask_indices)
else:
action_mask_indices = torch.zeros_like(subgoal_log_probs, dtype=torch.long)
action_mask_log_probs = torch.zeros_like(subgoal_log_probs)
return (
value,
subgoals,
subgoal_log_probs,
action_mask_indices,
action_mask_log_probs,
rnn_hidden_states,
)
def get_value(self, observations, rnn_hidden_states, masks):
value, _, _ = self.net(observations, rnn_hidden_states, masks)
return value
def evaluate_actions(self,
observations,
rnn_hidden_states,
masks,
subgoals,
action_mask_indices
):
value, actor_features, rnn_hidden_states = self.net(observations, rnn_hidden_states, masks)
subgoal_distribution = self.subgoal_distribution(actor_features)
subgoal_log_probs = subgoal_distribution.log_probs(subgoals)
subgoal_dist_entropy = subgoal_distribution.entropy()
if self.use_action_masks:
action_mask_distribution = self.action_mask_distribution(actor_features)
action_mask_log_probs = action_mask_distribution.log_probs(action_mask_indices)
action_mask_dist_entropy = action_mask_distribution.entropy()
else:
action_mask_log_probs = torch.zeros_like(subgoal_log_probs)
action_mask_dist_entropy = torch.zeros_like(subgoal_dist_entropy)
action_log_probs = subgoal_log_probs + action_mask_log_probs
dist_entropy = subgoal_dist_entropy + action_mask_dist_entropy
return (
value,
action_log_probs,
dist_entropy,
rnn_hidden_states
)
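# Minimal usage sketch (constructor arguments and tensor shapes are assumptions):
#
#   policy = MetaPolicy(observation_space, subgoal_space, hidden_size=512)
#   (value, subgoals, subgoal_log_probs,
#    mask_indices, mask_log_probs, rnn_hidden) = policy.act(
#       observations, rnn_hidden_states, masks, deterministic=True)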
from django import template
from git import Repo
from os import environ, getcwd
register = template.Library()
@register.simple_tag
def dfirtrack_version():
versionnumber = 'v0.4.6'
return versionnumber
"""
The following workaround is necessary for pull requests:
GitHub Actions does some kind of `git checkout` on PRs, which causes
'TypeError: HEAD is a detached symbolic reference as it points to...'.
Multiple issues exist on this in https://github.com/gitpython-developers/GitPython;
it is nominally solved there, but still does not work here.
"""
# check for GitHub action
if not "CI" in environ:
@register.simple_tag
def dfirtrack_branch():
# not in GitHub action --> get and return current branch
working_dir = getcwd()
repo = Repo(working_dir)
branch = repo.active_branch
return branch
else:
@register.simple_tag
def dfirtrack_branch():
# in GitHub action --> return dummy value to avoid errors
return "unknown"
# check for GitHub action
@register.simple_tag
def github_ci():
if not "CI" in environ:
# not in GitHub action --> show branch in maintemplate
ci = False
else:
# in GitHub action --> do not show branch in maintemplate
ci = True
return ci
# mitama-org/mitama-py
import unittest
from mitama._extra import _classproperty
class TestClassProperty(unittest.TestCase):
def test_getter(self):
class ClassA:
@_classproperty
def value(cls):
return "hello, world!"
self.assertEqual(ClassA.value, "hello, world!")
#!/usr/bin/env python
# example gtkvscrollbar.py
import pygtk
pygtk.require('2.0')
import gtk
class VScrollbar:
def delete_event(self, widget, event, data=None):
gtk.main_quit()
return False
def value_changed(self, range, data=None):
self.label.set_text("Value: " + str(self.vscrollbar.get_adjustment().value))
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("VScrollbar")
self.window.connect("delete_event", self.delete_event)
self.window.set_border_width(12)
self.window.resize(200, 400)
hbox = gtk.HBox()
hbox.set_spacing(6)
self.window.add(hbox)
self.label = gtk.Label("Value: 0.0")
hbox.pack_start(self.label, True, True, 0)
self.vscrollbar = gtk.VScrollbar(gtk.Adjustment(0, 0, 119, 10, 20, 50))
self.vscrollbar.connect("value_changed", self.value_changed)
hbox.pack_start(self.vscrollbar, False, False, 0)
hbox.show_all()
self.window.show()
def main():
gtk.main()
return 0
if __name__ == "__main__":
VScrollbar()
main()
# J08nY/sec-certs
import copy
import itertools
import json
import locale
import shutil
import tempfile
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Callable, ClassVar, Dict, Iterator, List, Mapping, Optional, Set, Tuple, Union
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup, Tag
from sec_certs import helpers as helpers
from sec_certs import parallel_processing as cert_processing
from sec_certs.config.configuration import config
from sec_certs.dataset.dataset import Dataset, logger
from sec_certs.dataset.protection_profile import ProtectionProfileDataset
from sec_certs.model.dependency_finder import DependencyFinder
from sec_certs.sample.cc_maintenance_update import CommonCriteriaMaintenanceUpdate
from sec_certs.sample.certificate import Certificate
from sec_certs.sample.common_criteria import CommonCriteriaCert
from sec_certs.sample.protection_profile import ProtectionProfile
from sec_certs.serialization.json import ComplexSerializableType, CustomJSONDecoder, serialize
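# Dataset of Common Criteria certificates. Progress through the pipeline is
# tracked in a small DatasetInternalState flag object; the class knows how to
# scrape the CC portal (CSV + HTML), merge the resulting records, and expose
# them as a pandas DataFrame.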
class CCDataset(Dataset, ComplexSerializableType):
@dataclass
class DatasetInternalState(ComplexSerializableType):
meta_sources_parsed: bool = False
pdfs_downloaded: bool = False
pdfs_converted: bool = False
certs_analyzed: bool = False
        def __bool__(self):
            # Test the attribute values: iterating vars() directly would check the
            # (always truthy) attribute names and make this return True unconditionally.
            return any(vars(self).values())
certs: Dict[str, "CommonCriteriaCert"]
# TODO: Figure out how to type this. The problem is that this breaks covariance of the types, which mypy doesn't allow.
def __init__(
self,
certs: Mapping[str, "Certificate"],
root_dir: Path,
name: str = "dataset name",
description: str = "dataset_description",
state: Optional[DatasetInternalState] = None,
):
super().__init__(certs, root_dir, name, description)
if state is None:
state = self.DatasetInternalState()
self.state = state
def __iter__(self) -> Iterator[CommonCriteriaCert]:
yield from self.certs.values()
def to_dict(self):
return {**{"state": self.state}, **super().to_dict()}
def to_pandas(self):
df = pd.DataFrame([x.pandas_tuple for x in self.certs.values()], columns=CommonCriteriaCert.pandas_columns)
df = df.set_index("dgst")
df.not_valid_before = pd.to_datetime(df.not_valid_before, infer_datetime_format=True)
df.not_valid_after = pd.to_datetime(df.not_valid_after, infer_datetime_format=True)
df = df.astype({"category": "category", "status": "category", "scheme": "category"})
df = df.fillna(value=np.nan)
return df
@classmethod
def from_dict(cls, dct: Dict):
dset = super().from_dict(dct)
dset.state = copy.deepcopy(dct["state"])
return dset
@Dataset.root_dir.setter # type: ignore
def root_dir(self, new_dir: Union[str, Path]):
old_dset = copy.deepcopy(self)
Dataset.root_dir.fset(self, new_dir) # type: ignore
self.set_local_paths()
if self.state and old_dset.root_dir != Path(".."):
logger.info(f"Changing root dir of partially processed dataset. All contents will get copied to {new_dir}")
self.copy_dataset_contents(old_dset)
self.to_json()
def copy_dataset_contents(self, old_dset: "CCDataset"):
if old_dset.state.meta_sources_parsed:
try:
shutil.copytree(old_dset.web_dir, self.web_dir)
except FileNotFoundError as e:
logger.warning(f"Attempted to copy non-existing file: {e}")
if old_dset.state.pdfs_downloaded:
try:
shutil.copytree(old_dset.certs_dir, self.certs_dir)
except FileNotFoundError as e:
logger.warning(f"Attempted to copy non-existing file: {e}")
if old_dset.state.certs_analyzed:
try:
shutil.copytree(old_dset.auxillary_datasets_dir, self.auxillary_datasets_dir)
except FileNotFoundError as e:
logger.warning(f"Attempted to copy non-existing file: {e}")
@property
def certs_dir(self) -> Path:
return self.root_dir / "certs"
@property
def reports_dir(self) -> Path:
return self.certs_dir / "reports"
@property
def reports_pdf_dir(self) -> Path:
return self.reports_dir / "pdf"
@property
def reports_txt_dir(self) -> Path:
return self.reports_dir / "txt"
@property
def targets_dir(self) -> Path:
return self.certs_dir / "targets"
@property
def targets_pdf_dir(self) -> Path:
return self.targets_dir / "pdf"
@property
def targets_txt_dir(self) -> Path:
return self.targets_dir / "txt"
@property
def pp_dataset_path(self) -> Path:
return self.auxillary_datasets_dir / "pp_dataset.json"
BASE_URL: ClassVar[str] = "https://www.commoncriteriaportal.org"
HTML_PRODUCTS_URL = {
"cc_products_active.html": BASE_URL + "/products/",
"cc_products_archived.html": BASE_URL + "/products/index.cfm?archived=1",
}
HTML_LABS_URL = {"cc_labs.html": BASE_URL + "/labs"}
CSV_PRODUCTS_URL = {
"cc_products_active.csv": BASE_URL + "/products/certified_products.csv",
"cc_products_archived.csv": BASE_URL + "/products/certified_products-archived.csv",
}
PP_URL = {
"cc_pp_active.html": BASE_URL + "/pps/",
"cc_pp_collaborative.html": BASE_URL + "/pps/collaborativePP.cfm?cpp=1",
"cc_pp_archived.html": BASE_URL + "/pps/index.cfm?archived=1",
}
PP_CSV = {"cc_pp_active.csv": BASE_URL + "/pps/pps.csv", "cc_pp_archived.csv": BASE_URL + "/pps/pps-archived.csv"}
@property
def active_html_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.HTML_PRODUCTS_URL.items() if "active" in y]
@property
def archived_html_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.HTML_PRODUCTS_URL.items() if "archived" in y]
@property
def active_csv_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.CSV_PRODUCTS_URL.items() if "active" in y]
@property
def archived_csv_tuples(self) -> List[Tuple[str, Path]]:
return [(x, self.web_dir / y) for y, x in self.CSV_PRODUCTS_URL.items() if "archived" in y]
@classmethod
def from_web_latest(cls):
with tempfile.TemporaryDirectory() as tmp_dir:
dset_path = Path(tmp_dir) / "cc_latest_dataset.json"
helpers.download_file(config.cc_latest_snapshot, dset_path)
return cls.from_json(dset_path)
def set_local_paths(self):
for cert in self:
cert.set_local_paths(self.reports_pdf_dir, self.targets_pdf_dir, self.reports_txt_dir, self.targets_txt_dir)
def _merge_certs(self, certs: Dict[str, "CommonCriteriaCert"], cert_source: Optional[str] = None):
"""
Merges dictionary of certificates into the dataset. Assuming they all are CommonCriteria certificates
"""
new_certs = {x.dgst: x for x in certs.values() if x not in self}
certs_to_merge = [x for x in certs.values() if x in self]
self.certs.update(new_certs)
for crt in certs_to_merge:
self[crt.dgst].merge(crt, cert_source)
logger.info(f"Added {len(new_certs)} new and merged further {len(certs_to_merge)} certificates to the dataset.")
def download_csv_html_resources(self, get_active: bool = True, get_archived: bool = True):
self.web_dir.mkdir(parents=True, exist_ok=True)
html_items = []
csv_items = []
if get_active is True:
html_items.extend(self.active_html_tuples)
csv_items.extend(self.active_csv_tuples)
if get_archived is True:
html_items.extend(self.archived_html_tuples)
csv_items.extend(self.archived_csv_tuples)
html_urls, html_paths = [x[0] for x in html_items], [x[1] for x in html_items]
csv_urls, csv_paths = [x[0] for x in csv_items], [x[1] for x in csv_items]
logger.info("Downloading required csv and html files.")
self._download_parallel(html_urls, html_paths)
self._download_parallel(csv_urls, csv_paths)
@serialize
def process_protection_profiles(self, to_download: bool = True, keep_metadata: bool = True):
logger.info("Processing protection profiles.")
constructor: Dict[bool, Callable[..., ProtectionProfileDataset]] = {
True: ProtectionProfileDataset.from_web,
False: ProtectionProfileDataset.from_json,
}
if to_download is True and not self.auxillary_datasets_dir.exists():
self.auxillary_datasets_dir.mkdir()
pp_dataset = constructor[to_download](self.pp_dataset_path)
for cert in self:
if cert.protection_profiles is None:
raise RuntimeError("Building of the dataset probably failed - this should not be happening.")
cert.protection_profiles = {pp_dataset.pps.get((x.pp_name, x.pp_link), x) for x in cert.protection_profiles}
if not keep_metadata:
self.pp_dataset_path.unlink()
@serialize
def get_certs_from_web(
self, to_download: bool = True, keep_metadata: bool = True, get_active: bool = True, get_archived: bool = True
):
"""
Parses all metadata about certificates
"""
if to_download is True:
self.download_csv_html_resources(get_active, get_archived)
logger.info("Adding CSV certificates to CommonCriteria dataset.")
csv_certs = self._get_all_certs_from_csv(get_active, get_archived)
self._merge_certs(csv_certs, cert_source="csv")
        # TODO: Somewhere along the way, 3 certificates get lost. Investigate and fix.
logger.info("Adding HTML certificates to CommonCriteria dataset.")
html_certs = self._get_all_certs_from_html(get_active, get_archived)
self._merge_certs(html_certs, cert_source="html")
logger.info(f"The resulting dataset has {len(self)} certificates.")
if not keep_metadata:
shutil.rmtree(self.web_dir)
self.set_local_paths()
self.state.meta_sources_parsed = True
def _get_all_certs_from_csv(self, get_active: bool, get_archived: bool) -> Dict[str, "CommonCriteriaCert"]:
"""
Creates dictionary of new certificates from csv sources.
"""
csv_sources = list(self.CSV_PRODUCTS_URL.keys())
csv_sources = [x for x in csv_sources if "active" not in x or get_active]
csv_sources = [x for x in csv_sources if "archived" not in x or get_archived]
new_certs = {}
for file in csv_sources:
partial_certs = self._parse_single_csv(self.web_dir / file)
logger.info(f"Parsed {len(partial_certs)} certificates from: {file}")
new_certs.update(partial_certs)
return new_certs
@staticmethod
def _parse_single_csv(file: Path) -> Dict[str, "CommonCriteriaCert"]:
"""
Using pandas, this parses a single CSV file.
"""
def map_ip_to_hostname(url: str) -> str:
if not url:
return url
tokens = url.split("/")
relative_path = "/" + "/".join(tokens[3:])
return CCDataset.BASE_URL + relative_path
def _get_primary_key_str(row: Tag):
prim_key = row["category"] + row["cert_name"] + row["report_link"]
return prim_key
if "active" in str(file):
cert_status = "active"
else:
cert_status = "archived"
csv_header = [
"category",
"cert_name",
"manufacturer",
"scheme",
"security_level",
"protection_profiles",
"not_valid_before",
"not_valid_after",
"report_link",
"st_link",
"maintenance_date",
"maintenance_title",
"maintenance_report_link",
"maintenance_st_link",
]
# TODO: Now skipping bad lines, smarter heuristics to be built for dumb files
df = pd.read_csv(file, engine="python", encoding="windows-1252", error_bad_lines=False)
df = df.rename(columns={x: y for (x, y) in zip(list(df.columns), csv_header)})
df["is_maintenance"] = ~df.maintenance_title.isnull()
df = df.fillna(value="")
df[["not_valid_before", "not_valid_after", "maintenance_date"]] = df[
["not_valid_before", "not_valid_after", "maintenance_date"]
].apply(pd.to_datetime)
df["dgst"] = df.apply(lambda row: helpers.get_first_16_bytes_sha256(_get_primary_key_str(row)), axis=1)
df_base = df.loc[~df.is_maintenance].copy()
df_main = df.loc[df.is_maintenance].copy()
df_base.report_link = df_base.report_link.map(map_ip_to_hostname)
df_base.st_link = df_base.st_link.map(map_ip_to_hostname)
df_main.maintenance_report_link = df_main.maintenance_report_link.map(map_ip_to_hostname)
df_main.maintenance_st_link = df_main.maintenance_st_link.map(map_ip_to_hostname)
n_all = len(df_base)
n_deduplicated = len(df_base.drop_duplicates(subset=["dgst"]))
if (n_dup := n_all - n_deduplicated) > 0:
logger.warning(f"The CSV {file} contains {n_dup} duplicates by the primary key.")
df_base = df_base.drop_duplicates(subset=["dgst"])
df_main = df_main.drop_duplicates()
profiles = {
x.dgst: set(
[ProtectionProfile(pp_name=y) for y in helpers.sanitize_protection_profiles(x.protection_profiles)]
)
for x in df_base.itertuples()
}
updates: Dict[str, Set] = {x.dgst: set() for x in df_base.itertuples()}
for x in df_main.itertuples():
updates[x.dgst].add(
CommonCriteriaCert.MaintenanceReport(
x.maintenance_date.date(), x.maintenance_title, x.maintenance_report_link, x.maintenance_st_link
)
)
certs = {
x.dgst: CommonCriteriaCert(
cert_status,
x.category,
x.cert_name,
x.manufacturer,
x.scheme,
x.security_level,
x.not_valid_before,
x.not_valid_after,
x.report_link,
x.st_link,
None,
None,
profiles.get(x.dgst, None),
updates.get(x.dgst, None),
None,
None,
None,
)
for x in df_base.itertuples()
}
return certs
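    # Note (added for clarity): the digest used as the primary key above is the first
    # 16 bytes of SHA-256 over category + cert_name + report_link (see
    # _get_primary_key_str), so re-parsing the same CSV yields stable dgst keys and
    # duplicates can be dropped per dgst, as done in _parse_single_csv.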
def _get_all_certs_from_html(self, get_active: bool, get_archived: bool) -> Dict[str, "CommonCriteriaCert"]:
"""
Prepares dictionary of certificates from all html files.
"""
html_sources = list(self.HTML_PRODUCTS_URL.keys())
if get_active is False:
html_sources = [x for x in html_sources if "active" not in x]
if get_archived is False:
html_sources = [x for x in html_sources if "archived" not in x]
new_certs = {}
for file in html_sources:
partial_certs = self._parse_single_html(self.web_dir / file)
logger.info(f"Parsed {len(partial_certs)} certificates from: {file}")
new_certs.update(partial_certs)
return new_certs
@staticmethod
def _parse_single_html(file: Path) -> Dict[str, "CommonCriteriaCert"]:
"""
Prepares a dictionary of certificates from a single html file.
"""
def _get_timestamp_from_footer(footer):
locale.setlocale(locale.LC_ALL, "en_US")
footer_text = list(footer.stripped_strings)[0]
date_string = footer_text.split(",")[1:3]
time_string = footer_text.split(",")[3].split(" at ")[1]
formatted_datetime = date_string[0] + date_string[1] + " " + time_string
return datetime.strptime(formatted_datetime, " %B %d %Y %I:%M %p")
def _parse_table(
soup: BeautifulSoup, cert_status: str, table_id: str, category_string: str
) -> Dict[str, "CommonCriteriaCert"]:
tables = soup.find_all("table", id=table_id)
assert len(tables) <= 1
if not tables:
return {}
table = tables[0]
rows = list(table.find_all("tr"))
# header, footer = rows[0], rows[1]
body = rows[2:]
# TODO: It's possible to obtain timestamp of the moment when the list was generated. It's identical for each table and should thus only be obtained once. Not necessarily in each table
# timestamp = _get_timestamp_from_footer(footer)
            # TODO: Do we have use for number of expected certs? We get rid of duplicates, so no use for assert expected == actual
# caption_str = str(table.findAll('caption'))
# n_expected_certs = int(caption_str.split(category_string + ' – ')[1].split(' Certified Products')[0])
table_certs = {
x.dgst: x for x in [CommonCriteriaCert.from_html_row(row, cert_status, category_string) for row in body]
}
return table_certs
if "active" in str(file):
cert_status = "active"
else:
cert_status = "archived"
cc_cat_abbreviations = ["AC", "BP", "DP", "DB", "DD", "IC", "KM", "MD", "MF", "NS", "OS", "OD", "DG", "TC"]
cc_table_ids = ["tbl" + x for x in cc_cat_abbreviations]
cc_categories = [
"Access Control Devices and Systems",
"Boundary Protection Devices and Systems",
"Data Protection",
"Databases",
"Detection Devices and Systems",
"ICs, Smart Cards and Smart Card-Related Devices and Systems",
"Key Management Systems",
"Mobility",
"Multi-Function Devices",
"Network and Network-Related Devices and Systems",
"Operating Systems",
"Other Devices and Systems",
"Products for Digital Signatures",
"Trusted Computing",
]
cat_dict = {x: y for (x, y) in zip(cc_table_ids, cc_categories)}
with file.open("r") as handle:
soup = BeautifulSoup(handle, "html5lib")
certs = {}
for key, val in cat_dict.items():
certs.update(_parse_table(soup, cert_status, key, val))
return certs
def _download_reports(self, fresh=True):
self.reports_pdf_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.report_is_ok_to_download(fresh) and x.report_link]
cert_processing.process_parallel(
CommonCriteriaCert.download_pdf_report,
certs_to_process,
config.n_threads,
progress_bar_desc="Downloading reports",
)
def _download_targets(self, fresh=True):
self.targets_pdf_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.report_is_ok_to_download(fresh)]
cert_processing.process_parallel(
CommonCriteriaCert.download_pdf_target,
certs_to_process,
config.n_threads,
progress_bar_desc="Downloading targets",
)
@serialize
def download_all_pdfs(self, fresh: bool = True):
if self.state.meta_sources_parsed is False:
logger.error("Attempting to download pdfs while not having csv/html meta-sources parsed. Returning.")
return
logger.info("Downloading CC sample reports")
self._download_reports(fresh)
logger.info("Downloading CC security targets")
self._download_targets(fresh)
if fresh is True:
logger.info("Attempting to re-download failed report links.")
self._download_reports(False)
logger.info("Attempting to re-download failed security target links.")
self._download_targets(False)
self.state.pdfs_downloaded = True
def _convert_reports_to_txt(self, fresh: bool = True):
self.reports_txt_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.report_is_ok_to_convert(fresh)]
cert_processing.process_parallel(
CommonCriteriaCert.convert_report_pdf,
certs_to_process,
config.n_threads,
progress_bar_desc="Converting reports to txt",
)
def _convert_targets_to_txt(self, fresh: bool = True):
self.targets_txt_dir.mkdir(parents=True, exist_ok=True)
certs_to_process = [x for x in self if x.state.st_is_ok_to_convert(fresh)]
cert_processing.process_parallel(
CommonCriteriaCert.convert_target_pdf,
certs_to_process,
config.n_threads,
progress_bar_desc="Converting targets to txt",
)
@serialize
def convert_all_pdfs(self, fresh: bool = True):
if self.state.pdfs_downloaded is False:
logger.info("Attempting to convert pdf while not having them downloaded. Returning.")
return
logger.info("Converting CC sample reports to .txt")
self._convert_reports_to_txt(fresh)
logger.info("Converting CC security targets to .txt")
self._convert_targets_to_txt(fresh)
if fresh is True:
logger.info("Attempting to re-convert failed report pdfs")
self._convert_reports_to_txt(False)
logger.info("Attempting to re-convert failed target pdfs")
self._convert_targets_to_txt(False)
self.state.pdfs_converted = True
def update_with_certs(self, certs: List[CommonCriteriaCert]):
if any([x not in self for x in certs]):
logger.warning("Updating dataset with certificates outside of the dataset!")
self.certs.update({x.dgst: x for x in certs})
def _extract_report_metadata(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_report_pdf_metadata,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting report metadata",
)
self.update_with_certs(processed_certs)
def _extract_targets_metadata(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.st_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_st_pdf_metadata,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting target metadata",
)
self.update_with_certs(processed_certs)
def extract_pdf_metadata(self, fresh: bool = True):
logger.info("Extracting pdf metadata from CC dataset")
self._extract_report_metadata(fresh)
self._extract_targets_metadata(fresh)
def _extract_report_frontpage(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_report_pdf_frontpage,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting report frontpages",
)
self.update_with_certs(processed_certs)
def _extract_targets_frontpage(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.st_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_st_pdf_frontpage,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting target frontpages",
)
self.update_with_certs(processed_certs)
def extract_pdf_frontpage(self, fresh: bool = True):
logger.info("Extracting pdf frontpages from CC dataset.")
self._extract_report_frontpage(fresh)
self._extract_targets_frontpage(fresh)
def _extract_report_keywords(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_report_pdf_keywords,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting report keywords",
)
self.update_with_certs(processed_certs)
def _extract_targets_keywords(self, fresh: bool = True):
certs_to_process = [x for x in self if x.state.st_is_ok_to_analyze(fresh)]
processed_certs = cert_processing.process_parallel(
CommonCriteriaCert.extract_st_pdf_keywords,
certs_to_process,
config.n_threads,
use_threading=False,
progress_bar_desc="Extracting target keywords",
)
self.update_with_certs(processed_certs)
def extract_pdf_keywords(self, fresh: bool = True):
logger.info("Extracting pdf keywords from CC dataset.")
self._extract_report_keywords(fresh)
self._extract_targets_keywords(fresh)
def _extract_data(self, fresh: bool = True):
logger.info("Extracting various stuff from converted txt filed from CC dataset.")
self.extract_pdf_metadata(fresh)
self.extract_pdf_frontpage(fresh)
self.extract_pdf_keywords(fresh)
if fresh is True:
logger.info("Attempting to re-extract failed data from report txts")
self._extract_report_metadata(False)
self._extract_report_frontpage(False)
self._extract_report_keywords(False)
logger.info("Attempting to re-extract failed data from ST txts")
self._extract_targets_metadata(False)
self._extract_targets_frontpage(False)
self._extract_targets_keywords(False)
def _compute_cert_labs(self):
logger.info("Deriving information about laboratories involved in certification.")
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze()]
for cert in certs_to_process:
cert.compute_heuristics_cert_lab()
def _compute_cert_ids(self):
logger.info("Deriving information about sample ids from pdf scan.")
certs_to_process = [x for x in self if x.state.report_is_ok_to_analyze()]
for cert in certs_to_process:
cert.compute_heuristics_cert_id()
def _compute_heuristics(self, use_nist_cpe_matching_dict: bool = True):
self._compute_cert_labs()
self._compute_cert_ids()
self._compute_dependencies()
self.compute_cpe_heuristics()
self.compute_related_cves(use_nist_cpe_matching_dict=use_nist_cpe_matching_dict)
def _compute_dependencies(self):
finder = DependencyFinder()
finder.fit(self.certs)
for dgst in self.certs:
self.certs[dgst].heuristics.directly_affecting = finder.get_directly_affecting(dgst)
self.certs[dgst].heuristics.indirectly_affecting = finder.get_indirectly_affecting(dgst)
self.certs[dgst].heuristics.directly_affected_by = finder.get_directly_affected_by(dgst)
self.certs[dgst].heuristics.indirectly_affected_by = finder.get_indirectly_affected_by(dgst)
@serialize
def analyze_certificates(self, fresh: bool = True):
if self.state.pdfs_converted is False:
logger.info(
"Attempting run analysis of txt files while not having the pdf->txt conversion done. Returning."
)
return
self._extract_data(fresh)
self._compute_heuristics()
self.state.certs_analyzed = True
def get_certs_from_name(self, cert_name: str) -> List[Certificate]:
return [crt for crt in self if crt.name == cert_name]
def process_maintenance_updates(self):
maintained_certs: List[CommonCriteriaCert] = [x for x in self if x.maintenance_updates]
updates = list(
itertools.chain.from_iterable(
[CommonCriteriaMaintenanceUpdate.get_updates_from_cc_cert(x) for x in maintained_certs]
)
)
update_dset: CCDatasetMaintenanceUpdates = CCDatasetMaintenanceUpdates(
{x.dgst: x for x in updates}, root_dir=self.certs_dir / "maintenance", name="Maintenance updates"
)
update_dset.set_local_paths()
update_dset.download_all_pdfs()
update_dset.convert_all_pdfs()
update_dset._extract_data()
def generate_cert_name_keywords(self) -> Set[str]:
df = self.to_pandas()
certificate_names = set(df["name"])
keywords = set(itertools.chain.from_iterable([x.lower().split(" ") for x in certificate_names]))
keywords.add("1.02.013")
return {x for x in keywords if len(x) > config.minimal_token_length}
class CCDatasetMaintenanceUpdates(CCDataset, ComplexSerializableType):
"""
Should be used merely for actions related to Maintenance updates: download pdfs, convert pdfs, extract data from pdfs
"""
# TODO: Types - if I use dictionary in CCDataset, I can't use more specific dictionary here (otherwise the CCDataset
# one would have to be a Mapping - not mutable)
certs: Dict[str, "CommonCriteriaMaintenanceUpdate"] # type: ignore
def __init__(
self,
certs: Mapping[str, "Certificate"],
root_dir: Path,
name: str = "dataset name",
description: str = "dataset_description",
state: Optional[CCDataset.DatasetInternalState] = None,
):
super().__init__(certs, root_dir, name, description, state)
self.state.meta_sources_parsed = True
@property
def certs_dir(self) -> Path:
return self.root_dir
def __iter__(self) -> Iterator[CommonCriteriaMaintenanceUpdate]:
yield from self.certs.values()
def _compute_heuristics(self, download_fresh_cpes: bool = False):
raise NotImplementedError
def compute_related_cves(self, download_fresh_cves: bool = False):
raise NotImplementedError
@classmethod
def from_json(cls, input_path: Union[str, Path]):
input_path = Path(input_path)
with input_path.open("r") as handle:
dset = json.load(handle, cls=CustomJSONDecoder)
return dset
def to_pandas(self):
df = pd.DataFrame(
[x.pandas_tuple for x in self.certs.values()], columns=CommonCriteriaMaintenanceUpdate.pandas_columns
)
df = df.set_index("dgst")
df.index.name = "dgst"
df.maintenance_date = pd.to_datetime(df.maintenance_date, infer_datetime_format=True)
df = df.fillna(value=np.nan)
return df
@classmethod
def from_web_latest(cls):
with tempfile.TemporaryDirectory() as tmp_dir:
dset_path = Path(tmp_dir) / "cc_maintenances_latest_dataset.json"
helpers.download_file(config.cc_maintenances_latest_snapshot, dset_path)
return cls.from_json(dset_path)
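# --- Usage sketch (illustrative only) -----------------------------------------
# A minimal end-to-end run of the pipeline defined above. The constructor
# arguments mirror CCDatasetMaintenanceUpdates.__init__ (certs, root_dir, name,
# description); the path and names below are placeholders, not part of the
# original module.
if __name__ == "__main__":
    dset = CCDataset({}, Path("./cc_dataset"), "CommonCriteria dataset", "example run")
    dset.get_certs_from_web()      # parse CSV + HTML metadata
    dset.download_all_pdfs()       # fetch certification reports and security targets
    dset.convert_all_pdfs()        # pdf -> txt
    dset.analyze_certificates()    # metadata/frontpage/keyword extraction + heuristics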
| StarcoderdataPython |
1635367 | from decode_and_calculate import decode_and_calculate, MODEL
from time import strftime, localtime
import cv2 as cv
from flask import Flask, render_template, request, Response, send_file, redirect, url_for
import os
# Silence TensorFlow log
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
app = Flask(__name__)
class Camera(object):
RESIZE_RATIO = 1.0
def __init__(self):
self.video = cv.VideoCapture(0)
def __del__(self):
self.video.release()
def get_frame(self):
success, frame = self.video.read()
if not success:
return None
if (Camera.RESIZE_RATIO != 1):
frame = cv.resize(frame, None, fx=Camera.RESIZE_RATIO,
fy=Camera.RESIZE_RATIO)
return frame
def get_feed(self):
frame = self.get_frame()
if frame is not None:
ret, jpeg = cv.imencode('.jpg', frame)
return jpeg.tobytes()
def capture(self):
frame = self.get_frame()
return frame
camera = None
model = None
def get_camera():
global camera
if not camera:
camera = Camera()
return camera
@app.route('/')
def root():
return redirect(url_for('index'))
@app.route('/index/')
def index():
return render_template('index.html', expression=None, calculated=None)
def gen(camera):
    while True:
        frame = camera.get_feed()
        if frame is None:
            continue  # camera read failed; skip instead of yielding None
        # One JPEG frame per chunk of the multipart/x-mixed-replace stream.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed/')
def video_feed():
camera = get_camera()
return Response(gen(camera),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/capture/', methods=['GET', 'POST'])
def capture():
camera = get_camera()
image = camera.capture()
expression, calculated = decode_and_calculate(image, model)
del camera
return render_template('index.html', expression=expression, calculated=str(calculated))
def main():
global model
from keras.models import load_model
model = load_model(MODEL)
app.run(host='0.0.0.0', port=8080, debug=True)
if __name__ == '__main__':
main()
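# Usage note (added): running this module starts the development server on port
# 8080 (see main() above); the routes defined above can then be exercised from a
# browser, e.g.
#   http://localhost:8080/index/    live camera page with the /video_feed/ stream
#   http://localhost:8080/capture/  grab a frame and run decode_and_calculate on it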
| StarcoderdataPython |
164013 | <reponame>eternalflow/push-money<filename>minter/api.py
import logging
from time import sleep
from mintersdk.minterapi import MinterAPI
from requests import ReadTimeout, ConnectTimeout, HTTPError
from helpers.misc import retry
class MinterAPIException(Exception):
def __init__(self, response):
err = response['error']
self.code = err.get('tx_result', {}).get('code') or err.get('code')
self.message = err.get('tx_result', {}).get('log') or err.get('message')
class CustomMinterAPI(MinterAPI):
"""
    A rough wrapper around MinterAPI from the U-Node SDK
    - retries requests if the API does not respond
    - on success, returns the contents of the 'result' key
    - raises MinterAPIException only when the 'result' key is missing from the API response
    send_tx:
    - returns the hash with the usual Mt prefix plus the transaction hash in lowercase
    - same as send_transaction, but with wait=True it waits for the transaction to be executed successfully
"""
to_handle = ReadTimeout, ConnectTimeout, ConnectionError, HTTPError, ValueError, KeyError
headers = {}
@retry(to_handle, tries=3, delay=0.5, backoff=2)
def _request(self, command, request_type='get', **kwargs):
r = super()._request(command, request_type=request_type, **kwargs)
if 'result' not in r:
logging.info(f'Minter API Exception {r}')
raise MinterAPIException(r)
return r['result']
def get_addresses(self, addresses):
if not addresses:
return []
return self._request('addresses', params={'addresses': str(addresses).replace("'", '"')})
def send_tx(self, tx, wait=False):
r = super().send_transaction(tx.signed_tx)
if wait:
self._wait_tx(r['hash'].lower())
r['hash'] = 'Mt' + r['hash'].lower()
return r
def _wait_tx(self, tx_hash):
while True:
try:
self.get_transaction(tx_hash)
except MinterAPIException:
sleep(1)
continue
break
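# --- Usage sketch (illustrative only) ------------------------------------------
# Assumes the base MinterAPI constructor takes the node URL as its first argument,
# as in the U-Node SDK; the URL and address below are placeholders.
if __name__ == "__main__":
    api = CustomMinterAPI("http://example-node:8841")
    try:
        print(api.get_addresses(["Mx0000000000000000000000000000000000000000"]))
    except MinterAPIException as exc:
        logging.error("API error %s: %s", exc.code, exc.message)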
| StarcoderdataPython |
539 | import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
TestAlloc,
TestComparison,
TestJoinAndSplit,
TestReshape,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
GpuContiguous,
GpuEye,
GpuFromHost,
GpuJoin,
GpuReshape,
GpuSplit,
GpuToGpu,
GpuTri,
HostFromGpu,
gpu_contiguous,
gpu_join,
host_from_gpu,
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
def inplace_func(
inputs,
outputs,
mode=None,
allow_input_downcast=False,
on_unused_input="raise",
name=None,
):
if mode is None:
mode = mode_with_gpu
return theano.function(
inputs,
outputs,
mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input,
name=name,
)
def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
from theano.tensor.sharedvar import scalar_constructor, tensor_constructor
for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):
try:
return c(
value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs
)
except TypeError:
continue
def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1
dtype = kwargs.pop("dtype", theano.config.floatX)
cls = kwargs.pop("cls", None)
if len(kwargs) != 0:
raise TypeError("Unexpected argument %s", list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name))
def makeTester(
name,
op,
gpu_op,
cases,
checks=None,
mode_gpu=mode_with_gpu,
mode_nogpu=mode_without_gpu,
skip=False,
eps=1e-10,
):
if checks is None:
checks = {}
_op = op
_gpu_op = gpu_op
_cases = cases
_skip = skip
_checks = checks
class Checker(utt.OptimizationTestMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setup_method(self):
eval(self.__class__.__module__ + "." + self.__class__.__name__)
def test_all(self):
if skip:
pytest.skip(skip)
for testname, inputs in cases.items():
for _ in range(len(inputs)):
if type(inputs[_]) is float:
inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
inputs_ref = [theano.shared(inp) for inp in inputs]
inputs_tst = [theano.shared(inp) for inp in inputs]
try:
node_ref = safe_make_node(self.op, *inputs_ref)
node_tst = safe_make_node(self.op, *inputs_tst)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while making " "a node with inputs %s"
) % (self.gpu_op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while trying to " "make a Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
self.assertFunctionContains1(f_tst, self.gpu_op)
ref_e = None
try:
expecteds = f_ref()
except Exception as exc:
ref_e = exc
try:
variables = f_tst()
except Exception as exc:
if ref_e is None:
err_msg = (
"Test %s::%s: exception when calling the " "Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
else:
# if we raised an exception of the same type we're good.
if isinstance(exc, type(ref_e)):
return
else:
err_msg = (
"Test %s::%s: exception raised during test "
"call was not the same as the reference "
"call (got: %s, expected %s)"
% (self.gpu_op, testname, type(exc), type(ref_e))
)
exc.args += (err_msg,)
raise
for i, (variable, expected) in enumerate(zip(variables, expecteds)):
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not TensorType.values_eq_approx(variable, expected)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
for description, check in self.checks.items():
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)"
) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name
if hasattr(Checker, "__qualname__"):
Checker.__qualname__ = name
return Checker
def test_transfer_cpu_gpu():
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def test_transfer_gpu_gpu():
g = GpuArrayType(
dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
)()
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding(
"cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
)
f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuToGpu)
fv = f(gv)
assert GpuArrayType.values_eq(fv, gv)
def test_transfer_strided():
# This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suite of tests to
# ensure correctness
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 8), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2]
gv = gv[:, ::2]
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def gpu_alloc_expected(x, *shp):
g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
g[:] = x
return g
TestGpuAlloc = makeTester(
name="GpuAllocTester",
# The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), np.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), np.int32(7)),
correct02=(rand(), np.int32(4), np.int32(7)),
correct12=(rand(7), np.int32(4), np.int32(7)),
correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
bad_shape12=(rand(7), np.int32(7), np.int32(5)),
),
)
class TestGPUAlloc(TestAlloc):
dtype = "float32"
mode = mode_with_gpu
shared = staticmethod(gpuarray_shared_constructor)
allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]
def test_alloc_empty():
for dt in ["float32", "int8"]:
f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == dt
f = theano.function(
[],
[
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
],
)
out = f()
assert out[0].shape == (3, 2)
assert out[0].dtype == "uint64"
assert out[1].shape == (3, 2)
assert out[1].dtype == "uint64"
assert (
len(
[
node
for node in f.maker.fgraph.apply_nodes
if isinstance(node.op, GpuAllocEmpty)
]
)
== 1
)
def test_shape():
x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
if theano.config.mode != "FAST_COMPILE":
assert len(topo) == 4
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.Shape_i)
assert isinstance(topo[3].op, tt.opt.MakeVector)
mode = mode_with_gpu.excluding("local_shape_to_shape_i")
f = theano.function([x], x.shape, mode=mode)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
assert len(topo) == 1
assert isinstance(topo[0].op, tt.Shape)
def test_gpu_contiguous():
a = tt.fmatrix("a")
i = tt.iscalar("i")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
# The reshape is needed otherwise we make the subtensor on the CPU
# to transfer less data.
f = theano.function(
[a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuContiguous) for node in topo])
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
class TestGPUReshape(TestReshape):
def setup_method(self):
self.shared = gpuarray_shared_constructor
self.op = GpuReshape
self.mode = mode_with_gpu
self.ignore_topo = (
HostFromGpu,
GpuFromHost,
theano.compile.DeepCopyOp,
GpuDimShuffle,
GpuElemwise,
tt.opt.Shape_i,
tt.opt.MakeVector,
)
assert self.op == GpuReshape
class TestGPUComparison(TestComparison):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
self.dtypes = ["float64", "float32"]
class TestGPUJoinAndSplit(TestJoinAndSplit):
def setup_method(self):
self.mode = mode_with_gpu.excluding("constant_folding")
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
# Use join instead of MakeVector since there is no MakeVector on GPU
self.make_vector_op = GpuJoin()
# this is to avoid errors with limited devices
self.floatX = "float32"
self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]
def shared(x, **kwargs):
return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)
self.shared = shared
def test_gpusplit_opt(self):
# Test that we move the node to the GPU
# Also test float16 computation at the same time.
rng = np.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype("float16"))
o = tt.Split(2)(m, 0, [2, 2])
assert o[0].dtype == "float16"
f = theano.function([], o, mode=self.mode)
assert any(
[
isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()
]
)
o1, o2 = f()
assert np.allclose(o1, m.get_value(borrow=True)[:2])
assert np.allclose(o2, m.get_value(borrow=True)[2:])
def test_gpujoin_gpualloc():
a = tt.fmatrix("a")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
b = tt.fmatrix("b")
b_val = np.asarray(np.random.rand(3, 5), dtype="float32")
f = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
)
f_gpu = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
)
f_gpu2 = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
)
assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])
for dtype in ["float32", "int32", "float16"]:
check(dtype, 3)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
def test_hostfromgpu_shape_i():
# Test that the shape is lifted over hostfromgpu
m = mode_with_gpu.including(
"local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
)
a = tt.fmatrix("a")
ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
av = np.asarray(np.random.rand(5, 4), dtype="float32")
cv = gpuarray.asarray(
np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
)
f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(av)) == (5, 4)
f = theano.function([ca], host_from_gpu(ca), mode=m)
assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, theano.compile.Shape_i)
assert isinstance(topo[1].op, theano.compile.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(cv)) == (5, 4)
def test_Gpujoin_inplace():
# Test Gpujoin to work inplace.
#
# This function tests the case when several elements are passed to the
# Gpujoin function but all except one of them are empty. In this case
# Gpujoin should work inplace and the output should be the view of the
# non-empty element.
s = tt.lscalar()
data = np.array([3, 4, 5], dtype=theano.config.floatX)
x = gpuarray_shared_constructor(data, borrow=True)
z = tt.zeros((s,))
join = GpuJoin(view=0)
c = join(0, x, z)
f = theano.function([s], theano.Out(c, borrow=True))
if not isinstance(mode_with_gpu, theano.compile.DebugMode):
assert x.get_value(borrow=True, return_internal_type=True) is f(0)
assert np.allclose(f(0), [3, 4, 5])
def test_gpu_tril_triu():
def check_l(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.tril(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
def check_u(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.triu(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())
for dtype in ["float64", "float32", "float16"]:
# try a big one
m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
def test_gputri():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
for dtype in ["float64", "float32", "int32", "float16"]:
# try a big one
check(dtype, 1000, 1000, 0)
check(dtype, 1000, 1000, -400)
check(dtype, 1000, 1000, 400)
check(dtype, 5)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
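# Note (added): these are pytest tests that require a configured GPU context (see
# the mode_with_gpu / test_ctx_name imports above). They are normally collected and
# run with something like the following; the exact path and flags are assumptions:
#   THEANO_FLAGS="device=cuda0,floatX=float32" pytest tests/gpuarray/test_basic_ops.py -v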
| StarcoderdataPython |
98037 | import cmd
from asm_dicts import reg_dict
commands = [
'START',
'RESET',
'MODE',
'STEP',
'LOAD',
'REQ'
]
mode_commands = [
'GET',
'SET_CONT',
'SET_STEP'
]
load_commands = [
'INSTR',
'FILE'
]
req_commands = [
'MEM_DATA',
'MEM_INSTR',
'REG',
'REG_PC',
'LATCH_FETCH_DATA',
'LATCH_FETCH_CTRL',
'LATCH_DECO_DATA',
'LATCH_DECO_CTRL',
'LATCH_EXEC_DATA',
'LATCH_EXEC_CTRL',
'LATCH_MEM_DATA',
'LATCH_MEM_CTRL'
]
watch_list = []
step_count = 0
def get_bits(data, size, lsb):
mask = ~(-1 << size)
shifted_data = data & (mask << lsb)
return shifted_data >> lsb
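# Worked example (added for clarity): get_bits extracts `size` bits starting at bit
# `lsb`. For instance:
#   >>> hex(get_bits(0xABCD, 8, 4))
#   '0xbc'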
class MipsCli(cmd.Cmd):
prompt = 'MIPS>> '
def __init__(self, mips):
super().__init__()
self.mips = mips
def do_START(self, line):
self.mips.start()
def do_RESET(self,line):
global step_count
self.mips.reset()
step_count = 0
def do_MODE(self, line):
if line == mode_commands[0]:
print('\t0x{:08x}'.format((self.mips.get_mode())))
elif line in mode_commands[1:]:
self.mips.set_mode(line)
else:
print('\tquiacé?')
def do_INIT(self, line):
self.mips.init()
def do_WATCH(self, line):
global watch_list
if line == 'CLEAR':
watch_list = []
else:
watch_list.append(line)
def do_STEP(self, line):
global step_count
it = int(line) if line else 1
for i in range(int(it)):
status = self.mips.step()
if status == self.mips.frame.EOP:
print("\tEOP")
break
step_count += 1
print("\n========= STEP %d ========="%step_count)
for var in watch_list:
print("\n\t--" + var + '--')
self.do_REQ(var)
def do_LOAD(self, line):
comm = line.split(' ')
if comm[0] == load_commands[0]:
if not self.mips.load_instruction(bytearray.fromhex(comm[1]), int(comm[2])):
print('\tLOAD INSTRUCTION FAILED')
elif comm[0] == load_commands[1]:
self.mips.load_instructions_from_file(comm[1])
else:
print('\tquiacé?')
def do_REQ(self, line):
comm = line.split(' ')
comm = [comm[0], ''.join(comm[1:])]
if comm[0] in req_commands:
frame = self.mips.frame['REQ_'+comm[0]]
else:
print('\tquiacé?')
return
#MEM_DATA & MEM_INSTR
if comm[0] == req_commands[0] or comm[0] == req_commands[1]:
if len(comm) < 2:
print('\tquiacé?')
return
if ':' in comm[1]:
memrange = comm[1].split(':')
for addr in range(int(memrange[0]), int(memrange[1])):
print('\taddr {}: 0x{:08x}'.format(addr, (self.mips.req_data(frame, addr))))
else:
try:
addr = int(comm[1])
except:
print("address invalida")
return
print('\taddr {}: 0x{:08x}'.format(addr, (self.mips.req_data(frame, addr))))
# REG
elif comm[0] == req_commands[2]:
if len(comm) < 2:
print('\tquiacé?')
return
if ',' in comm[1]:
for reg in comm[1].split(','):
if reg not in reg_dict.keys():
print('\tquiacé?')
return
addr = reg_dict[reg.strip()]
print('\treg {}: 0x{:08x}'.format(reg.strip(), (self.mips.req_data(frame, addr))))
else:
if comm[1] not in reg_dict.keys():
print('\tquiacé?')
return
addr = reg_dict[comm[1]]
print('\treg {}: 0x{:08x}'.format(comm[1].strip(), (self.mips.req_data(frame, addr))))
# LATCH_FETCH_DATA
elif comm[0] == req_commands[4]:
print('\tIR: 0x{:08x}'.format((self.mips.req_data(frame))))
# LATCH_FETCH_CTRL
elif comm[0] == req_commands[5]:
data = self.mips.req_data(frame)
# print('\tPC+4: 0x{:08x}'.format((data & 0xFFFF)))
# print('\tPC: 0x{:08x}'.format((data & (0xFFFFFFFF << 32))))
print('\tPC: 0x{:08x}'.format(get_bits(data, 32, 32)))
# LATCH_DECO_DATA
elif comm[0] == req_commands[6]:
data = self.mips.req_data(frame)
# print('\tSHAMT: 0x{:02x}'.format((data & (0x1F << (16+32+32)))))
print('\tSHAMT: 0x{:02x}'.format(get_bits(data, 5,80)))
# print('\tA: 0x{:08x}'.format((data & (0xFFFFFFFF << (16+32)))))
print('\tA: 0x{:08x}'.format(get_bits(data, 32, 48)))
# print('\tB: 0x{:08x}'.format((data & (0xFFFFFFFF << 16))))
print('\tB: 0x{:08x}'.format(get_bits(data, 32, 16)))
# print('\tINM: 0x{:04x}'.format((data & (0xFFFF))))
print('\tINM: 0x{:04x}'.format(get_bits(data, 16, 0)))
# LATCH_DECO_CTRL
elif comm[0] == req_commands[7]:
data = self.mips.req_data(frame)
# print('\tEX_CTRL: 0x{:02x}'.format((data & (0x7F << (8+5+32)))))
print('\tEX_CTRL: 0x{:02x}'.format(get_bits(data, 7, 45)))
# print('\tMEM_CTRL: 0x{:02x}'.format((data & (0x1F << (8+32)))))
print('\tMEM_CTRL: 0x{:02x}'.format(get_bits(data, 5, 40)))
# print('\tWB_CTRL: 0x{:02x}'.format((data & (0xFF << 32))))
print('\tWB_CTRL: 0x{:02x}'.format(get_bits(data, 8, 32)))
# print('\tPC: 0x{:08x}'.format((data & (0xFFFFFFFF))))
print('\tPC: 0x{:08x}'.format(get_bits(data, 32, 0)))
# LATCH_EXEC_DATA
elif comm[0] == req_commands[8]:
data = self.mips.req_data(frame)
# print('\tALU: 0x{:08x}'.format((data & (0xFFFFFFFF << 32))))
print('\tALU: 0x{:08x}'.format(get_bits(data, 32, 32)))
# print('\tB: 0x{:08x}'.format((data & (0xFFFFFFFF))))
print('\tB: 0x{:08x}'.format(get_bits(data, 32, 0)))
# LATCH_EXEC_CTRL
elif comm[0] == req_commands[9]:
data = self.mips.req_data(frame)
# print('\tMEM_CTRL: 0x{:02x}'.format((data & (0x1F << (32+8)))))
print('\tMEM_CTRL: 0x{:02x}'.format(get_bits(data, 5, 40)))
# print('\tWB_CTRL: 0x{:02x}'.format((data & (0xFF << 32))))
print('\tWB_CTRL: 0x{:02x}'.format(get_bits(data, 8, 32)))
# print('\tPC: 0x{:08x}'.format((data & (0xFFFFFFFF))))
print('\tPC: 0x{:08x}'.format(get_bits(data, 32, 0)))
# LATCH_MEM_DATA
elif comm[0] == req_commands[10]:
data = self.mips.req_data(frame)
# print('\tREG_VAL: 0x{:08x}'.format((data & (0xFFFFFFFF << 32))))
print('\tREG_VAL: 0x{:08x}'.format(get_bits(data, 32, 32)))
# print('\tEXTENDED_MEM: 0x{:08x}'.format((data & (0xFFFFFFFF))))
print('\tEXTENDED_MEM: 0x{:08x}'.format(get_bits(data, 32, 0)))
# LATCH_MEM_CTRL
elif comm[0] == req_commands[11]:
data = self.mips.req_data(frame)
# print('\tWB_CTRL: 0x{:02x}'.format((data & (0xFF << 32))))
print('\tWB_CTRL: 0x{:02x}'.format(get_bits(data, 8, 32)))
# print('\tPC: 0x{:08x}'.format((data & (0xFFFFFFFF))))
print('\tPC: 0x{:08x}'.format(get_bits(data, 32, 0)))
else:
print('\tquiacé?')
return
def complete_START(self, text, line, start_index, end_index):
pass
def complete_RESET(self, text, line, start_index, end_index):
pass
def complete_MODE(self, text, line, start_index, end_index):
if text:
return [
comm for comm in mode_commands
if comm.startswith(text)
]
else:
return mode_commands
def complete_STEP(self, text, line, start_index, end_index):
pass
def complete_LOAD(self, text, line, start_index, end_index):
if text:
return [
comm for comm in load_commands
if comm.startswith(text)
]
else:
return load_commands
def complete_WATCH(self, text, line, start_index, end_index):
return self.complete_REQ(text, line, start_index, end_index)
def complete_REQ(self, text, line, start_index, end_index):
if text:
return [
comm for comm in req_commands
if comm.startswith(text)
]
else:
return req_commands
if __name__ == '__main__':
    # MipsCli needs a MIPS backend object exposing start/reset/init/step,
    # get_mode/set_mode, load_instruction(s)*, req_data and a `frame` table.
    mips_backend = None  # placeholder: replace with a real backend instance
    my_cmd = MipsCli(mips_backend)
    my_cmd.cmdloop()
| StarcoderdataPython |
129261 | from django import template
from django.utils.http import urlquote
from endpoint_monitor.models import EndpointTest
from linda_app.lists import CATEGORIES
from linda_app.models import Vocabulary, VocabularyClass, VocabularyProperty, get_configuration, \
datasource_from_endpoint
register = template.Library()
# Load user configurable settings
config = get_configuration()
@register.filter(name="nice_name")
def nice_name(user):
return user.get_full_name() or user.username
@register.filter(name="vocabularies")
def vocabularies(objects):
return [elem for elem in objects if isinstance(elem.object, Vocabulary) or isinstance(elem, Vocabulary)]
@register.filter(name="classes")
def vocabulary_classes(objects):
return [elem for elem in objects if isinstance(elem.object, VocabularyClass) or isinstance(elem, VocabularyClass)]
@register.filter(name="properties")
def vocabulary_properties(objects):
return [elem for elem in objects if isinstance(elem.object, VocabularyProperty) or isinstance(elem, VocabularyProperty)]
@register.filter(name="get_endpoint")
def get_endpoint(datasource):
return datasource.get_endpoint()
@register.simple_tag
def url_replace(request, field, value):
dict_ = request.GET.copy()
dict_[field] = value
return dict_.urlencode()
@register.filter(name="datasource_visualize")
def datasource_visualize(datasource):
endpoint = config.private_sparql_endpoint
graph_uri = datasource.uri
return '/visualizations/#/datasource/' + datasource.name + '/' + urlquote(endpoint, safe='') + '/' + urlquote(graph_uri, safe='') + '/rdf'
@register.filter
def sparql_version(datasource):
if datasource.is_public:
tests = EndpointTest.objects.filter(datasource=datasource, up=True).order_by('-id')
if tests:
if tests[0].supports_minus:
return "1.1"
else:
return "1.0"
else:
return ""
else:
return "1.1"
@register.filter
def domain_of(cls, limit):
return cls.domain_of()[:limit]
@register.filter
def range_of(cls, limit):
return cls.range_of()[:limit]
@register.filter
def category_display_name(category):
for c in CATEGORIES:
if c[0] == category:
return c[1]
return category
@register.filter
def label_from_uri(uri):
label = uri.split('/')[-1]
if label.find('#') >= 0:
label = uri.split('#')[-1]
return label
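# Usage sketch (added for illustration; the {% load %} name depends on how this tag
# library is registered, which is not shown here):
#   {% load linda_tags %}
#   <p>{{ request.user|nice_name }}</p>
#   <a href="{{ datasource|datasource_visualize }}">Visualize</a>
#   <span>{{ category|category_display_name }}</span>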
@register.filter
def get_datasources(query):
# get initial endpoint
dt_source = query.get_datasource()
if dt_source:
datasources = [dt_source.title]
else:
datasources = [query.endpoint]
# search for additional endpoints
lines = query.sparql.split('\n')
for line in lines:
pos = line.find('SERVICE <')
if pos < 0:
continue
start = pos + len('SERVICE <')
end = start + line[start:].find('>')
endpoint = line[start:end]
dt_source = datasource_from_endpoint(endpoint)
if dt_source:
datasources.append(dt_source.title)
else:
datasources.append(label_from_uri(endpoint))
# create string
result = datasources[0]
for dt in datasources[1:-1]:
result += ", " + dt
if len(datasources) > 1:
result += " and " + datasources[-1]
return result | StarcoderdataPython |
158166 | import requests
from bs4 import BeautifulSoup
import re
from tqdm import tqdm
import math
import json
from datetime import datetime
# Taken from https://stackoverflow.com/questions/1345827/how-do-i-find-the-time-difference-between-two-datetime-objects-in-python
# note: the default value of `now` is evaluated once, at import time; pass `now`
# explicitly (as main() does below) to measure against the current time
def get_duration( then, now = datetime.now(), interval = "default" ):
# Returns a duration as specified by variable interval
# Functions, except totalDuration, returns [quotient, remainder]
duration = now - then # For build-in functions
duration_in_s = duration.total_seconds()
def years():
return divmod(duration_in_s, 31536000) # Seconds in a year=31536000.
def days(seconds = None):
return divmod(seconds if seconds != None else duration_in_s, 86400) # Seconds in a day = 86400
def hours(seconds = None):
return divmod(seconds if seconds != None else duration_in_s, 3600) # Seconds in an hour = 3600
def minutes(seconds = None):
return divmod(seconds if seconds != None else duration_in_s, 60) # Seconds in a minute = 60
def seconds(seconds = None):
if seconds != None:
return divmod(seconds, 1)
return duration_in_s
# Give you all the correct granular info
def totalDuration():
y = years()
d = days(y[1]) # Use remainder to calculate next variable
h = hours(d[1])
m = minutes(h[1])
s = seconds(m[1])
return "{} years, {} days, {} hours, {} minutes and {} seconds".format(int(y[0]), int(d[0]), int(h[0]), int(m[0]), int(s[0]))
return {
'years': int(years()[0]),
'days': int(days()[0]),
'hours': int(hours()[0]),
'minutes': int(minutes()[0]),
'seconds': int(seconds()),
'default': totalDuration()
}[interval]
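# Example (added): get_duration(datetime(2020, 1, 1), datetime(2020, 1, 2), "hours")
# returns 24; with interval="default" it returns the full human-readable breakdown.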
# Return the dict of the json file
def decode_json_config_file_to_dict( filepath='' ):
if filepath == '':
print('ERROR: decode_json_config_file_to_dict(): Arg filepath is empty string')
return None
try:
f = open(filepath)
data = json.load(f)
return data
except Exception as e:
print('ERROR: decode_json_config_file_to_dict(): Fail to open {}'.format(filepath), e)
return None
return None
# Only works for first level of variables, for now, until multiple level is implemented
def get_single_variable_from_json_file( filepath='', arg_name='' ):
if filepath == '':
print('ERROR: get_single_variable_from_json_file(): Arg filepath is empty string')
return None
if arg_name == '':
print('ERROR: get_single_variable_from_json_file(): Arg arg_name is empty string')
return None
json_dict = decode_json_config_file_to_dict(filepath=filepath)
if not isinstance(json_dict, dict):
print('ERROR: get_single_variable_from_json_file(): The file is not a correctly structured json file')
return None
else:
if arg_name in json_dict:
return json_dict[arg_name]
else:
print('ERROR: get_single_variable_from_json_file(): The key "{}" does not exists in dictionary from the json file'.format(arg_name))
return None
return None
# Existing files are not overwritten; the download is skipped in that case. The file type is inferred from the response, so to_filename_with_no_extension must not include an extension.
def download_image_from_url(url_image, to_filename_with_no_extension, to_dir_path):
try:
response = requests.get(url_image, stream=True)
except Exception as e:
raise ValueError( " ERROR: download_image_from_url(): Exception - {}".format(e) )
if response.status_code != 200:
raise ValueError( " ERROR: download_image_from_url(): Returned status code is not 200. Instead it is {}: {}".format(response.status_code, response.reason) )
try:
main_type, sub_type = response.headers['Content-Type'].split("/")
image_len = response.headers['Content-Length']
except Exception as e:
raise ValueError( " ERROR: download_image_from_url(): Exception - {}".format(e) )
if main_type != 'image':
raise ValueError( " ERROR: download_image_from_url(): Returned Content-Type is not image. Instead it is {}".format(main_type) )
try:
image_len = int(image_len)
except Exception as e:
raise ValueError( " ERROR: download_image_from_url(): Exception - {}".format(e) )
if not isinstance(image_len, int):
raise ValueError( " ERROR: download_image_from_url(): Returned Content-Length is not an int. Instead it is {}".format(type(image_len)) )
    # Determine whether the directory path uses unix-style or Windows-style separators
if to_dir_path[-1] != '/' and to_dir_path[-1] != '\\':
if '/' in to_dir_path:
to_dir_path = to_dir_path + '/'
elif '\\' in to_dir_path:
to_dir_path = to_dir_path + '\\'
else:
# Take a leap of faith here, going with my favorite style, unix path style
to_dir_path = to_dir_path + '\\'
to_path_filename_full = to_dir_path + to_filename_with_no_extension + "." + sub_type
try:
print( ' {}'.format(to_filename_with_no_extension) )
with open(file=to_path_filename_full, mode='xb') as handle:
for block in tqdm(response.iter_content(1024), total=math.ceil(image_len/1024)):
if not block:
break
handle.write(block)
except FileExistsError:
print( " \tskipping, filename already exists" )
except Exception as e:
raise ValueError( " ERROR: download_image_from_url(): Exception - {}".format(e) )
# Will replace any space in title with underscore_
def scrap_for_current_image_link_and_title(url_art):
response = requests.get(url_art)
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
else:
raise ValueError( " ERROR: scrap_for_current_image_link_and_title(): Returned status code is not 200. Instead it is {}: {}".format(response.status_code, response.reason) )
# image_div = soup.find_all(name='img', class_='_1izoQ')
image_div = soup.find(name='img', class_='_1izoQ')
if image_div is None:
raise ValueError( " ERROR: scrap_for_current_image_link_and_title(): Cannot find a single <img class='_1izoQ'> occurence" )
try:
image_link = image_div["src"]
except Exception as e:
raise ValueError( " ERROR: scrap_for_current_image_link_and_title(): Exception - {}".format(e) )
if image_link is None:
raise ValueError( " ERROR: scrap_for_current_image_link_and_title(): Cannot find attr 'src' in {}".format(image_div) )
title_div = soup.find(name='h1', attrs={'data-hook': 'deviation_title'})
if title_div is None:
raise ValueError( " ERROR: scrap_for_current_image_link_and_title(): Cannot find a single <h1 data-hook='deviation_title'> occurence" )
try:
title = title_div.contents[0]
except Exception as e:
raise ValueError( " ERROR: scrap_for_current_image_link_and_title(): Exception - {}".format(e) )
if title is None:
raise ValueError( " ERROR: scrap_for_current_image_link_and_title(): Cannot find any content in {}".format(title_div) )
title = title.replace(' ','_')
return image_link, title
# Return a list of art links and the artist name for an artist
def scrap_for_all_art_link_from_profile_link(url_profile):
response = requests.get(url_profile)
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
else:
raise ValueError( " ERROR: scrap_for_all_art_link_from_profile_link(): Returned status code is not 200. Instead it is {}: {}".format(response.status_code, response.reason) )
tag_arts = soup.find_all(name='a', attrs={'data-hook': 'deviation_link', 'href': re.compile(".*/art/.*")})
if tag_arts is None:
raise ValueError( " ERROR: scrap_for_all_art_link_from_profile_link(): Cannot find a single <a data-hook='deviation_link' */art/*> occurence" )
url_arts=[]
for each_tag in tag_arts:
try:
url_arts.append(each_tag["href"])
except Exception as e:
raise ValueError( " ERROR: scrap_for_all_art_link_from_profile_link(): Exception - {}".format(e) )
if len(url_arts) == 0:
raise ValueError( " ERROR: scrap_for_all_art_link_from_profile_link(): Cannot find attr 'href' in {}".format(each_tag) )
tag_artist = soup.find(name='a', attrs={'data-username': re.compile(".*")})
artist_name = tag_artist["data-username"]
if len(artist_name) == 0:
raise ValueError( " ERROR: scrap_for_all_art_link_from_profile_link(): Cannot find artist name in {}".format(tag_artist) )
return artist_name, url_arts
# Return the artist name for the art link
def scrap_for_artist_name_from_art_link(url_art):
response = requests.get(url_art)
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
else:
raise ValueError( " ERROR: scrap_for_artist_name_from_art_link(): Returned status code is not 200. Instead it is {}: {}".format(response.status_code, response.reason) )
tag_artist = soup.find(name='a', attrs={'data-username': re.compile(".*")})
artist_name = tag_artist["data-username"]
if len(artist_name) == 0:
raise ValueError( " ERROR: scrap_for_artist_name_from_art_link(): Cannot find artist name in {}".format(tag_artist) )
return artist_name
def main():
config_path = r'./config.json'
download_dir = get_single_variable_from_json_file( config_path, "download_dir" )
download_mode = get_single_variable_from_json_file( config_path, "download_mode" )
if download_mode == "profiles":
print( ' Download Mode: {}'.format(download_mode) )
url_profile_list = get_single_variable_from_json_file( config_path, "artist_profiles" )
for each_profile_url in url_profile_list:
artist_name, url_list = scrap_for_all_art_link_from_profile_link(url_profile=each_profile_url)
print( ' Removing dups in art links list' )
url_list = list(dict.fromkeys(url_list))
print( ' Downloading for artist {}'.format(artist_name) )
print( ' Downloading to {}'.format(download_dir) )
for url in url_list:
image_link, image_title = scrap_for_current_image_link_and_title(url)
download_image_from_url(url_image=image_link, to_filename_with_no_extension=artist_name+"__"+image_title, to_dir_path=download_dir)
elif download_mode == "art_links":
print( ' Download Mode: {}'.format(download_mode) )
print( ' Downloading to {}'.format(download_dir) )
url_art_link_list = get_single_variable_from_json_file( config_path, "art_links" )
for each_art_url in url_art_link_list:
artist_name = scrap_for_artist_name_from_art_link(url_art=each_art_url)
image_link, image_title = scrap_for_current_image_link_and_title(each_art_url)
download_image_from_url(url_image=image_link, to_filename_with_no_extension=artist_name+"__"+image_title, to_dir_path=download_dir)
else:
print(' Unknown download_mode given: {}'.format(download_mode))
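# Example config.json (added for illustration; these are the keys read in main()
# above, values are placeholders; download_mode is either "profiles" or "art_links"):
#   {
#       "download_dir": "./downloads",
#       "download_mode": "profiles",
#       "artist_profiles": ["https://www.deviantart.com/some-artist/gallery"],
#       "art_links": ["https://www.deviantart.com/some-artist/art/some-title-123456"]
#   }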
if __name__ == '__main__':
t_start = datetime.now()
main()
t_end = datetime.now()
print('')
print(' Time Elapsed: {}'.format( get_duration(t_start, t_end) )) | StarcoderdataPython |
81356 | <reponame>pdehaye/COVID19-Demography
import numpy as np
import csv
'''
Code for sampling the household and age structure of a population of n
agents.
'''
def get_age_distribution(country):
age_distribution=[]
with open('World_Age_2019.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if row[0]==country:
for i in range(101):
age_distribution.append(float(row[i+1]))
break
return np.array(age_distribution)
def get_mother_birth_age_distribution(country):
mother_birth_age_distribution=[]
with open('AgeSpecificFertility.csv',encoding='latin-1') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if row[0]==country:
#15-19 20-24 25-29 30-34 35-39 40-44 45-49
for i in range(7):
mother_birth_age_distribution.append(float(row[i+1]))
break
return np.array(mother_birth_age_distribution)
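# Usage sketch (added; assumes the CSV files referenced above are present in the
# working directory, and "China" is just an example country name):
#   age_dist = get_age_distribution("China")
#   age_dist = age_dist / age_dist.sum()                    # normalize before sampling
#   fertility = get_mother_birth_age_distribution("China")  # 5-year bins, ages 15-49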
def sample_households_china(n):
max_household_size = 4
households = np.zeros((n, max_household_size), dtype=np.int)
households[:] = -1
age = np.zeros(n, dtype=np.int)
n_ages = 101
#estimates for china from 2020
#from https://population.un.org/wpp/Download/Standard/Interpolated/
age_distribution = [16113.281,16543.361,16875.302,17118.429,17282.064,17375.527,17408.145,17389.238,17328.13,17234.143,17117.175,16987.122,16850.435,16715.289,16592.73,16484.473,16388.786,16370.261,16460.9,16637.439,16866.861,17182.465,17477.132,17702.896,17928.813,18144.994,18201.129,18841.832,20387.657,22413.391,24308.028,26355.485,27269.657,26400.295,24405.505,22597.72,20719.355,19296.916,18726.536,18750.928,18640.938,18451.511,18716.505,19599.644,20865.548,22101.75,23374.699,24376.638,24907.095,25077.435,25250.357,25414.362,25172.526,24383.003,23225.134,22043.117,20795.729,19608.86,18589.082,17703.703,16743.545,15666.543,14988.213,14917.427,15198.411,15425.124,15749.105,15550.741,14503.063,12921.733,11444.972,9939.85,8651.521,7764.623,7148.723,6478.704,5807.535,5222.027,4729.055,4307.295,3931.038,3608.42,3272.336,2887.659,2481.964,2118.152,1783.88,1480.587,1215.358,983.8,739.561,551.765,453.96,342.463,217.275,145.809,122.178,96.793,69.654,40.759,74.692]
age_distribution = np.array(age_distribution)
age_distribution = age_distribution/age_distribution.sum()
#single person, couple only, parents and unmarried children, 3-generation
#from https://link.springer.com/article/10.1186/s40711-015-0011-0/tables/2
#(2010 census, urban populations)
household_probs = np.array([0.1703, .2117, 0.3557, 0.1126])
household_probs /= household_probs.sum()
num_generated = 0
while num_generated < n:
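        # If fewer than 5 slots remain, fall back to single-person households
        # so the largest template (5 people, the 3-generation case) cannot overshoot n.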
if n - num_generated < 5:
i = 0
else:
i = np.random.choice(household_probs.shape[0], p=household_probs)
#single person household
#sample from age distribution
if i == 0:
age[num_generated] = np.random.choice(n_ages, p=age_distribution)
generated_this_step = 1
#couple, sample from age distribution conditioned on age >= 22
elif i == 1:
renormalized = age_distribution[22:]
renormalized = renormalized/renormalized.sum()
age[num_generated] = np.random.choice(n_ages-22, p=renormalized) + 22
age[num_generated+1] = np.random.choice(n_ages-22, p=renormalized) + 22
generated_this_step = 2
#some information about mother's age at birth of first child
#https://link.springer.com/article/10.1007/s42379-019-00022-9
elif i == 2:
renormalized = age_distribution[:22]
renormalized = renormalized/renormalized.sum()
child_age = np.random.choice(22, p=renormalized)
age[num_generated] = child_age
#super rough approximation, women have child at a uniformly random age between 23 and 33
renormalized = age_distribution[23:34]
renormalized = renormalized/renormalized.sum()
mother_age_at_birth = np.random.choice(11, p=renormalized) + 23
mother_current_age = mother_age_at_birth + child_age
age[num_generated + 1] = mother_current_age
age[num_generated + 2] = mother_current_age
generated_this_step = 3
elif i == 3:
#start by generating parents/unmarried child
renormalized = age_distribution[:22]
renormalized = renormalized/renormalized.sum()
child_age = np.random.choice(22, p=renormalized)
age[num_generated] = child_age
#super rough approximation, women have child at a uniformly random age between 23 and 33
renormalized = age_distribution[23:34]
renormalized = renormalized/renormalized.sum()
mother_age_at_birth = np.random.choice(11, p=renormalized) + 23
mother_current_age = mother_age_at_birth + child_age
age[num_generated + 1] = mother_current_age
age[num_generated + 2] = mother_current_age
#add grandparents
renormalized = age_distribution[23:34]
renormalized = renormalized/renormalized.sum()
grandmother_age_at_birth = np.random.choice(11, p=renormalized) + 23
grandmother_current_age = grandmother_age_at_birth + mother_current_age
age[num_generated + 3] = grandmother_current_age
age[num_generated + 4] = grandmother_current_age
generated_this_step = 5
#update list of household contacts
for i in range(num_generated, num_generated+generated_this_step):
curr_pos = 0
for j in range(num_generated, num_generated+generated_this_step):
if i != j:
households[i, curr_pos] = j
curr_pos += 1
num_generated += generated_this_step
return households, age
def sample_households_italy(n):
max_household_size = 6
households = np.zeros((n, max_household_size), dtype=np.int)
households[:] = -1
age = np.zeros(n, dtype=np.int)
n_ages = 101
age_distribution = get_age_distribution("Italy")
age_distribution = np.array(age_distribution)
age_distribution = age_distribution/age_distribution.sum()
# List of household types: single household, couple without children, single parent +1/2/3 children, couple +1/2/3 children,
# family without a nucleus, nucleus with other persons, households with two or more nuclei (a and b)
household_probs = np.array([0.308179, 0.191000, 0.0694283, 0.0273065, 0.00450268, 0.152655, 0.132429, 0.0340969,
0.043821, 0.033, 0.0150])
household_probs /= household_probs.sum()
num_generated = 0
# from fertility data
mother_birth_age_distribution=get_mother_birth_age_distribution("Italy")
renormalized_mother = mother_birth_age_distribution/mother_birth_age_distribution.sum()
renormalized_adult = age_distribution[18:]
renormalized_adult = renormalized_adult/renormalized_adult.sum()
# 18 considered as majority age, maybe should consider that children may still live with parents until 30 or so
renormalized_child = age_distribution[:30]
renormalized_child = renormalized_child/renormalized_child.sum()
renormalized_adult_older = age_distribution[30:]
renormalized_adult_older /= renormalized_adult_older.sum()
# 60 considered as retirement threshold, maybe should be larger, but reasonable for first pass
renormalized_grandparent = age_distribution[60:]
renormalized_grandparent = renormalized_grandparent/renormalized_grandparent.sum()
while num_generated < n:
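        # If fewer than max_household_size + 1 slots remain, force a single-person
        # household so the largest template (6 people) cannot overshoot n.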
if n - num_generated < (max_household_size+1):
i = 0
else:
i = np.random.choice(household_probs.shape[0], p=household_probs)
#single person household
#sample from age distribution
if i == 0:
age[num_generated] = np.random.choice(n_ages-30, p=renormalized_adult_older) + 30
generated_this_step = 1
# couple, sample from age distribution conditioned on age >= 18
elif i == 1:
age_adult = np.random.choice(n_ages-30, p=renormalized_adult_older) + 30
age[num_generated] = age_adult
age[num_generated+1] = min(n_ages-1,age_adult+3) # man three years older on average
generated_this_step = 2
# single parent, 1 child
elif i == 2:
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated] = child_age
#super rough approximation
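            # choice(7) picks one of the 5-year fertility bands (15-19 ... 45-49);
            # (+3)*5 maps it to the band's start age and randint(5) picks a year within the band.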
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + child_age)
age[num_generated + 1] = mother_current_age
generated_this_step = 2
# single parent, 2 children
elif i == 3:
for j in range(2):
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated+j] = child_age
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + max(age[num_generated:(num_generated+2)]))
age[num_generated + 2] = mother_current_age
generated_this_step = 3
# single parent, 3 children
elif i == 4:
for j in range(3):
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated+j] = child_age
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + max(age[num_generated:(num_generated+3)]))
age[num_generated + 3] = mother_current_age
generated_this_step = 4
# couple, 1 child
elif i == 5:
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated] = child_age
#super rough approximation
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + child_age)
age[num_generated + 1] = mother_current_age
age[num_generated + 2] = min(n_ages-1,mother_current_age+3)
generated_this_step = 3
# couple, 2 children
elif i == 6:
for j in range(2):
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated+j] = child_age
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + max(age[num_generated:(num_generated+2)]))
age[num_generated + 2] = mother_current_age
age[num_generated + 3] = min(n_ages-1,mother_current_age+3)
generated_this_step = 4
# couple, 3 children
elif i == 7:
for j in range(3):
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated+j] = child_age
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + max(age[num_generated:(num_generated+3)]))
age[num_generated + 3] = mother_current_age
age[num_generated + 4] = min(n_ages-1,mother_current_age+3)
generated_this_step = 5
# family without nucleus
elif i == 8:
age[num_generated] = np.random.choice(n_ages-30, p=renormalized_adult_older) + 30
age[num_generated+1] = np.random.choice(n_ages-30, p=renormalized_adult_older) + 30
generated_this_step = 2
# nucleus with other persons (couple, 2 children, adult >= 60)
elif i == 9:
for j in range(2):
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated+j] = child_age
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + max(age[num_generated:(num_generated+2)]))
age[num_generated + 2] = mother_current_age
age[num_generated + 3] = min(n_ages-1,mother_current_age+3)
age[num_generated + 4] = np.random.choice(n_ages-60, p=renormalized_grandparent) + 60
generated_this_step = 5
# households with 2 or more nuclei
# a - couple with same age for mother/father sampled from > 18 + 2 children <= 18 + 2 grand-parents
# b - couple with same age for mother/father sampled from > 18 + 2 children <= 18 + 2 children from other marriage <= 18
# scenario b removed for now
elif i == 10:
for j in range(2):
child_age = np.random.choice(30, p=renormalized_child)
age[num_generated+j] = child_age
mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
mother_current_age = min(n_ages-1,mother_age_at_birth + max(age[num_generated:(num_generated+2)]))
age[num_generated + 2] = mother_current_age
age[num_generated + 3] = min(n_ages-1,mother_current_age+3)
#grandparent_age = np.random.choice(n_ages-60, p=renormalized_grandparent) + 60
grandmother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
grandmother_current_age = min(n_ages-1,grandmother_age_at_birth + mother_current_age)
#age[num_generated + 4] = grandparent_age
#age[num_generated + 5] = grandparent_age+3
age[num_generated + 4] = grandmother_current_age
age[num_generated + 5] = min(n_ages-1,grandmother_current_age+3)
generated_this_step = 6
#elif i == 11:
#for j in range(4):
#child_age = np.random.choice(30, p=renormalized_child)
#age[num_generated+j] = child_age
#mother_age_at_birth = (np.random.choice(7, p=renormalized_mother) + 3)*5+np.random.randint(5)
#mother_current_age = min(n_ages-1,mother_age_at_birth + max(age[num_generated:(num_generated+4)]))
#age[num_generated + 4] = mother_current_age
#age[num_generated + 5] = min(n_ages-1,mother_current_age+3)
#generated_this_step = 6
#update list of household contacts
for i in range(num_generated, num_generated+generated_this_step):
curr_pos = 0
for j in range(num_generated, num_generated+generated_this_step):
if i != j:
households[i, curr_pos] = j
curr_pos += 1
num_generated += generated_this_step
return households, age | StarcoderdataPython |
3355031 | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from math import sqrt
from a_star.node import Node
from game_map import GameMap, Cell
class PathFinder:
def __init__(self, width, height):
self.mapWidth = width
self.mapHeight = height
self.nodes = []
self.node_start = None
self.node_end = None
self.initialize_nodes()
def initialize_nodes(self):
self.nodes = []
# Fill the array
for y in range(self.mapHeight):
for x in range(self.mapWidth):
node = Node()
node.x = x
node.y = y
node.bObstacle = False
node.bVisited = False
node.parent = None
self.nodes.append(node)
# Create nodes' connections
for y in range(self.mapHeight):
for x in range(self.mapWidth):
# North
if y > 0:
# print("x", x, "y", y)
self.nodes[y * self.mapWidth + x].neighbours.append(self.nodes[(y - 1) * self.mapWidth + (x + 0)])
# South
if y < self.mapHeight - 1:
self.nodes[y * self.mapWidth + x].neighbours.append(self.nodes[(y + 1) * self.mapWidth + (x + 0)])
# West
if x > 0:
self.nodes[y * self.mapWidth + x].neighbours.append(self.nodes[(y - 0) * self.mapWidth + (x - 1)])
# East
if x < self.mapWidth - 1:
self.nodes[y * self.mapWidth + x].neighbours.append(self.nodes[(y + 0) * self.mapWidth + (x + 1)])
def set_start_end(self, start_pos, end_pos):
self.node_start = self.nodes[start_pos[1] * self.mapWidth + start_pos[0]]
self.node_end = self.nodes[end_pos[1] * self.mapWidth + end_pos[0]]
def set_obstacle(self, obstacle_pos):
self.nodes[obstacle_pos[1] * self.mapWidth + obstacle_pos[0]].bObstacle = True
def get_shortest_path_node_iterator(self):
# def yield_from_start(node):
# if node != None:
# yield_from_start(node.parent)
# else:
# return
# yield node
# yield_from_start(self.node_end)
path = []
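        # Walk the parent pointers back from the end node, inserting at the front
        # so the returned list runs from start to end.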
node = self.node_end
while node is not None:
path.insert(0, node)
node = node.parent
return path
    def get_shortest_path_next_node(self):
        # Walk back from the end node to the node whose parent is the start node;
        # that node is the next step to take from the start. Returns None when no
        # path to the start exists.
        node = self.node_end
        while node is not None and node.parent is not self.node_start:
            node = node.parent
        return node
def solve_astar(self):
# Clear the array
for y in range(self.mapHeight):
for x in range(self.mapWidth):
self.nodes[y * self.mapWidth + x].bVisited = False
self.nodes[y * self.mapWidth + x].fGlobalGoal = float('inf')
self.nodes[y * self.mapWidth + x].fLocalGoal = float('inf')
self.nodes[y * self.mapWidth + x].parent = None
def distance(node_a, node_b):
return sqrt((node_a.x - node_b.x)**2 + (node_a.y - node_b.y)**2)
def distance_manhattan(node_a, node_b):
return abs(node_a.x - node_b.x) + abs(node_a.y - node_b.y)
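        # Manhattan distance is an admissible heuristic here because the grid
        # is 4-connected (no diagonal moves).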
def heuristic(node_a, node_b):
return distance_manhattan(node_a, node_b)
# Init current node
node_cur = self.node_start
self.node_start.fLocalGoal = 0.0
self.node_start.fGlobalGoal = heuristic(self.node_start, self.node_end)
nodes_not_tested = []
nodes_not_tested.append(self.node_start)
while len(nodes_not_tested) != 0:
# Sort untested nodes by global goal, so lower is first
nodes_not_tested = sorted(nodes_not_tested)
# Front of the list is potentially the lowest distance
# Our list may also contains nodes that have been visited, so ditch these
while len(nodes_not_tested) != 0 and nodes_not_tested[0].bVisited:
nodes_not_tested.pop(0)
# Abort because there are no valid nodes left to test
if len(nodes_not_tested) == 0:
break
# Now the node of the front of the list is the best candidate
node_cur = nodes_not_tested[0]
node_cur.bVisited = True # We only explore a node once
# Iterate on the node's neighbours
for node_neighbour in node_cur.neighbours:
# and only if the neighbour is not visited
# and is not an obstacle, add it to the NotTested list
if not node_neighbour.bVisited and not node_neighbour.bObstacle:
nodes_not_tested.append(node_neighbour)
# Calculate the neighbours potential lowest parent distance
fPossibilityLowerGoal = node_cur.fLocalGoal + distance(node_cur, node_neighbour)
# If choosing to path through this node is a lower distance than what
# the neighbour currently has set, update the neighbour to use this node
# as the path source, and set its distance scores as necessary
if fPossibilityLowerGoal < node_neighbour.fLocalGoal:
node_neighbour.parent = node_cur
node_neighbour.fLocalGoal = fPossibilityLowerGoal
# The best path length to the neighbour being tested has changed
# so update the neighbour's score.
# The heuristic is used to globally bias the path algorithm
# so it knows if it's getting better or worse.
# At some points the algorithm will realize this path is worse and abandon it,
# and then go and search along the next best path
                    node_neighbour.fGlobalGoal = node_neighbour.fLocalGoal + heuristic(node_neighbour, self.node_end)
if __name__ == "__main__":
print("test")
path_finder = PathFinder(10, 10)
start = (0, 0)
end = (9, 9)
path_finder.set_start_end(start, end)
osbtacles = [(0, 1), (1, 1), (2, 3), (3, 3), (4, 2), (5, 1), (6, 0)]
for obs in osbtacles:
path_finder.set_obstacle(obs)
path_finder.solve_astar()
game = GameMap(10, 10)
for node in path_finder.get_shortest_path_node_iterator():
print(node)
game.setCell((node.x, node.y), Cell.CHARACTER)
game.setCell(start, Cell.CHARACTER)
game.setCell(end, Cell.DOWNSTAIRS)
for obs in osbtacles:
game.setCell(obs, Cell.WALL)
print(game) | StarcoderdataPython |
1616601 | <filename>tests/test_plot.py
import numpy as np
import pandas as pd
from scipy import stats
import math
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# import class to be tested
# from heat.plot import ParaHeatPlot
import heat.plot as ph
# generate some realistic pandas random data and write tests to generate the tests i desire :)
rng = np.random.default_rng()
df = pd.DataFrame(rng.integers(0, 100, size=(100000, 2)), columns=list('XY'))
binx = np.arange(0,100,1)
biny = binx
# use function wrapped in heat module
ret = stats.binned_statistic_2d(df.X, df.Y, None, 'count', bins=[binx, biny])
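# ret.statistic holds the per-bin count of the random X/Y points; it is reused as the heat grid in the tests below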
# load a background image
bg_img = mpimg.imread('/Users/lukasgehrke/Documents/temp/matb.png')
def test_create_figure_axes():
nr_axes = 16
fig1, axes1 = ph.create_figure_axes(number_of_axes=nr_axes)
    assert axes1.shape[0] == math.ceil(nr_axes / 2), "returned number of axes as expected"
def test_get_image_extent():
    # TODO take image from web
extent = ph.get_image_extent(bg_img)
assert extent[2] == bg_img.shape[0], "image size matches extent"
def test_get_transparant_colormap():
cmap = plt.cm.coolwarm
my_cm = ph.make_cm_transparent(cmap)
assert np.min(my_cm._lut) == 0, "min of alpha in colormap is 0"
    assert np.max(my_cm._lut) == 1, "max of alpha in colormap is 1"
def test_get_transparant_diverging_colormap():
cmap = plt.cm.coolwarm
my_cm = ph.make_divergent_cm_transparent(cmap)
    assert np.min(my_cm._lut) == 1, "min of alpha in colormap is 1"
    assert np.max(my_cm._lut) == 1, "max of alpha in colormap is 1"
def test_make_axes_publication_ready():
pass
def test_make_plot():
# more like a functional test...
# plot settings and parameters
extent = ph.get_image_extent(bg_img)
my_cm = ph.make_cm_transparent(plt.cm.coolwarm)
# create plot and add elements and formatting
fig, ax = ph.create_figure_axes(1)
ph.add_background_image(bg_img, ax)
mask = ret.statistic# > 1
lims = [-20,20]
sig_levels = 5
heat = ph.add_heat(ret.statistic, ax, extent, cm=my_cm, lims=lims, add_contour=True, contour_mask=mask, levels=sig_levels)
ph.add_colorbar(heat, ax)
ph.set_labelnames(ax, title="some title", xlabel="some x label", ylabel="some y label")
ph.format_axes(ax)
ph.show()
def test_remove_islands():
mask = ret.statistic
mask1 = ph.gaussian(mask)
assert mask.shape == mask1.shape
def test_poly_mask():
# cut a square
poly = [(.5, .5), (.5, 1.5), (1.5, 1.5), (1.5, .5)]
mask = ph.poly_mask(ret.statistic, poly)
assert mask.shape == ret.statistic.shape, "mask and input array shape are equal"
# def test_poly_clip():
# ph.clip_img_poly_patch(bg_img) | StarcoderdataPython |
3215509 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckNameResult(Model):
"""The result returned from a check name availability request.
:param name_available: Specifies a Boolean value that indicates if the
name is available.
:type name_available: bool
:param message: Message indicating an unavailable name due to a conflict,
or a description of the naming rules that are violated.
:type message: str
:param reason: Message providing the reason why the given name is invalid.
Possible values include: 'Invalid', 'AlreadyExists'
:type reason: str or ~controlplane.models.Reason
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'message': {'key': 'message', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(self, *, name_available: bool=None, message: str=None, reason=None, **kwargs) -> None:
super(CheckNameResult, self).__init__(**kwargs)
self.name_available = name_available
self.message = message
self.reason = reason
| StarcoderdataPython |
1780093 | # -*- coding: utf-8 -*-
"""The GUID Partition Table (GPT) directory implementation."""
from dfvfs.path import gpt_path_spec
from dfvfs.vfs import directory
class GPTDirectory(directory.Directory):
"""File system directory that uses pyvsgpt."""
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
GPTPathSpec: a path specification.
"""
entry_index = getattr(self.path_spec, 'entry_index', None)
location = getattr(self.path_spec, 'location', None)
# Only the virtual root file has directory entries.
if (entry_index is None and location is not None and
location == self._file_system.LOCATION_ROOT):
vsgpt_volume = self._file_system.GetGPTVolume()
for partition in vsgpt_volume.partitions:
location = '/p{0:d}'.format(partition.entry_index + 1)
yield gpt_path_spec.GPTPathSpec(
entry_index=entry_index, location=location,
parent=self.path_spec.parent)
| StarcoderdataPython |
120632 | from math import sin, pi
from random import uniform
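# Generate 10,000 (sin(s), s) samples with s uniform in [0, 2*pi] and write them to sinx.csv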
X = []
for i in range(10000):
s = uniform(0, 2*pi)
X.append([sin(s), s])
X = [str(i) + ',' + str(j) + '\n' for i, j in X]
fh = open('sinx.csv', 'w')
fh.writelines(X)
fh.close()
| StarcoderdataPython |
1799403 | <filename>cave/com.raytheon.viz.gfe/localization/gfe/userPython/utilities/HazardUtils.py<gh_stars>0
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# HazardUtils
#
# SOFTWARE HISTORY
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# Jan 16, 2015 4006 njensen create _getUniqueKeys() mask with dtype bool
# 06/08/16 19096 ryu Change mask to boolean data type
#
# ----------------------------------------------------------------------------
##
# This is a base file that is not intended to be overridden.
##
import SmartScript
import time, string
import VTECTable
import LogStream
import numpy
from AbsTime import AbsTime
from AbsTime import current
from TimeRange import TimeRange
from java.util import Date
from java.util import ArrayList
import jep
from JUtil import JavaWrapperClass
def LOCK_HOURS():
return 192
def HOUR_SECONDS():
return 3600
MODEL = "Fcst"
ELEMENT = "Hazards"
LEVEL = "SFC"
# Status return codes for _separateHazardGrids
SUCCESS = 1
FAIL_REDUNDANT = 0
FAIL_LOCK = -1
class HazardUtils(SmartScript.SmartScript):
def __init__(self, dbss, eaMgr, mdMode=None, toolType="numeric"):
SmartScript.SmartScript.__init__(self, dbss)
# self.setUp(eaMgr, mdMode, toolType)
##
# Get timeRanges that make up the inventory of the given weather element.
# This is normally only used for the hazards inventory, so model is "Fcst"
# and level is "SFC" in the lookup.
#
# @param WEName: The weather element whose time ranges are to be acquired.
# @type WEName: string
# @param timeRange: optional time range of the inventory. If not specified,
# the default is from 24 hours ago to ten days from now.
# @type timeRange: Java or Python TimeRange
# @param asJava: If True, the inventory is returned as a list of Java
# TimeRanges; if False, the inventory is returned as a list
# of Python TimeRanges. The default is False.
# @type asJava: boolean
# @return: The time ranges for WEName that overlap the specified or default
# time range.
def _getWEInventory(self, WEName, timeRange=None, asJava=False):
# set up a timeRange if it is None
if timeRange is None:
now = current()
yesterday = now - (24 * 3600) # one day ago
later = now + 10 * 24 * 3600 # 10 days from now
timeRange = self._makeTimeRange(yesterday.unixTime(), later.unixTime())
parm = self.getParm(MODEL, WEName, LEVEL)
trList = []
if parm is not None:
if isinstance(timeRange, JavaWrapperClass):
timeRange = timeRange.toJavaObj()
gridInventory = parm.getGridInventory(timeRange)
for g in gridInventory:
gridTimeRange = g.getGridTime()
tr = gridTimeRange.clone()
if not asJava:
tr = TimeRange(tr)
trList.append(tr)
return trList
# makes a TimeRange from a start and end integers
def _makeTimeRange(self, start, end):
return TimeRange(AbsTime(start), AbsTime(end))
##
# Get timeRanges that correspond to gaps in the specified WEName inventory
# within the specified time ranges.
#
# @param WEName: A weather element name
# @type WEName: string
# @param trList: Time ranges of interest
# @type trList: list of Python or Java TimeRange
# @return: time ranges overlapping timeRange that are missing from the
# inventory of WEName
# @rtype: list of Python TimeRanges
def _getGaps(self, WEName, trList):
fullHazardInv = self._getWEInventory(WEName)
gaps = []
for timeRange in trList:
# Convert Java TimeRange to Python for comparisons
if not isinstance(timeRange, TimeRange):
timeRange = TimeRange(timeRange)
hazInv = []
for h in fullHazardInv:
if timeRange.overlaps(h):
hazInv.append(h)
# check for empty inventory
if len(hazInv) == 0: # no grids at all
gaps.append(timeRange)
continue
# see if we have a gap at the beginning
if timeRange.startTime() < hazInv[0].startTime():
tr = TimeRange(timeRange.startTime(),
hazInv[0].startTime())
gaps.append(tr)
# Find any gaps in the middle of the inventory
for i in range(len(hazInv) - 1):
if hazInv[i].endTime() != hazInv[i+1].startTime():
gapTR = TimeRange(hazInv[i].endTime(),
hazInv[i+1].startTime())
gaps.append(gapTR)
# see if we have a gap at the end of the inventory
if timeRange.endTime() > hazInv[-1].endTime():
tr = TimeRange(hazInv[-1].endTime(),
timeRange.endTime())
gaps.append(tr)
return gaps
##
# Create an empty hazards-type grid with the specified
# name and timeRange
#
# @param weName: The name of the weather element to create.
# @type weName: string
# @param timeRange: The time range of the new grid.
# @type timeRange: a Java or Python TimeRange
# @raise JepException: when raised by SmartScript methods.
def _makeEmptyHazardGrid(self, weName, timeRange):
gridShape = self.getGridShape()
byteGrid = numpy.zeros(gridShape, dtype=numpy.int8)
hazKeys = self.getDiscreteKeys(ELEMENT)
currentKeys = ["<None>"]
# make the grid
if weName == ELEMENT:
self.createGrid(MODEL, weName, "DISCRETE", (byteGrid, currentKeys),
timeRange, discreteKeys=hazKeys,
discreteAuxDataLength=4, discreteOverlap=1)
else:
hazard = self._tempWENameToKey(weName)
discreteKeys = ["<None>", hazard]
hazKeyDesc = self._addHazardDesc(discreteKeys)
self.createGrid(MODEL, weName, "DISCRETE", (byteGrid, currentKeys),
timeRange, discreteKeys=hazKeyDesc,
discreteAuxDataLength=4, discreteOverlap=0,
defaultColorTable="YesNo")
return
##
# Prepare the Hazards inventory so that it can be merged with the
# activeTable. This includes splitting grids and adding new ones where
# we have gaps.
#
# @param weName: Name of a weather element
# @type weName: string
# @param trList: Time ranges of interest
# @type trList: list of Python or Java TimeRanges
def _setupHazardsInventory(self, weName, trList):
# see if the element exists yet, if not, make a new grid
# This is a painful way just to see if the grid exists
# but all other techniques fail for temporary weather elements
now = current()
yesterday = now - (24 * 3600) # one day ago
later = now + 10 * 24 * 3600 # 10 days from now
timeRange = TimeRange(yesterday, later).toJavaObj()
try:
gridInfo = self.getGridInfo(MODEL, weName, LEVEL, timeRange)
except: # this means the WE does not exist, so make a grid
if len(trList) <= 0:
return
for tr in trList:
self._makeEmptyHazardGrid(weName, tr)
return
# fill any gaps in the inventory
gapList = self._getGaps(weName, trList)
for g in gapList:
self._makeEmptyHazardGrid(weName, g)
# Split the grids at the timeRange boundaries
unix_now = now.unixTime()
for tr in trList:
# If tr is a java timerange, convert it to a python TimeRange
if not isinstance(tr, TimeRange):
tr = TimeRange(tr)
end = tr.endTime().unixTime()
if end > unix_now:
# parm.splitTR() will split timeRanges with non-zero minutes
# to the next hour. So, truncate start and end times to the
# previous hour and then split
start = tr.startTime().unixTime()
start = int(start / 3600) * 3600
end = int(end / 3600) * 3600
roundedTR = TimeRange(AbsTime(start), AbsTime(end)).toJavaObj()
parm = self.getParm(MODEL, weName, LEVEL)
self.splitCmd([weName], roundedTR)
return
# returns a Numeric mask where each zone in zoneList is set to 1
def _makeMask(self, zoneList):
mask = self.empty(dtype=numpy.bool)
eaList = self.editAreaList()
for z in zoneList:
if z in eaList:
zoneArea = self.getEditArea(z)
zoneMask = self.encodeEditArea(zoneArea)
mask = numpy.logical_or(mask, zoneMask)
return mask
# Fetches the gridSize from the GFE and returns it as a tuple.
def _getGridSize(self):
return self.getGridShape()
##
# Determine whether temporary weather elements are loaded.
#
# @return: 1 if temporary weather elements are loaded;
# 0 otherwise.
def _tempWELoaded(self):
parms = self.loadedParms()
for weName, level, dbID in parms:
if string.find(weName, "haz") == 0:
return 1
return 0
##
# Create a temporary weather element name from key.
#
# @param key: String like BZ.W:1234, or LCLKEY, or BZ.W
# @type key: string
# @return: key with 'haz' prepended and any '.' or ':' chars removed.
# @rtype: string
def _makeTempWEName(self, key):
"Create a temporary weather element name from a key string."
#key is BZ.W:1234, or LCLKEY, or BZ.W
key = string.replace(key, ".","")
key = string.replace(key, ":","")
weName = "haz" + key
return weName
##
# Create a key string from a temporary weather element name.
#
# @param wename: A temporary weather element name
# @type wename: string
# @return: The key string from which the temporary element was derived.
# @rtype: string
def _tempWENameToKey(self, wename):
"Make a key string from a temporary weather element name."
#wename is hazBZW, hazBZW1234, hazLCLK
if len(wename) > 3 and wename[0:3] == 'haz':
key = wename[3:] #eliminate "haz"
if len(key) >= 3:
vkey = key[0:2] + '.' + key[2]
if VTECTable.VTECTable.has_key(vkey):
seg = key[3:]
if len(seg):
return vkey + ':' + seg
else:
return vkey
# local key, look for segment via digits
else:
lkey = key
for i in xrange(len(key)):
if key[i:].isdigit():
lkey = key[0:i] + ":" + key[i:]
break
return lkey
else:
# TODO: or should I fail?
return key
else:
raise Exception, "Illegal wename: " + wename
##
# Gets the unique list of keys over the specified mask
# if no mask is passed, the entire grid is used
#
# @param byteGrid: Grid of indices
# @type byteGrid: Numpy array of int8
# @param keys: Keys associated with byteGrid. If byteGrid[2,2] is 3, then
# keys[3] describes its state.
# @type keys: List of strings
# @param mask: Optional mask of points to include; defaults to all ones.
# @type mask: Numpy array of boolean, same dimensions as byteGrid;
# @return: The keys referenced by the masked byteGrid, without duplicates.
# @rtype: List of strings
def _getUniqueKeys(self, byteGrid, keys, mask = None):
uniqueKeys = []
# if mask is None, make a mask of the whole area
if mask is None:
mask = numpy.ones(byteGrid.shape, numpy.bool)
# get the list of values over the mask area only
valueList = numpy.compress(mask.flat, byteGrid.flat)
        # remove the duplicates to get unique values
uniqueValues = list( numpy.unique(valueList) )
# extract the keys that correspond to the byte values
for u in uniqueValues:
uniqueKeys.append(keys[u])
return uniqueKeys
##
# Get the phen portion of key.
# If key is not a VTEC hazard key, returns ""
# @param key: A grid key
# @type key: string
# @return: The phen portion of key.
# @rtype: string
def _keyPhen(self, key):
pos = string.find(key, ".")
if pos == -1: # not found
return ""
return key[0:pos]
##
# Get the sig portion of key.
# If key is not a VTEC hazard key, returns ""
#
# @param key: A grid key.
# @type key: string
# @return: The sig portion of key.
# @rtype: string
def _keySig(self, key):
pos = string.find(key, ".")
if pos == -1: # not found
return ""
return key[pos + 1]
##
# Combine newKey with subKeys and return a new combined key. Enforces the
# rule that keys with the same phen returns the one key with the highest
# priority sig.
#
# @param subKeys: The old key.
# @type subKeys: string
# @param newKey: The key to add.
# @type newKey: string
# @return: The key made by combining subKeys with newKey.
# @rtype: string
def _combinedKey(self, subKeys, newKey):
if newKey is None:
return subKeys
subKeyList = string.split(subKeys, "^")
# check for same keys
if newKey in subKeyList:
return subKeys
defaultCombo = subKeys + "^" + newKey
# check for non-VTEC key
if string.find(newKey, ".") == -1:
return defaultCombo
# more exceptions - these phens are above the law
exceptions = ["TO", "SV", "FF"]
sigList = ["W", "Y", "A"]
if self._keyPhen(newKey) in exceptions:
return defaultCombo
subKeyList = string.split(subKeys, "^")
for sk in subKeyList:
if self._keyPhen(sk) == self._keyPhen(newKey):
subSig = self._keySig(sk)
newSig = self._keySig(newKey)
if subSig == newSig:
return subKeys
if subSig not in sigList or newSig not in sigList:
continue
if sigList.index(subSig) > sigList.index(newSig):
subKeys = subKeys.replace(sk, newKey)
return subKeys
return defaultCombo
# Makes a new hazard given the old key oldKey and a new watch phenSig.
# @param oldKey: The old key
# @type oldKey: string
# @param phenSig: The new watch phen and sig
# @type phenSig: string
# @return: A new combined key.
# @rtype: string
def _makeNewKey(self, oldKey, phenSig):
# check for the dumb cases
if oldKey == "<None>" or oldKey == phenSig:
return phenSig
# split up the key, add the hazard, sort, and reassemble
parts = string.split(oldKey, "^")
parts.append(phenSig)
parts.sort() # makes sure the same set of subKeys look the same
# assemble the new key
newKey = ""
for p in parts:
if newKey == "":
newKey = p
else:
newKey = self._combinedKey(newKey, p)
# just in case
if newKey == "":
newKey = "<None>"
return newKey
##
# Get the subkeys of key
#
# @param key: A key to divide into subkeys
# @type key: String
# @return: The subkeys of key
# @rtype: List of strings
def _getSubKeys(self, key):
parts = string.split(key, "^")
if "<None>" in parts:
parts.remove("<None>")
return parts
def _removeSubKey(self, key, subKey):
newKey = ""
for p in string.split(key, "^"):
if p == subKey:
continue
if newKey == "":
newKey = p
else:
newKey = newKey + "^" + p
if newKey == "":
newKey = "<None>"
return newKey
##
# Take a sequence or set of time ranges and produce a set of time ranges by
# combining all adjacent or overlapping time ranges in the sequence.
#
# @param timeranges: the timeranges to merge
# @type timeranges : sequence, set or frozenset of TimeRange
# @return: the merged timeranges
# @rtype: set of TimeRange
def _mergeTimeranges(self, timeranges):
trset = set(timeranges)
# Loop until a pass doesn't merge any time ranges
moreToDo = True
while moreToDo:
moreToDo = False
merged = []
for tr in trset:
found = False
for idx, mtr in enumerate(merged):
if tr == mtr:
found = True
break
elif tr.overlaps(mtr) or tr.isAdjacentTo(mtr):
found = True
merged[idx] = mtr.join(tr)
moreToDo = True
break
if not found:
merged.append(tr)
trset = set(merged)
return trset
##
# Determine whether the time ranges of any (temporary) parm in hazParms
# overlaps a locked time range of the Hazards element. If not, add the
# time ranges of the temporary parms to the locked time ranges of the
# Hazards parm.
#
# @param hazParms: Temporary hazard parm names.
# @type hazParms: sequence of string
# @return: 0 if there are not conflicting locks, 1 if there are
# @rtype: int
def _conflictingLocks(self, hazParms):
# find all the time ranges that should be locked
neededTRs = set()
for hazParm in hazParms:
trList = self._getWEInventory(hazParm)
neededTRs = neededTRs.union(trList)
# Find all the time ranges that are locked in Hazards
myTRs = self.lockedByMe(ELEMENT, LEVEL)
myTRs = set(myTRs)
# Add locks we already have to the needed TRs,
# in case grids were deleted
neededTRs = neededTRs.union(myTRs)
# Squish the TRs into contiguous blocks
neededTRs = self._mergeTimeranges(neededTRs)
# See if there are any blocks we don't have yet
missingTRs = neededTRs - myTRs
# If not, then there are no conflicts and we're done.
if len(missingTRs) == 0:
return 0
startTimes = jep.jarray(len(missingTRs), Date)
midx = 0
for missingTR in missingTRs:
startTimes[midx] = missingTR.toJavaObj().getStart()
midx += 1
hazardParm = self.getParm(MODEL, ELEMENT, LEVEL)
gridData = None
try:
gridData = hazardParm.startParmEdit(startTimes)
except RuntimeError, runtimeErr:
if runtimeErr.message is None:
raise
if runtimeErr.message.startswith("com.raytheon.viz.gfe.GFEOperationFailedException:"):
return 1
else:
raise
if gridData is not None and len(gridData) != 0:
if not hazardParm.endParmEdit():
return 1
# The locks acquired in the endParmEdit() call may not have been quite right.
# However, we needed to end the parm edit.
# Negotiate the locks we _really_ need now that it's done.
locktable = hazardParm.getLockTable()
LOCK = locktable.getClass().getLockMode("LOCK");
from com.raytheon.uf.common.dataplugin.gfe.server.request import LockRequest
desiredLocks = ArrayList()
for missingTR in missingTRs:
newLock = LockRequest()
newLock.setParmId(hazardParm.getParmID())
newLock.setTimeRange(missingTR.toJavaObj())
newLock.setMode(LOCK)
desiredLocks.add(newLock)
client = hazardParm.getDataManager().getClient()
serverResponse = client.requestLockChange(desiredLocks)
if not serverResponse.isOkay():
hazardParm.undo()
return 1
return 0
##
# Create a list of (key, desc) tuples from keys.
# For each key in keys, look up the key in VTECTable.
# If the key is found, use its headline value as its description;
# otherwise, use the key as its own description.
#
# @param keys: Keys to look up descriptions for.
# @type keys: iterable of strings
# @return: keys and descriptions for the key
# @rtype: list of 2-tuples
def _addHazardDesc(self, keys):
newKeys = []
for k in keys:
index = string.find(k, ':')
if index != -1:
k = k[0:index] #eliminate the colon and segment #
if not VTECTable.VTECTable.has_key(k):
desc = k
else:
# get the description
desc = VTECTable.VTECTable[k]['hdln']
newKeys.append((k, desc))
return newKeys
##
# Determine whether the Hazards forecast weather element is loaded.
#
# @param weName: The name of the weather element. Defaults to "Hazards".
# @type wename: string
# @return: 1 if the weather element is loaded, 0 otherwise
# @rtype: int
def _hazardsLoaded(self, weName=ELEMENT):
tupleList = self.loadedParms()
## look for the Hazards Weather element
for element, level, databaseID in tupleList:
modelName = databaseID.modelName()
if element == weName and level == LEVEL and modelName == MODEL:
return 1
# if we got this far we didn't find it.
return 0
##
# Remove any grids for weName whose end times are in the past
#
# @param weName: A weather element name.
# @type weName: string
# @raise JepException: if calls to Java methods fail.
def _removeOldGrids(self, weName):
# get the inventory
trList = self._getWEInventory(weName)
for tr in trList:
if tr.endTime().unixTime() < current().unixTime():
self.deleteCmd([weName], tr)
return
##
# Remove any data grids for MODEL, ELEMENT, and LEVEL over the default
# inventory timerange (from now to 10 days in the future). The parm
# remains in the parm manager.
def _removeAllHazardsGrids(self):
removeTRList = self._getWEInventory(ELEMENT, asJava=True)
# Remove the real Hazards grid
for tr in removeTRList:
if not self.deleteGrid(MODEL, ELEMENT, LEVEL, tr):
return False
return True
##
# Destroy all the temporary hazards (they are removed from the parm manager).
#
def _removeTempHazardWEs(self):
parms = self.loadedParms()
for weName, level, dbID in parms:
if string.find(weName, "haz") == 0 and len(weName) > 3:
self.unloadWE(MODEL, weName, level)
return
##
# Determine whether the indicated grids are consecutive in time and
# identical in value at every point.
# @attention: This method assumes timeRange1 begins before timeRange2.
# It will give wrong answers if their order is reversed
#
# @param weName: Weather element name
# @type weName: string
# @param timeRange1: First time range for weather element
# @type timeRange1: Python TimeRange
# @param timeRange2: Second time range for weather element
# @type timeRange2: Python TimeRange
# @return: True if the end time for timeRange1 matches the start time of
# timeRange2 and the grid for weName during timeRange1 is identical
# to the grid for weName during timeRange2, False otherwise.
# @rtype: boolean
def _consecutiveIdenticalGrids(self, weName, timeRange1, timeRange2):
if timeRange1.endTime() == timeRange2.startTime():
# get the grids
firstGrid, key = self.getGrids(MODEL, weName, LEVEL,
timeRange1.toJavaObj(), mode="First", cache=0)
secondGrid, key = self.getGrids(MODEL, weName, LEVEL,
timeRange2.toJavaObj(), mode="First", cache=0)
if numpy.sometrue(numpy.logical_xor(firstGrid, secondGrid)):
return 0
else:
return 1
return 0
##
# Replace existing grids for weName with a single grid over the
# time range from groupStart to groupEnd.
#
# This function should only be used by _consolidateTimes(); it
# exists only to be sure we create the consolidated grid the same way in
# the "last timeRange" code block as we do in the "broke the string" block.
# @param groupStart: Starting time as seconds since the epoch
# @type groupStart: int
# @param groupEnd: Ending time as seconds since the epoch
# @type groupEnd: int
# @param weName: (temporary) weather element name
# @type weName: string
# @return: None
def _createConsolidatedGrid(self, groupStart, groupEnd, weName):
"Used internally by _consolidateTimes()"
timeRange = self._makeTimeRange(groupStart, groupEnd).toJavaObj()
byteGrid, hazKey = self.getGrids(MODEL, weName, LEVEL,
timeRange, mode="First", cache=0)
if isinstance(hazKey, str):
hazKey = eval(hazKey)
self.createGrid(MODEL, weName, "DISCRETE", (byteGrid, hazKey),
timeRange, discreteOverlap=1,
discreteAuxDataLength=4)
##
# Consolidate grid times for each weather element in weNameList.
# For each weather element, find time ranges that touch whose grids are
# identical and turn them into a single grid for the combined time range.
def _consolidateTimes(self, weNameList):
for weName in weNameList:
# Get "all" the time ranges for this element
trList = self._getWEInventory(weName)
if len(trList) == 0:
return
count = 1
groupStart = int(trList[0].startTime().unixTime())
groupEnd = int(trList[0].endTime().unixTime())
for i in range(0, len(trList) - 1):
if self._consecutiveIdenticalGrids(weName, trList[i], trList[i+1]):
# keep looking for the end
count = count + 1
groupEnd = int(trList[i+1].endTime().unixTime())
else: # broke the string of grids
if count > 1: # store the new time-consolidated grid
self._createConsolidatedGrid(groupStart, groupEnd, weName)
# reset the times
groupStart = int(trList[i+1].startTime().unixTime())
groupEnd = int(trList[i+1].endTime().unixTime())
count = 1
# make sure we catch the last timeRange
if count > 1: # store the new time-consolidated grid
self._createConsolidatedGrid(groupStart, groupEnd, weName)
return
##
# Lock any grids in the hazards parm from now to 10 hours in the future.
#
# @return: the hazards parm and its igrids
# @rtype: a 2-tuple; the first item is a Parm, the second is a list of IGridDatas,
# which, for discrete grids, translate to a 2-tuple containing a numpy
# array and a key string. So, like this:
# (parm,[(arr0,key0), (arr1,key1), ...])
#
def _lockHazards(self):
"Flag the hazards parm as being edited. Return the hazards parm and its grid."
hazParm = self.getParm(MODEL, ELEMENT, LEVEL)
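        # Truncate the current time to the top of the hour and lock from there
        # out to LOCK_HOURS() hours ahead.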
startAbsTime = AbsTime(int(current().unixTime() /3600)*3600)
endAbsTime = startAbsTime + LOCK_HOURS() * HOUR_SECONDS()
timeRange = TimeRange(startAbsTime, endAbsTime)
inventory = self._getWEInventory(ELEMENT, timeRange, asJava=True)
startTimes = jep.jarray(len(inventory), Date)
for trNum in range(len(inventory)):
startTimes[trNum] = inventory[trNum].getStart()
gridData = None
try:
# startParmEdit() refreshes the grids and sets up the times that endParmEdit() will lock.
gridData = hazParm.startParmEdit(startTimes)
except RuntimeError, runtimeErr:
if runtimeErr.message is None:
raise
if runtimeErr.message.startswith("com.raytheon.viz.gfe.GFEOperationFailedException:"):
self.statusBarMsg("There are conflicting locks. " + \
"Please resolve these before adding any hazards", "S")
hazParm = None
else:
raise
# endParmEdit() locks the grids.
# The locks will be released when the forecast is saved.
if hazParm is not None:
locked = True
if len(startTimes) != 0:
locked = hazParm.endParmEdit()
if locked:
locked = hazParm.forceLockTR(timeRange.toJavaObj())
if not locked:
self.statusBarMsg("There are conflicting locks. " + \
"Please resolve these before adding any hazards", "S")
hazParm = None
return (hazParm, gridData)
##
# Let other users edit the hazards parm.
#
# @return: True for success, False otherwise.
# @raise JepException: if the hazards parm was not being edited.
def _endEdit(self):
"Let other users edit the hazards parm. Return True for success."
hazParm = self.getParm(MODEL, ELEMENT, LEVEL)
return hazParm.endParmEdit()
##
# Make temporary hazard grids for each hazard subkey.
# Hazards are "being edited" until they are merged again.
#
# @return: True if separation succeeded, false otherwise.
#
def _separateHazardGrids(self):
"Make temporary hazard grids for each hazard subkey."
# if any temp hazard grids are loaded, don't separate again
if self._tempWELoaded():
return FAIL_REDUNDANT #already separated
hazParm, gridData = self._lockHazards()
if hazParm is None:
return FAIL_LOCK # unavailable
# get a collection of distinct Java TimeRange objects
trSet = set()
for gd in gridData:
trSet.add(gd.getGridTime())
# Create a set of temporary weather element names
weNameSet = set()
for tr in trSet:
# Get the index grid and key list for the real Hazards element
byteGrid, hazKey = self.getGrids(MODEL, ELEMENT, LEVEL, tr,
mode="First")
if isinstance(hazKey, str):
hazKey = eval(hazKey)
# Only work with the keys that have points in the grid
uniqueKeys = self._getUniqueKeys(byteGrid, hazKey)
if len(uniqueKeys) > 0:
# build list of split hazKeys for use in loop below
splitHazKeys = []
for haz in hazKey:
splitHazKeys.append(self._getSubKeys(haz))
for uKey in uniqueKeys:
if uKey == "<None>":
continue
# split the current key into its subkeys
subKeys = self._getSubKeys(uKey)
for subKey in subKeys:
# make the temporary name
weName = self._makeTempWEName(subKey)
# make the mask - find all areas that contain the subKey
mask = numpy.zeros(byteGrid.shape, dtype=numpy.bool)
for hazIndex in range(len(hazKey)):
if subKey in splitHazKeys[hazIndex]:
mask |= (byteGrid==hazIndex)
# make the grid
self._addHazard(weName, tr, subKey, mask)
pytr = TimeRange(tr)
logmsg = " ".join(["Separate", weName,
self._printTime(pytr.startTime().unixTime()),
self._printTime(pytr.endTime().unixTime()), subKey])
LogStream.logEvent(logmsg)
# save the weNames for later
weNameSet.add(weName)
# Combine time ranges for the temporary weather elements we created
self._consolidateTimes(weNameSet)
return SUCCESS
##
# Add the specified hazard to weName over the specified timeRange
# and spatially over the specified mask. Combines the specified
# hazard with the existing hazards by default. For replaceMode,
# specify 0 in the combineField.
#
# @param weName: The weather element name.
# @type wename: string
# @param timeRange: Time range of the hazard.
# @type timeRange: Java or Python TimeRange
# @param addHaz: Key for the new hazard
# @type addHaz: string
# @return: None
def _addHazard(self, weName, timeRange, addHaz, mask, combine=1):
# Python TimeRanges are easy to compare.
# Java methods require Java TimeRanges.
# Make sure we have one of each.
if isinstance(timeRange, JavaWrapperClass):
pyTimeRange = timeRange
timeRange = timeRange.toJavaObj()
else:
pyTimeRange = TimeRange(timeRange)
# refuse to make new grids that are more than one hour in the past
if pyTimeRange.endTime().unixTime() < current().unixTime() - HOUR_SECONDS():
msg = "skipped time range creation: %s < %s" % (pyTimeRange.endTime().string(), current().string())
return
# set up the inventory first
self._setupHazardsInventory(weName, [timeRange])
# get the inventory
trList = self._getWEInventory(weName, timeRange, asJava=True)
# coerce mask into a boolean array if it isn't already
if not (isinstance(mask, numpy.ndarray) and mask.dtype==numpy.bool):
mask = numpy.array(mask, dtype=numpy.bool)
for tr in trList:
# get the grid of index values and list of keys those indices select
byteGrid, hazKey = self.getGrids(MODEL, weName, LEVEL, tr,
mode="First", cache=0)
if isinstance(hazKey, str):
hazKey = eval(hazKey)
# Eliminate keys that aren't in the grid from the list.
uniqueKeys = self._getUniqueKeys(byteGrid, hazKey, mask)
for uKey in uniqueKeys:
# Figure out what the new key is
if combine:
newKey = self._makeNewKey(uKey, addHaz)
else: #replace
newKey = addHaz
# Find the index number for the old key
oldIndex = self.getIndex(uKey, hazKey)
# Find the index number for the new key (newKey is added if not in hazKey)
newIndex = self.getIndex(newKey, hazKey)
# calculate the mask - intersection of mask and oldIndex values
editMask = (byteGrid==oldIndex) & mask
# poke in the new values
byteGrid[editMask] = newIndex
# Save the updated byteGrid and hazKey
if weName == ELEMENT:
self.createGrid(MODEL, ELEMENT, "DISCRETE", (byteGrid, hazKey),
tr, discreteOverlap=1, discreteAuxDataLength=4)
else: # it's a temporary WE - special key
hazKey = ["<None>", addHaz]
hazKeyDesc = self._addHazardDesc(hazKey)
self.createGrid(MODEL, weName, "DISCRETE", (byteGrid, hazKey),
tr, discreteOverlap=0, discreteAuxDataLength=4,
discreteKeys=hazKeyDesc,
defaultColorTable="YesNo")
# remove any grids that are completely in the past
self._removeOldGrids(weName)
return
##
# Removes the specified hazard from the specified grid over the mask.
#
# @param weName: Name of the weather element to remove hazards from.
# @type weName: string
# @param timeRange: Time range from which to remove hazards.
# @type timeRange: Python or Java TimeRange
# @param removeHaz: Hazard phensig to remove
# @type removeHaz: string
# @param mask: Grid that is True for points where removeHaz should be removed,
# false where it should not. Defaults to all points selected if
# omitted or passed as None.
# @type mask: numpy array of boolean or Nonetype
def _removeHazard(self, weName, timeRange, removeHaz, mask = None):
# get the inventory
trList = self._getWEInventory(weName, timeRange)
# make sure we have a real mask
if mask is None:
gridShape = self._getGridSize()
mask = numpy.ones(gridShape, bool)
for tr in trList:
byteGrid, hazKey = self.getGrids(MODEL, weName, LEVEL, tr,
mode="First", cache=0)
uniqueKeys = self._getUniqueKeys(byteGrid, hazKey, mask)
for uKey in uniqueKeys:
if string.find(uKey, removeHaz) >= 0:
newKey = self._removeSubKey(uKey, removeHaz)
oldIndex = self.getIndex(uKey, hazKey)
newIndex = self.getIndex(newKey, hazKey)
# calculate the mask - intersection of mask and oldIndex values
editMask = (byteGrid == oldIndex) & mask
# poke in the new values
byteGrid[editMask] = newIndex
# see if there's any hazards left and if not, delete the whole grid
noneIndex = self.getIndex("<None>", hazKey)
noneGrid = (byteGrid == noneIndex)
if noneGrid.all():
self.deleteCmd([weName], tr)
else:
self.createGrid(MODEL, weName, "DISCRETE", (byteGrid, hazKey),
tr, discreteOverlap= 0,
discreteAuxDataLength=4,
defaultColorTable="YesNo")
return
##
# Format time as yyyymmdd_hhmm
#
# @param t: Time
# @type t: seconds since the epoch
# @return: Formatted version of t
# @rtype: string
def _printTime(self, t):
gm = time.gmtime(t)
s = time.strftime("%Y%m%d_%H%M", gm)
return s
#print areas, from dictionary
def _printAreas(self, areas):
ara = list(areas)
ara.sort()
return ara
#filter vtec table based on gfe operating mode, returns vtec table
def _filterVTECBasedOnGFEMode(self, vtecTable):
#get gfe mode
rawGfeMode = self.gfeOperatingMode()
gfeMode = rawGfeMode
if gfeMode is None:
gfeMode = ""
gfeMode = gfeMode.strip().lower()
#practice mode = accept all records
if "practice" == gfeMode:
return vtecTable #allow all records
#test mode -- only accept records that have "T" vtec
elif "test" == gfeMode:
fvtecTable = []
for rec in vtecTable:
testEntry = (rec['vtecstr'].find('/T.') == 0)
if testEntry:
fvtecTable.append(rec)
return fvtecTable
#regular/operational mode -- accept records that don't have "T" vtec
elif "standard" == gfeMode or "operational" == gfeMode:
fvtecTable = []
for rec in vtecTable:
testEntry = (rec['vtecstr'].find('/T.') == 0)
if not testEntry:
fvtecTable.append(rec)
return fvtecTable
else:
raise Exception, "Unknown GFE operating mode: " + rawGfeMode
##
# A Python access to the looseLocks() method of the Hazards parm.
def _unlockHazards(self):
hazParm = self.getParm(MODEL, ELEMENT, LEVEL)
hazParm.looseLocks()
| StarcoderdataPython |
2574 | """For neatly implementing static typing in packaging.
`mypy` - the static type analysis tool we use - uses the `typing` module, which
provides core functionality fundamental to mypy's functioning.
Generally, `typing` would be imported at runtime and used in that fashion -
it acts as a no-op at runtime and does not have any run-time overhead by
design.
As it turns out, `typing` is not vendorable - it uses separate sources for
Python 2/Python 3. Thus, this codebase can not expect it to be present.
To work around this, mypy allows the typing import to be behind a False-y
optional to prevent it from running at runtime and type-comments can be used
to remove the need for the types to be accessible directly during runtime.
This module provides the False-y guard in a nicely named fashion so that a
curious maintainer can reach here to read this.
In packaging, all static-typing related imports should be guarded as follows:
from packaging._typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import ...
Ref: https://github.com/python/mypy/issues/3216
"""
__all__ = ["TYPE_CHECKING", "cast"]
# The TYPE_CHECKING constant defined by the typing module is False at runtime
# but True while type checking.
TYPE_CHECKING = False # pragma: no cover
# typing's cast syntax requires calling typing.cast at runtime, but we don't
# want to import typing at runtime. Here, we inform the type checkers that
# we're importing `typing.cast` as `cast` and re-implement typing.cast's
# runtime behavior in a block that is ignored by type checkers.
if TYPE_CHECKING: # pragma: no cover
# not executed at runtime
from typing import cast
else:
# executed at runtime
def cast(type_, value): # noqa
return value
| StarcoderdataPython |
8040 | <reponame>Awannaphasch2016/CDKFAUCovid19Cralwer
'''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''
import boto3
import json
import twitter_to_es
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \
# twitter_to_es
from tweet_utils import \
get_tweet, id_field, get_tweet_mapping
headers = {"Content-Type": "application/json"}
s3 = boto3.client('s3')
kinesis_client = boto3.client('kinesis')
# dynamoDb_client = boto3.client('dynamodb')
# Lambda execution starts here
def handler(event, context):
for record in event['Records']:
# Get the bucket name and key for the new file
bucket = record['s3']['bucket']['name']
key = record['s3']['object']['key']
# Get s3 object, read, and split the file into lines
try:
obj = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
print(e)
print(
'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
key, bucket))
raise e
# Parse s3 object content (JSON)
try:
# https://stackoverflow.com/questions/31976273/open-s3-object-as-a-string-with-boto3
s3_file_content = obj['Body'].read().decode('utf-8')
# clean trailing comma
if s3_file_content.endswith(',\n'):
s3_file_content = s3_file_content[:-2]
tweets_str = '[' + s3_file_content + ']'
# print(tweets_str)
tweets = json.loads(tweets_str)
except Exception as e:
print(e)
print('Error loading json from object {} in bucket {}'.format(key,
bucket))
raise e
for doc in tweets:
tweet = get_tweet(doc)
# print(tweet['sentiments'])
print(tweet)
print('===\n\n\n')
#=====================
#==send data to dynamoDB
#=====================
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Instantiate a table resource object without actually
# creating a DynamoDB table. Note that the attributes of this table
# are lazy-loaded: a request is not made nor are the attribute
# values populated until the attributes
# on the table resource are accessed or its load() method is called.
table = dynamodb.Table('faucovidstream_twitter_with_sentiment')
# Print out some data about the table.
# This will cause a request to be made to DynamoDB and its attribute
# values will be set based on the response.
print(table.creation_date_time)
        table.put_item(
Item=tweet
)
| StarcoderdataPython |
1718960 | <filename>tests/importer/tflite_/basic/test_gather.py
# Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""System test: test gather"""
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import tensorflow as tf
import numpy as np
from tflite_test_runner import TfliteTestRunner
def _make_module(in_shape, indice, axis, batch_dims):
class GatherModule(tf.Module):
def __init__(self):
super(GatherModule).__init__()
@tf.function(input_signature=[tf.TensorSpec(in_shape, tf.float32)])
def __call__(self, x):
return tf.gather(x, indice, axis=axis, batch_dims=batch_dims)
return GatherModule()
in_shape_indice_axis_batch_dims = [
([11], [1, 3, 10, 0, 2], 0, 0),
([11], [[2, 4], [1, 3]], 0, 0),
([7, 5], [1, 3], 0, 0),
([7, 5], [[1, 4, 3]], 1, 0),
([2, 3, 5], [1, 0, 1], 0, 0),
([2, 3, 5], [[2, 1], [1, 1], [1, 2]], 1, 0),
([2, 3, 5], [2, 4, 1], 2, 0),
([4, 5, 8, 3], [1, 0, 2], 1, 0),
([2, 3, 4, 7], [[1, 1], [1, 2]], 2, 0),
([4, 6, 5, 7], [[[1], [2]], [[3], [1]]], 3, 0),
([2, 3, 4, 7], [[1, 1], [1, 2]], -1, 0),
]
@pytest.mark.parametrize('in_shape,indice,axis,batch_dims', in_shape_indice_axis_batch_dims)
def test_gather(in_shape, indice, axis, batch_dims, request):
module = _make_module(in_shape, indice, axis, batch_dims)
runner = TfliteTestRunner(request.node.name)
model_file = runner.from_tensorflow(module)
runner.run(model_file)
if __name__ == "__main__":
pytest.main(['-vv', 'test_gather.py'])
| StarcoderdataPython |
176175 | <filename>tests/test_bitround.py
import pytest
import xarray as xr
from dask import is_dask_collection
from xarray.testing import assert_allclose, assert_equal
import xbitinfo as xb
@pytest.mark.parametrize("dtype", ["float16", "float32", "float64"])
@pytest.mark.parametrize("implementation", ["xarray", "julia"])
@pytest.mark.parametrize("input_type", ["Dataset", "DataArray"])
@pytest.mark.parametrize("keepbits", ["dict", "int"])
def test_xr_bitround(air_temperature, dtype, input_type, implementation, keepbits):
"""Test xr_bitround to different keepbits of type dict or int."""
ds = air_temperature.astype(dtype)
i = 6
if keepbits == "dict":
keepbits = {v: i for v in ds.data_vars}
elif keepbits == "int":
keepbits = i
if input_type == "DataArray":
v = list(ds.data_vars)[0]
ds = ds[v]
bitround = xb.xr_bitround if implementation == "xarray" else xb.jl_bitround
ds_bitrounded = bitround(ds, keepbits)
def check(da, da_bitrounded):
# check close
assert_allclose(da, da_bitrounded, atol=0.01, rtol=0.01)
# attrs set
assert da_bitrounded.attrs["_QuantizeBitRoundNumberOfSignificantDigits"] == i
# different after bitrounding
diff = (da - da_bitrounded).compute()
assert (diff != 0).any()
if input_type == "DataArray":
check(ds, ds_bitrounded)
else:
for v in ds.data_vars:
check(ds[v], ds_bitrounded[v])
@pytest.mark.parametrize(
"implementation,dask",
[("xarray", True), ("xarray", False), ("julia", False)],
)
def test_bitround_dask(air_temperature, implementation, dask):
"""Test xr_bitround and jl_bitround keeps dask and successfully computes."""
ds = air_temperature
i = 15
keepbits = i
if dask:
ds = ds.chunk("auto")
bitround = xb.xr_bitround if implementation == "xarray" else xb.jl_bitround
ds_bitrounded = bitround(ds, keepbits)
assert is_dask_collection(ds_bitrounded) == dask
if dask:
assert ds_bitrounded.compute()
@pytest.mark.parametrize(
"dtype,keepbits",
[("float16", range(1, 9)), ("float32", range(1, 23)), ("float64", range(1, 52))],
)
def test_bitround_xarray_julia_equal(air_temperature, dtype, keepbits):
"""Test jl_bitround and xr_bitround yield identical results."""
ds = air_temperature.astype(dtype)
for keep in keepbits:
ds_xr_bitrounded = xb.xr_bitround(ds, keep)
ds_jl_bitrounded = xb.jl_bitround(ds, keep)
assert_equal(ds_jl_bitrounded, ds_xr_bitrounded)
| StarcoderdataPython |
3365764 | # Generated by Django 2.2.9 on 2020-04-10 21:24
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='ReportConfiguration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dimensions', jsonfield.fields.JSONField(default=None, max_length=8192, null=True)),
('measures', jsonfield.fields.JSONField(default=None, max_length=8192, null=True)),
('filters', jsonfield.fields.JSONField(default=None, max_length=8192, null=True)),
('model', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='reports', to='contenttypes.ContentType')),
],
),
]
| StarcoderdataPython |
1612112 | <reponame>daxAKAhackerman/one-time-secret
import os
import pymongo
def get_mongo_col():
if not os.getenv("TESTING"):
MONGO_HOST = os.environ["MONGO_HOST"]
MONGO_PORT = os.environ["MONGO_PORT"]
MONGO_USERNAME = os.environ["MONGO_USERNAME"]
MONGO_PASSWORD = os.environ["MONGO_PASSWORD"]
MONGO_DB = os.environ["MONGO_DB"]
MONGO_COL = os.environ["MONGO_COL"]
mongo_client = pymongo.MongoClient(host=MONGO_HOST, port=int(MONGO_PORT), username=MONGO_USERNAME, password=<PASSWORD>)
secret_col = mongo_client[MONGO_DB][MONGO_COL]
return secret_col
else:
from tests.conftest import FakeSecretCol
return FakeSecretCol
| StarcoderdataPython |
1688955 | # Name: Nathanael
# Date: June 12
# proj01: A Simple Program
# This program asks the user for his/her name and age.
# Then, it prints a sentence that says when the user will turn 100.
# If you complete extensions, describe your extensions here!
user_input = raw_input("Enter your age: ")
user_input2 = raw_input("Enter your name: ")
year100 = 100 - int(user_input)
birthday = raw_input("enter Y if you had a birthday or N if you have not this year: ")
if birthday == "N":
year100 = 99 - int(user_input)
user_input3 = raw_input(" Enter your Gender, either Male or Female:")
year2017 = 2017 + year100
if int(user_input) >= 100:
print
print
print 'you are already at least 100 years old'
print
print user_input2
print "is"
print user_input
print "years old"
print " and will be"
print year100
print 'years until you are 100 years old'
print " You will be 100 years old at year"
print year2017
print " you are the"
print user_input3
print "Gender"
| StarcoderdataPython |
3364319 | <reponame>scottwedge/OpenStack-Stein<filename>panko-6.0.0/panko/tests/functional/test_bin.py
# Copyright 2012 eNovance <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import subprocess
from oslo_utils import fileutils
import six
from panko.tests import base
class BinTestCase(base.BaseTestCase):
def setUp(self):
super(BinTestCase, self).setUp()
content = ("[database]\n"
"connection=log://localhost\n")
if six.PY3:
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
prefix='panko',
suffix='.conf')
def tearDown(self):
super(BinTestCase, self).tearDown()
os.remove(self.tempfile)
def test_dbsync_run(self):
subp = subprocess.Popen(['panko-dbsync',
"--config-file=%s" % self.tempfile])
self.assertEqual(0, subp.wait())
def test_run_expirer_ttl_disabled(self):
subp = subprocess.Popen(['panko-expirer',
'-d',
"--config-file=%s" % self.tempfile],
stdout=subprocess.PIPE)
out, __ = subp.communicate()
self.assertEqual(0, subp.poll())
self.assertIn(b"Nothing to clean, database event "
b"time to live is disabled", out)
def _test_run_expirer_ttl_enabled(self, ttl_name, data_name):
content = ("[database]\n"
"%s=1\n"
"connection=log://localhost\n" % ttl_name)
if six.PY3:
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
prefix='panko',
suffix='.conf')
subp = subprocess.Popen(['panko-expirer',
'-d',
"--config-file=%s" % self.tempfile],
stdout=subprocess.PIPE)
out, __ = subp.communicate()
self.assertEqual(0, subp.poll())
msg = "Dropping %s data with TTL 1" % data_name
if six.PY3:
msg = msg.encode('utf-8')
self.assertIn(msg, out)
def test_run_expirer_ttl_enabled(self):
self._test_run_expirer_ttl_enabled('event_time_to_live', 'event')
| StarcoderdataPython |
1645561 | <filename>frappe-bench/apps/erpnext/erpnext/healthcare/doctype/physician/test_physician.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS LLP and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_dependencies = ['Physician Schedule']
class TestPhysician(unittest.TestCase):
def tearDown(self):
frappe.delete_doc_if_exists('Physician', '_Testdoctor2', force=1)
def test_new_physician_without_schedule(self):
physician = frappe.new_doc('Physician')
physician.first_name = '_Testdoctor2'
physician.insert()
self.assertEqual(frappe.get_value('Physician', '_Testdoctor2', 'first_name'), '_Testdoctor2')
| StarcoderdataPython |
1716220 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 2019-06-14
"""add_metacyc_rea_archive.py
:description : script
:param :
:returns:
:rtype:
"""
# note: not finished
import os
import pickle
import re
import sys
import cobra
import pandas as pd
import My_def
# Disable
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = sys.__stdout__
# reload(My_def)
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
# %% load data
Lreu_draft_3_refined = cobra.io.load_json_model('../Step_02_DraftModels/Lreu_draft_3_refined.json')
Lreu_metset = set([i.id for i in Lreu_draft_3_refined.metabolites])
Lreu_reaset = set([i.id for i in Lreu_draft_3_refined.reactions])
Lreu_metacyc = cobra.io.read_sbml_model(
'/Users/lhao/Box Sync/Projects/Project_Lreuteri/Lactobacillus_reuteri_MM41A_GEM/ComplementaryData/Step_02_DraftModels/RAVEN/Lreu_ra_me.xml')
Lreu_metacyc.id = 'Lreu_metacyc'
all_map = pd.read_csv('../Step_02_DraftModels/all_map.csv', sep='\t')
all_map = all_map.fillna('')
Lreu_metacyc_map = all_map[all_map.model == 'Lreu_metacyc'].copy()
iML1515_report = all_map[(all_map.model == 'iML1515')].copy()
iML1515_reaset = set(iML1515_report[(iML1515_report.type == 'rea')].id_in_tp.values)
iML1515_metset = set(iML1515_report[(iML1515_report.type == 'met')].id_in_tp.values)
iNF517_report = all_map[(all_map.model == 'iNF517')].copy()
iNF517_reaset = set(iNF517_report[(iNF517_report.type == 'rea')].id_in_tp.values)
iNF517_metset = set(iNF517_report[(iNF517_report.type == 'met')].id_in_tp.values)
with open('../bigg_database/universal_model.pickle', 'rb') as f:
bigg_model = pickle.load(f)
bigg_reaset = set([i.id for i in bigg_model.reactions])
bigg_met_set = set([i.id for i in bigg_model.metabolites])
# %% compare id_map and rea_map
def find_one(str, modelset, bigg_model):
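    # Pick a single BiGG id from the mapped candidates: prefer ids shared with
    # the reference models, then ids whose reaction avoids _p/_m metabolites.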
if str == '':
return ''
else:
str = eval(str)
biggids = [i for i in str if not (i.startswith('R_') or i.startswith('M_'))]
if len(biggids) == 1:
return biggids[0]
else:
a = set(biggids) & modelset
if len(a) == 1:
return list(a)[0]
elif len(a) > 1:
biggids = list(a)
for i in biggids:
try:
rea = bigg_model.reactions.get_by_id(i)
if '_p' not in rea.reaction and '_m' not in rea.reaction:
return i
except KeyError:
continue
return biggids[0]
# %% <option1> reaid map, metacyc-->bigg find the euqation
temp_rea_df = Lreu_metacyc_map[Lreu_metacyc_map.type == 'rea'].copy()
modelset = iML1515_reaset | iNF517_reaset
temp_rea_df['uniqueid'] = temp_rea_df.bigg.apply(lambda x: find_one(x, modelset, bigg_model))
temp_rea_df = temp_rea_df[temp_rea_df.uniqueid != '']
# uniquelist1 = set(temp_rea_df['uniqueid'].values) #-Lreu_reaset
# temp_rea_df = temp_rea_df[~temp_rea_df['uniqueid'].isin(uniquelist1) ]
# %% change model:
remove_rea_list_1 = list(temp_rea_df.id_in_tp)
add_rea_list_1 = list(temp_rea_df.uniqueid)
Lrue_metacyc_bigg_rearep = Lreu_metacyc.copy()
for row in temp_rea_df.itertuples():
try:
rea_a = bigg_model.reactions.get_by_id(row.uniqueid)
except KeyError:
print(row)
continue
rea_r = Lrue_metacyc_bigg_rearep.reactions.get_by_id(row.id_in_tp)
rea_a.gene_reaction_rule = rea_r.gene_reaction_rule
rea_a.notes['from'] = ['metacyc']
rea_a.notes['metacyc_id'] = row.id_in_tp
Lrue_metacyc_bigg_rearep.add_reaction(rea_a)
rea_r.remove_from_model()
# %% <option2> metaid map, metacyc-->bigg get the euqation
temp_met_df = Lreu_metacyc_map[Lreu_metacyc_map.type == 'met'].copy()
modelset = iML1515_metset | iNF517_metset
temp_met_df['uniqueid'] = temp_met_df.bigg.apply(lambda x: find_one(x, modelset, bigg_model))
temp_met_df = temp_met_df[temp_met_df.uniqueid != '']
Lrue_metacyc_bigg_metarep = Lreu_metacyc.copy()
remove_met_list = list(temp_met_df.id_in_tp.values)
add_met_list = list(temp_met_df.uniqueid.values)
remove_met_set = set(remove_met_list)
add_met_set = set(add_met_list)
remove_rea_list_2 = []
for rea in Lrue_metacyc_bigg_metarep.reactions:
rea_metset = set(i.id for i in rea.metabolites.keys())
# print(rea_metset)
rea_intersection = rea_metset & remove_met_set
# print(rea_intersection)
if rea_intersection == set():
continue
elif rea_intersection == rea_metset:
remove_rea_list_2.append(rea.id)
for i in rea_intersection:
add_met = add_met_list[remove_met_list.index(i)] + '_c'
if add_met in bigg_met_set:
rea.reaction = re.sub(r'\b%s\b' % i, add_met_list[remove_met_list.index(i)] + '_c', rea.reaction)
else:
print(add_met)
# %% add reaction intomode
Lreu_draft_3_add_metacyc = Lreu_draft_3_refined.copy()
dup_set_checked = {'TRANS__45__RXN0__45__617',
'NTP1',
'TRANS__45__RXN__45__220',
'TRANS__45__RXN__45__366',
'TRANS__45__RXN0__45__510',
'TRANS__45__RXN__45__290',
'3__46__6__46__3__46__3__45__RXN',
'TRANS__45__RXN__45__237',
'RXN__45__19779',
'PHOSPHOKETOLASE__45__RXN',
'RXN__45__16819',
'RXN0__45__5213'}
# list1:
final_add_list1 = []
for i in set(add_rea_list_1) - Lreu_reaset - dup_set_checked:
rea = Lrue_metacyc_bigg_rearep.reactions.get_by_id(i)
if '_m' not in rea.reaction and '_p' not in rea.reaction:
final_add_list1.append(rea.id)
print(rea)
# rea.notes['from'] = ['metacyc']
# rea_a.notes['metacyc_id'] = row.id_in_tp
Lreu_draft_3_add_metacyc.add_reaction(rea)
# list2:
final_add_list2 = []
for i in set(remove_rea_list_2) - set(remove_rea_list_1) - dup_set_checked:
rea = Lrue_metacyc_bigg_metarep.reactions.get_by_id(i)
final_add_list2.append(rea.id)
print(rea)
rea.notes['from'] = ['metacyc']
Lreu_draft_3_add_metacyc.add_reaction(rea)
# %% check duplication
# get dup_set_checked and avoide to add it to model
check_df = My_def.model_refine.remove_dup_rea(Lreu_draft_3_add_metacyc, remove=False, skip_met=[])
# print(check_df)
check_id = list(check_df['id'])
print('Duplicate reactions: ')
for i in list(check_id):
rea = Lreu_draft_3_add_metacyc.reactions.get_by_id(i)
print(rea, rea.bounds, rea.gene_reaction_rule, rea.notes)
# dup_set_checked = {'TRANS__45__RXN0__45__617',
# 'NTP1',
# 'TRANS__45__RXN__45__220',
# 'TRANS__45__RXN__45__366',
# 'TRANS__45__RXN0__45__510',
# 'TRANS__45__RXN__45__290',
# '3__46__6__46__3__46__3__45__RXN',
# 'TRANS__45__RXN__45__237',
# 'RXN__45__19779',
# 'PHOSPHOKETOLASE__45__RXN',
# 'RXN__45__16819',
# 'RXN0__45__5213'}
# %% <save >
cobra.io.save_json_model(Lreu_draft_3_add_metacyc, 'Lreu_draft_3_add_metacyc.json', sort='True')
print('===== Done =====')
| StarcoderdataPython |
1765538 | <reponame>MaxTechniche/DayFolderOrganizer
import os
import re
def organize(directory, keyword='(d|D)ay_?(\d+)', folder_name='Day_', regex=True):
if not regex:
folder_name=keyword
keyword = re.escape(keyword)
print(keyword)
os.chdir(directory)
folder_checks = []
for item in os.listdir():
if os.path.isdir(item):
if re.match(keyword, item):
print('match found')
pass
elif (item not in ['.git', 'README.md', ]):
folder_checks.append(os.path.join(directory, item))
else:
finder = re.findall(keyword, item)
if finder:
try:
number = int(finder[0][1])
except ValueError:
number = ''
if len(str(number)) == 1:
number = '0' + str(number)
try:
os.mkdir(folder_name + str(number))
except FileExistsError:
pass
os.rename(item, os.path.join(directory, folder_name + str(number), item))
for folder in folder_checks:
organize(folder)
| StarcoderdataPython |
3306779 | <reponame>engr-lynx/cicd-demo
from time import time
from json import dumps, loads, JSONDecodeError
from logging import getLogger, INFO
from boto3 import client
from botocore.exceptions import ClientError
logger = getLogger()
logger.setLevel(INFO)
cf = client('cloudfront')
cp = client('codepipeline')
def on_event(event, context):
logger.info('Received event: %s' % dumps(event))
cp_job = event['CodePipeline.job']
job_id = cp_job['id']
user_parameters_str = cp_job['data']['actionConfiguration']['configuration']['UserParameters']
all_files = ['/*']
time_reference = str(time()).replace('.', '')
try:
user_parameters = loads(user_parameters_str)
cf.create_invalidation(
DistributionId=user_parameters['distributionId'],
InvalidationBatch={
'Paths': {
'Quantity': 1,
'Items': all_files
},
'CallerReference': time_reference
}
)
cp.put_job_success_result(jobId=job_id)
except ClientError as e:
logger.error('Error: %s', e)
cp.put_job_failure_result(
jobId=job_id,
failureDetails={
'type': 'JobFailed',
'message': e.response['Error']['Message']
}
)
except JSONDecodeError as e:
logger.error('Error: %s', e)
cp.put_job_failure_result(
jobId=job_id,
failureDetails={
'type': 'ConfigurationError',
'message': e.msg
}
)
return | StarcoderdataPython |
78718 | <reponame>shoaibahmed/pl-cnn<filename>src/utils/visualization/deprecated/plot_res.py
try:
import cPickle as pickle # Python2
except ImportError:
import pickle # Python3
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="white", palette="Set1")
res = pickle.load(open("./experiments/log/02-11-2016--13-16-49-0.p", "rb"))
res = pickle.load(open("./experiments/paper/temp/03-11-2016--19-07-48-0.p", "rb"))
res = pickle.load(open("./experiments/paper/cifar10/bp/backup/adadelta-C_100-l_rate_0.01.p", "rb"))
# res = pickle.load(open("./experiments/log/31-10-2016--21-44-24-0.p", "rb"))
import numpy as np
print(np.mean(res.val_acc[-100:]))
print(np.std(res.val_acc[-100:]))
print(np.min(res.val_acc[-100:]))
print(np.max(res.val_acc[-100:]))
fig = plt.figure()
sns.set(style="white", palette="Set1")
ax1 = fig.add_subplot(211)
train_err, = ax1.plot(res.train_time, res.train_err, label="Training Objective Function")
ax1.set_ylim([0., 5.])
ax1.legend(handles=[train_err])
ax1.set_ylabel("Objective Function")
sns.set(style="white", palette="Set1")
ax2 = fig.add_subplot(212)
train_acc, = ax2.plot(res.train_time, res.train_acc, label="Training accuracy")
val_acc, = ax2.plot(res.val_time, res.val_acc, label="Validation accuracy")
ax2.set_ylim([0., 100.])
ax2.legend(handles=[train_acc, val_acc])
ax2.set_xlabel("Time (s)")
ax2.set_ylabel("Accuracy (%)")
plt.show()
| StarcoderdataPython |
1689584 |
class Solution(object):
# def reverseList(self, head):
# """
# :type head: ListNode
# :rtype: ListNode
# """
# if not head:
# return None
# if not head.next:
# return head
# cur = head
# pre = None
# while cur:
# nxt = cur.next
# cur.next = pre
# pre = cur
# cur = nxt
# return pre
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return head
def traverse(node):
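            # Recurse to the tail first: the deepest call returns the new head,
            # which is passed back unchanged while each frame reverses one link.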
current = node
if node.next == None:
return node
newHead = traverse(current.next)
current.next.next = current
# must reset, otherwise the first node (old head) will not point to null (cyclic link)
current.next = None
return newHead
return traverse(head)
def reverseListIter(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return None
if not head.next:
return head
cur = head
pre = None
while cur:
nxt = cur.next
# reverse pointer
cur.next = pre
# advance both
pre, cur = cur, nxt
# when loop exits, pre points to the last node, now the new head
return pre
| StarcoderdataPython |
3355975 | #!/usr/bin/env python3
import argparse
import os, atexit
import textwrap
import signal
import random
import time
from enum import Enum
from collections import defaultdict, OrderedDict
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("{} is an invalid positive int value".format(value))
return ivalue
def checkProcess(filePath):
i = 1
nextMessage = defaultdict(lambda : 1)
filename = os.path.basename(filePath)
with open(filePath) as f:
        for lineNumber, line in enumerate(f, start=1):
tokens = line.split()
# Check broadcast
if tokens[0] == 'b':
msg = int(tokens[1])
if msg != i:
print("File {}, Line {}: Messages broadcast out of order. Expected message {} but broadcast message {}".format(filename, lineNumber, i, msg))
return False
i += 1
# Check delivery
if tokens[0] == 'd':
sender = int(tokens[1])
msg = int(tokens[2])
if msg != nextMessage[sender]:
print("File {}, Line {}: Message delivered out of order. Expected message {}, but delivered message {}".format(filename, lineNumber, nextMessage[sender], msg))
return False
else:
nextMessage[sender] = msg + 1
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--proc_num",
required=True,
type=check_positive,
dest="proc_num",
help="Total number of processes",
)
parser.add_argument('output', nargs='+')
results = parser.parse_args()
if len(results.output) != results.proc_num:
print("Not as many output files as number of processes")
exit(1)
for o in results.output:
print("Checking {}".format(o))
        if checkProcess(o):
            print("Validation OK")
        else:
            print("Validation failed!")
| StarcoderdataPython |
1627705 | import grpc
import json
import logging
import os
import sys
import pickle
import datetime
from urllib import parse
import d3m_automl_rpc.core_pb2 as pb_core
import d3m_automl_rpc.core_pb2_grpc as pb_core_grpc
import d3m_automl_rpc.value_pb2 as pb_value
from d3m_automl_rpc.utils import encode_problem_description, encode_performance_metric, encode_value
from alphad3m.grpc_api.grpc_logger import LoggingStub
logger = logging.getLogger(__name__)
def do_hello(core):
core.Hello(pb_core.HelloRequest())
def do_listprimitives(core):
core.ListPrimitives(pb_core.ListPrimitivesRequest())
def do_search(core, problem, dataset_path, time_bound=30.0, pipelines_limit=0, pipeline_template=None):
version = pb_core.DESCRIPTOR.GetOptions().Extensions[pb_core.protocol_version]
automl_hyperparameters = {'exclude_primitives': None,
'include_primitives': None}
automl_hyperparams_encoded = {k: encode_value({'type': 'object', 'value': v}, ['RAW'], '/tmp') for k, v in
automl_hyperparameters.items()}
search = core.SearchSolutions(pb_core.SearchSolutionsRequest(
user_agent='ta3_stub',
version=version,
time_bound_search=time_bound,
rank_solutions_limit=pipelines_limit,
allowed_value_types=['CSV_URI'],
problem=encode_problem_description(problem),
automl_hyperparameters=automl_hyperparams_encoded,
template=pipeline_template,
inputs=[pb_value.Value(
dataset_uri='file://%s' % dataset_path,
)],
))
start_time = datetime.datetime.now()
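    # GetSearchSolutionsResults is a server-streaming call: iterate over the
    # stream and record every reported solution id until the search ends.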
results = core.GetSearchSolutionsResults(
pb_core.GetSearchSolutionsResultsRequest(
search_id=search.search_id,
)
)
solutions = {}
for result in results:
if result.solution_id:
end_time = datetime.datetime.now()
solutions[result.solution_id] = (
result.internal_score,
result.scores,
str(end_time - start_time)
)
return str(search.search_id), solutions
def do_score(core, problem, solutions, dataset_path):
metrics = []
for metric in problem['problem']['performance_metrics']:
metrics.append(encode_performance_metric(metric))
for solution in solutions:
try:
response = core.ScoreSolution(pb_core.ScoreSolutionRequest(
solution_id=solution,
inputs=[pb_value.Value(
dataset_uri='file://%s' % dataset_path,
)],
performance_metrics=metrics,
users=[],
configuration=pb_core.ScoringConfiguration(
method='K_FOLD',
folds=4,
train_test_ratio=0.75,
shuffle=True,
random_seed=0
),
))
results = core.GetScoreSolutionResults(
pb_core.GetScoreSolutionResultsRequest(
request_id=response.request_id,
)
)
for _ in results:
pass
except Exception:
logger.exception("Exception during scoring %r", solution)
def do_train(core, solutions, dataset_path):
fitted = {}
for solution in solutions:
try:
response = core.FitSolution(pb_core.FitSolutionRequest(
solution_id=solution,
inputs=[pb_value.Value(
dataset_uri='file://%s' % dataset_path,
)],
expose_outputs=['outputs.0'],
expose_value_types=['CSV_URI'],
users=[],
))
results = core.GetFitSolutionResults(
pb_core.GetFitSolutionResultsRequest(
request_id=response.request_id,
)
)
for result in results:
if result.progress.state == pb_core.COMPLETED:
fitted[solution] = result.fitted_solution_id
except Exception:
logger.exception("Exception training %r", solution)
return fitted
def do_test(core, fitted, dataset_path):
tested = {}
for fitted_solution in fitted.values():
try:
response = core.ProduceSolution(pb_core.ProduceSolutionRequest(
fitted_solution_id=fitted_solution,
inputs=[pb_value.Value(
dataset_uri='file://%s' % dataset_path,
)],
expose_outputs=['outputs.0'],
expose_value_types=['CSV_URI'],
users=[],
))
results = core.GetProduceSolutionResults(
pb_core.GetProduceSolutionResultsRequest(
request_id=response.request_id,
)
)
for result in results:
if result.progress.state == pb_core.COMPLETED:
tested[fitted_solution] = result.exposed_outputs['outputs.0'].csv_uri
except Exception:
logger.exception("Exception testing %r", fitted_solution)
return tested
def do_export(core, fitted):
for i, fitted_solution in enumerate(fitted.values()):
try:
core.SolutionExport(pb_core.SolutionExportRequest(
solution_id=fitted_solution,
rank=(i + 1.0) / (len(fitted) + 1.0),
))
except Exception:
logger.exception("Exception exporting %r", fitted_solution)
def do_describe(core, solutions):
for solution in solutions:
try:
core.DescribeSolution(pb_core.DescribeSolutionRequest(
solution_id=solution,
))
except Exception:
logger.exception("Exception during describe %r", solution)
def do_save_solution(core, solution_id):
response = core.SaveSolution(pb_core.SaveSolutionRequest(solution_id=solution_id))
return response.solution_uri
def do_load_solution(core, solution_path):
solution_uri = 'file://%s' % solution_path
response = core.LoadSolution(pb_core.LoadSolutionRequest(solution_uri=solution_uri))
return response.solution_id
def do_save_fitted_solution(core, fitted):
saved = {}
for fitted_solution_id in fitted.values():
response = core.SaveFittedSolution(pb_core.SaveFittedSolutionRequest(fitted_solution_id=fitted_solution_id))
parsed_uri = parse.urlparse(response.fitted_solution_uri, allow_fragments=False)
fitted_solution_path = parsed_uri.path
with open(fitted_solution_path, 'rb') as fin:
fitted_object = pickle.load(fin)
saved[fitted_solution_id] = fitted_object
return saved
def main():
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s")
channel = grpc.insecure_channel('localhost:{0}'.format(os.environ.get('D3MPORT', 45042)))
core = LoggingStub(pb_core_grpc.CoreStub(channel), logger)
train_dataset_path = '/input/TRAIN/dataset_TRAIN/datasetDoc.json'
test_dataset_path = '/input/TEST/dataset_TEST/datasetDoc.json'
with open(sys.argv[1]) as problem:
problem = json.load(problem)
# Do a hello
do_hello(core)
# Do a list primitives
do_listprimitives(core)
# Do a search
    search_id, solutions = do_search(core, problem, train_dataset_path)
# Describe the pipelines
do_describe(core, solutions)
# Score all found solutions
do_score(core, problem, solutions, train_dataset_path)
# Train all found solutions
fitted = do_train(core, solutions, train_dataset_path)
# Test all fitted solutions
do_test(core, fitted, test_dataset_path)
# Export all fitted solutions
do_export(core, fitted)
if __name__ == '__main__':
main()
| StarcoderdataPython |
147650 | # Default locale mapping for the Facebook JS SDK
# The list of supported locales is at
# https://www.facebook.com/translations/FacebookLocales.xml
import os
from django.utils.translation import get_language, to_locale
def _build_locale_table(filename_or_file):
"""
Parses the FacebookLocales.xml file and builds a dict relating every
available language ('en, 'es, 'zh', ...) with a list of available regions
for that language ('en' -> 'US', 'EN') and an (arbitrary) default region.
"""
# Require the XML parser module only if we want the default mapping
from xml.dom.minidom import parse
dom = parse(filename_or_file)
reps = dom.getElementsByTagName("representation")
locs = map(lambda r: r.childNodes[0].data, reps)
locale_map = {}
for loc in locs:
lang, _, reg = loc.partition("_")
lang_map = locale_map.setdefault(lang, {"regs": [], "default": reg})
lang_map["regs"].append(reg)
# Default region overrides (arbitrary)
locale_map["en"]["default"] = "US"
# Special case: Use es_ES for Spain and es_LA for everything else
locale_map["es"]["default"] = "LA"
locale_map["zh"]["default"] = "CN"
locale_map["fr"]["default"] = "FR"
locale_map["pt"]["default"] = "PT"
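    # Illustrative final shape (region lists come from the XML):
    #   {"en": {"regs": ["US", "GB", ...], "default": "US"},
    #    "es": {"regs": ["ES", "LA", ...], "default": "LA"}, ...}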
return locale_map
def get_default_locale_callable():
"""
Wrapper function so that the default mapping is only built when needed
"""
exec_dir = os.path.dirname(os.path.realpath(__file__))
xml_path = os.path.join(exec_dir, "data", "FacebookLocales.xml")
fb_locales = _build_locale_table(xml_path)
def default_locale(request):
"""
Guess an appropriate FB locale based on the active Django locale.
If the active locale is available, it is returned. Otherwise,
it tries to return another locale with the same language. If there
isn't one avaible, 'en_US' is returned.
"""
chosen = "en_US"
language = get_language()
if language:
locale = to_locale(language)
lang, _, reg = locale.partition("_")
lang_map = fb_locales.get(lang)
if lang_map is not None:
if reg in lang_map["regs"]:
chosen = lang + "_" + reg
else:
chosen = lang + "_" + lang_map["default"]
return chosen
return default_locale
| StarcoderdataPython |
3329061 | from typing import Dict, Tuple, List, Any
import array
import concurrent.futures
import datetime
import logging
import numpy
import pymongo
import pytz
import zlib
from . import DataProvider
from wx_explore.common import tracing
from wx_explore.common.models import (
Projection,
SourceField,
DataPointSet,
)
class MongoBackend(DataProvider):
logger: logging.Logger
account_name: str
account_key: str
table_name: str
n_x_per_row: int = 128
def __init__(self, uri: str, database: str, collection: str):
self.logger = logging.getLogger(self.__class__.__name__)
self.collection = pymongo.MongoClient(uri)[database][collection]
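        # Compound index backing the per-point lookups in get_fields()
        # (filter on proj_id, a valid_time range and the y row).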
self.collection.create_index([
('proj_id', pymongo.ASCENDING),
('valid_time', pymongo.ASCENDING),
('y', pymongo.ASCENDING),
])
def get_fields(
self,
proj_id: int,
loc: Tuple[float, float],
valid_source_fields: List[SourceField],
start: datetime.datetime,
end: datetime.datetime
) -> List[DataPointSet]:
x, y = loc
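        # Documents store strips of n_x_per_row consecutive x values, so
        # locate the strip's starting x and the offset of x inside it.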
nearest_row_x = ((x // self.n_x_per_row) * self.n_x_per_row)
rel_x = x - nearest_row_x
with tracing.start_span('get_fields lookup'):
results = self.collection.find({
'proj_id': proj_id,
'y': y,
'x_shard': nearest_row_x,
'valid_time': {
'$gte': start,
'$lt': end,
},
})
data_points = []
for item in results:
for sf in valid_source_fields:
key = f"sf{sf.id}"
if key not in item or item[key] is None:
continue
raw = zlib.decompress(item[key])
val = array.array("f", raw).tolist()[rel_x]
data_point = DataPointSet(
values=[val],
metric_id=sf.metric.id,
valid_time=item['valid_time'].replace(tzinfo=pytz.UTC),
source_field_id=sf.id,
run_time=item['run_time'].replace(tzinfo=pytz.UTC),
)
data_points.append(data_point)
return data_points
def put_fields(
self,
proj: Projection,
fields: Dict[Tuple[int, datetime.datetime, datetime.datetime], List[numpy.array]]
):
# fields is map of (field_id, valid_time, run_time) -> [msg, ...]
with concurrent.futures.ThreadPoolExecutor(1) as ex:
ex.map(lambda y: self._put_fields_worker(proj, fields, y), range(proj.n_y))
def _put_fields_worker(
self,
proj: Projection,
fields: Dict[Tuple[int, datetime.datetime, datetime.datetime], List[numpy.array]],
y: int
):
rows: Dict[Tuple[datetime.datetime, datetime.datetime, int], Dict[str, Any]] = {}
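        # One document per (valid_time, run_time, x-shard); each carries a
        # compressed strip of n_x_per_row values for every source field.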
with tracing.start_span('put_fields transformations') as span:
span.set_attribute("num_fields", len(fields))
for (field_id, valid_time, run_time), msgs in fields.items():
for x in range(0, proj.n_x, self.n_x_per_row):
row_key = (valid_time, run_time, x)
if row_key not in rows:
rows[row_key] = {
'proj_id': proj.id,
'valid_time': valid_time,
'run_time': run_time,
'y': y,
'x_shard': x,
}
for msg in msgs:
# XXX: this only keeps last msg per field breaking ensembles
rows[row_key][f"sf{field_id}"] = zlib.compress(msg[y][x:x+self.n_x_per_row].tobytes())
with tracing.start_span('put_fields saving') as span:
            if rows:
                self.collection.insert_many(list(rows.values()))
def clean(self, oldest_time: datetime.datetime):
for proj in Projection.query.all():
            self.collection.delete_many({
'proj_id': proj.id,
'valid_time': {
'$lt': oldest_time,
},
})
def merge(self):
pass
| StarcoderdataPython |
3361498 | <reponame>nbk1982/pynctual<filename>pynctual/continous_detection.py
import detection as sherlock
import time
while(1):
sherlock.holmes()
time.sleep(2)
| StarcoderdataPython |
3369799 | <gh_stars>100-1000
from django.dispatch import Signal
from pretalx.common.signals import EventPluginSignal
nav_event = EventPluginSignal()
"""
This signal allows you to add additional views to the admin panel
navigation. You will get the request as a keyword argument ``request``.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You can also return
a ForkAwesome icon name with the key ``icon``; it will be respected depending
on the type of navigation. You should also return an ``active`` key with a boolean
set to ``True``, when this item should be marked as active. The ``request`` object
will have an attribute ``event``.
If you use this, you should read the documentation on :ref:`how to deal with URLs <urlconf>`
in pretalx.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
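
For example (the names, URL and icon below are purely illustrative)::

    @receiver(nav_event)
    def navbar_entry(sender, request, **kwargs):
        url = "/orga/event/{}/p/myplugin/".format(sender.slug)
        return [{
            "label": "My Plugin",
            "url": url,
            "icon": "wrench",
            "active": request.path.startswith(url),
        }]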
"""
nav_global = Signal()
"""
This signal allows you to add additional views to the navigation bar when no event is
selected. You will get the request as a keyword argument ``request``.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You can also return
a ForkAwesome icon name with the key ``icon``; it will be respected depending
on the type of navigation. You should also return an ``active`` key with a boolean
set to ``True``, when this item should be marked as active.
If you use this, you should read the documentation on :ref:`how to deal with URLs <urlconf>`
in pretalx.
This is no ``EventPluginSignal``, so you do not get the event in the ``sender`` argument
and you may get the signal regardless of whether your plugin is active.
"""
activate_event = EventPluginSignal()
"""
This signal is sent out before an event goes live. It allows any installed
plugin to raise an Exception to prevent the event from going live. The
exception message will be exposed to the user. If a string value is returned, pretalx
will show it as a success message.
You will get the request as a keyword argument ``request``.
Receivers are not expected to return a response.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
nav_event_settings = EventPluginSignal()
"""
This signal is sent out to collect additional settings sub-pages of an event.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You should also return
an ``active`` key with a boolean set to ``True``, when this item should be marked
as active.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
A second keyword argument ``request`` will contain the request object.
"""
event_copy_data = EventPluginSignal()
"""
This signal is sent out when a new event is created as a clone of an existing event, i.e.
the settings from the older event are copied to the newer one. You can listen to this
signal to copy data or configuration stored within your plugin's models as well.
You don't need to copy data inside the general settings storage which is cloned automatically,
but you might need to modify that data.
The ``sender`` keyword argument will contain the event of the **new** event. The ``other``
keyword argument will contain the event slug to **copy from**. The keyword arguments
``submission_type_map``, ``question_map``, ``track_map`` and ``speaker_information_map`` contain
mappings from object IDs in the original event to objects in the new event of the respective
types.
"""
| StarcoderdataPython |
122 | <reponame>ritchie46/flopy
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import flopy
fb = flopy.modflow.Modflow.load('freyberg', version='mf2005', model_ws=os.path.join('..', 'data', 'freyberg'), verbose=True)
dis = fb.dis
top = fb.dis.top
fb.dis.top.plot(grid=True, colorbar=True)
fb.dis.botm.plot(grid=True, colorbar=True)
fb.dis.plot()
plt.show()
fb.dis.plot()
plt.show()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1,2,1, aspect='equal')
fb.dis.top.plot(grid=True, axes=ax, colorbar=True)
ax = fig.add_subplot(1,2,2, aspect='equal')
fb.dis.botm.plot(grid=True, axes=ax, colorbar=True)
plt.show()
print('this is the end my friend') | StarcoderdataPython |
175074 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import requests
import time
import uuid
from .models import DatasetType, SourceType, JobType
from marquez_client import errors
from marquez_client import log
from marquez_client.constants import (
ENABLE_SSL, DEFAULT_HOST, DEFAULT_PORT, DEFAULT_TIMEOUT_MS
)
from marquez_client.version import VERSION
from six.moves.urllib.parse import quote
_API_PATH = 'api/v1'
_USER_AGENT = f'marquez-python/{VERSION}'
_HEADERS = {'User-Agent': _USER_AGENT}
# Marquez Client
class MarquezClient(object):
def __init__(self, enable_ssl=False, host=None, port=None,
timeout_ms=None):
enable_ssl = enable_ssl or os.environ.get('ENABLE_SSL', ENABLE_SSL)
host = host or os.environ.get('MARQUEZ_HOST', DEFAULT_HOST)
port = port or os.environ.get('MARQUEZ_PORT', DEFAULT_PORT)
self._timeout = self._to_seconds(timeout_ms or os.environ.get(
'MARQUEZ_TIMEOUT_MS', DEFAULT_TIMEOUT_MS)
)
protocol = 'http'
if enable_ssl:
protocol = 'https'
self._api_base = f'{protocol}://{host}:{port}/{_API_PATH}'
if not port or port == 80:
self._api_base = f'{protocol}://{host}/{_API_PATH}'
log.debug(self._api_base)
# Namespace API
def create_namespace(self, namespace_name, owner_name, description=None):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(owner_name, 'owner_name')
payload = {
'ownerName': owner_name
}
if description:
payload['description'] = description
return self._put(
self._url('/namespaces/{0}', namespace_name),
payload=payload
)
def get_namespace(self, namespace_name):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
return self._get(self._url('/namespaces/{0}', namespace_name))
def list_namespaces(self, limit=None, offset=None):
return self._get(
self._url('/namespaces'),
params={
'limit': limit,
'offset': offset
}
)
# Source API
def create_source(self, source_name, source_type, connection_url,
description=None):
MarquezClient._check_name_length(source_name, 'source_name')
MarquezClient._is_instance_of(source_type, SourceType)
MarquezClient._is_valid_connection_url(connection_url)
payload = {
'type': source_type.value,
'connectionUrl': connection_url
}
if description:
payload['description'] = description
return self._put(self._url('/sources/{0}', source_name),
payload=payload)
def get_source(self, source_name):
MarquezClient._check_name_length(source_name, 'source_name')
return self._get(self._url('/sources/{0}', source_name))
def list_sources(self, limit=None, offset=None):
return self._get(
self._url('/sources'),
params={
'limit': limit,
'offset': offset
}
)
# Datasets API
def create_dataset(self, namespace_name, dataset_name, dataset_type,
physical_name, source_name,
description=None, run_id=None,
schema_location=None,
fields=None, tags=None):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(dataset_name, 'dataset_name')
MarquezClient._is_instance_of(dataset_type, DatasetType)
if dataset_type == DatasetType.STREAM:
MarquezClient._is_none(schema_location, 'schema_location')
MarquezClient._check_name_length(physical_name, 'physical_name')
MarquezClient._check_name_length(source_name, 'source_name')
payload = {
'type': dataset_type.value,
'physicalName': physical_name,
'sourceName': source_name,
}
if description:
payload['description'] = description
if run_id:
payload['runId'] = run_id
if fields:
payload['fields'] = fields
if tags:
payload['tags'] = tags
if schema_location:
payload['schemaLocation'] = schema_location
return self._put(
self._url('/namespaces/{0}/datasets/{1}', namespace_name,
dataset_name),
payload=payload
)
def get_dataset(self, namespace_name, dataset_name):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(dataset_name, 'dataset_name')
return self._get(
self._url('/namespaces/{0}/datasets/{1}',
namespace_name, dataset_name)
)
def list_datasets(self, namespace_name, limit=None, offset=None):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
return self._get(
self._url('/namespaces/{0}/datasets', namespace_name),
params={
'limit': limit,
'offset': offset
}
)
def tag_dataset(self, namespace_name, dataset_name, tag_name):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(dataset_name, 'dataset_name')
if not tag_name:
raise ValueError('tag_name must not be None')
return self._post(
self._url('/namespaces/{0}/datasets/{1}/tags/{2}',
namespace_name, dataset_name, tag_name)
)
def tag_dataset_field(self, namespace_name, dataset_name, field_name,
tag_name):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(dataset_name, 'dataset_name')
MarquezClient._check_name_length(field_name, 'field_name')
MarquezClient._check_name_length(tag_name, 'tag_name')
return self._post(
self._url('/namespaces/{0}/datasets/{1}/fields/{2}/tags/{3}',
namespace_name, dataset_name, field_name, tag_name)
)
# Job API
def create_job(self, namespace_name, job_name, job_type, location=None,
input_dataset=None,
output_dataset=None, description=None, context=None):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(job_name, 'job_name')
MarquezClient._is_instance_of(job_type, JobType)
payload = {
'inputs': input_dataset or [],
'outputs': output_dataset or [],
'type': job_type.name
}
if context:
payload['context'] = context
if location:
payload['location'] = location
if description:
payload['description'] = description
return self._put(
self._url('/namespaces/{0}/jobs/{1}', namespace_name, job_name),
payload=payload
)
def get_job(self, namespace_name, job_name):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(job_name, 'job_name')
return self._get(
self._url('/namespaces/{0}/jobs/{1}', namespace_name, job_name)
)
def list_jobs(self, namespace_name, limit=None, offset=None):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
return self._get(
self._url('/namespaces/{0}/jobs', namespace_name),
params={
'limit': limit,
'offset': offset
}
)
def create_job_run(self, namespace_name, job_name,
nominal_start_time=None,
nominal_end_time=None, run_args=None,
mark_as_running=False):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(job_name, 'job_name')
payload = {}
if nominal_start_time:
payload['nominalStartTime'] = nominal_start_time
if nominal_end_time:
payload['nominalEndTime'] = nominal_end_time
if run_args:
payload['runArgs'] = run_args
response = self._post(
self._url('/namespaces/{0}/jobs/{1}/runs',
namespace_name, job_name),
payload=payload)
if mark_as_running:
run_id = response['runId']
response = self.mark_job_run_as_started(run_id)
return response
def list_job_runs(self, namespace_name, job_name, limit=None,
offset=None):
MarquezClient._check_name_length(namespace_name, 'namespace_name')
MarquezClient._check_name_length(job_name, 'job_name')
return self._get(
self._url(
'/namespaces/{0}/jobs/{1}/runs',
namespace_name,
job_name),
params={
'limit': limit,
'offset': offset
}
)
def get_job_run(self, run_id):
self._is_valid_uuid(run_id, 'run_id')
return self._get(self._url('/jobs/runs/{0}', run_id))
def mark_job_run_as_started(self, run_id):
return self.__mark_job_run_as(run_id, 'start')
def mark_job_run_as_completed(self, run_id):
return self.__mark_job_run_as(run_id, 'complete')
def mark_job_run_as_failed(self, run_id):
return self.__mark_job_run_as(run_id, 'fail')
def mark_job_run_as_aborted(self, run_id):
return self.__mark_job_run_as(run_id, 'abort')
def list_tags(self, limit=None, offset=None):
return self._get(
self._url('/tags'),
params={
'limit': limit,
'offset': offset
}
)
def __mark_job_run_as(self, run_id, action):
MarquezClient._is_valid_uuid(run_id, 'run_id')
return self._post(
self._url('/jobs/runs/{0}/{1}', run_id, action), payload={}
)
# Common
def _url(self, path, *args):
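        # quote() each path argument with safe='' so values containing '/' or
        # spaces stay within a single URL path segment.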
encoded_args = [quote(arg.encode('utf-8'), safe='') for arg in args]
return f'{self._api_base}{path.format(*encoded_args)}'
def _post(self, url, payload, as_json=True):
now_ms = self._now_ms()
response = requests.post(
url=url, headers=_HEADERS, json=payload, timeout=self._timeout)
log.info(f'{url}', method='POST', payload=json.dumps(
payload), duration_ms=(self._now_ms() - now_ms))
return self._response(response, as_json)
def _put(self, url, payload=None, as_json=True):
now_ms = self._now_ms()
response = requests.put(
url=url, headers=_HEADERS, json=payload, timeout=self._timeout)
log.info(f'{url}', method='PUT', payload=json.dumps(
payload), duration_ms=(self._now_ms() - now_ms))
return self._response(response, as_json)
def _get(self, url, params=None, as_json=True):
now_ms = self._now_ms()
response = requests.get(
url, params=params, headers=_HEADERS, timeout=self._timeout)
log.info(f'{url}', method='GET',
duration_ms=(self._now_ms() - now_ms))
return self._response(response, as_json)
@staticmethod
def _now_ms():
return int(round(time.time() * 1000))
def _response(self, response, as_json):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
self._raise_api_error(e)
return response.json() if as_json else response.text
def _raise_api_error(self, e):
# TODO: https://github.com/MarquezProject/marquez-python/issues/55
raise errors.APIError() from e
@staticmethod
def _to_seconds(timeout_ms):
return float(timeout_ms) / 1000.0
@staticmethod
def _is_none(variable_value, variable_name):
if not variable_value:
raise ValueError(f"{variable_name} must not be None")
@staticmethod
def _check_name_length(variable_value, variable_name):
MarquezClient._is_none(variable_value, variable_name)
# ['namespace_name', 'owner_name', 'source_name'] <= 64
# ['dataset_name', 'field_name', 'job_name', 'tag_name'] <= 255
if variable_name in ['namespace_name', 'owner_name', 'source_name']:
if len(variable_value) > 64:
raise ValueError(f"{variable_name} length is"
f" {len(variable_value)}, must be <= 64")
else:
if len(variable_value) > 255:
raise ValueError(f"{variable_name} length is"
f" {len(variable_value)}, must be <= 255")
@staticmethod
def _is_valid_uuid(variable_value, variable_name):
MarquezClient._is_none(variable_value, variable_name)
try:
uuid.UUID(str(variable_value))
except ValueError:
raise ValueError(f"{variable_name} must be a valid UUID")
@staticmethod
def _is_instance_of(variable_value, variable_enum_type):
if not isinstance(variable_value, variable_enum_type):
raise ValueError(f"{variable_value} must be an instance"
f" of {variable_enum_type}")
@staticmethod
def _is_valid_connection_url(connection_url):
MarquezClient._is_none(connection_url, 'connection_url')
| StarcoderdataPython |
3295409 | <gh_stars>1-10
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""python_instance.py: Python Instance for running python functions
"""
from concurrent import futures
from log import Log
import grpc
import InstanceCommunication_pb2_grpc
class InstanceCommunicationServicer(InstanceCommunication_pb2_grpc.InstanceControlServicer):
"""Provides methods that implement functionality of route guide server."""
def __init__(self, pyinstance):
self.pyinstance = pyinstance
def GetFunctionStatus(self, request, context):
Log.info("Came in GetFunctionStatus")
return self.pyinstance.get_function_status()
def GetAndResetMetrics(self, request, context):
Log.info("Came in GetAndResetMetrics")
return self.pyinstance.get_and_reset_metrics()
def serve(port, pyinstance):
server_instance = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
InstanceCommunication_pb2_grpc.add_InstanceControlServicer_to_server(
InstanceCommunicationServicer(pyinstance), server_instance)
server_instance.add_insecure_port('[::]:%d' % port)
Log.info("Serving InstanceCommunication on port %d" % int(port))
server_instance.start()
return server_instance
| StarcoderdataPython |
37348 | <gh_stars>1-10
import numpy as np
from time import time
def bench_2_1():
trials = 100
elements = 1000000
times = []
for i in range(trials):
start = time()
M = np.random.randint(1,999, size=elements)
t = time()-start
times.append(t)
print 'Python - Benchmark 2.1: Average time = {} milliseconds'.format(np.mean(times)*1000) | StarcoderdataPython |
1686859 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 18:00:44 2019
@author: franchesoni
"""
import os
import numpy as np
from functions import evaluate
'''Evaluate the performance of orders over the places in vectors and save
the predictions, the orders, and the RMSDs'''
orders = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
(6, 0), (7, 0), (8, 0), (9, 0), (10, 0),
(0, 1), (1, 1), (2, 1), (3, 1), (4, 1),
(5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1),
(0, 2), (1, 2), (2, 2), (3, 2), (4, 2),
(5, 2), (6, 2), (7, 2), (8, 2), (9, 2), (10, 2),
(0, 3), (1, 3), (2, 3), (3, 3), (4, 3),
(5, 3), (6, 3), (7, 3), (8, 3), (9, 3), (10, 3)]
filenames = [file for file in os.listdir('vectors') if file[-3:] == 'npy']
for filename in filenames:
evaluate(filename, orders, csv=True)
#filename = os.listdir('vectors')[-1]
#evaluate(filename, orders, csv=True) | StarcoderdataPython |
26820 | <reponame>chris-lawton/libraries_wagtail
from django.apps import AppConfig
class ExhibitionsConfig(AppConfig):
name = 'exhibitions'
| StarcoderdataPython |
109694 | # Librerias en carpetas locales
from .submodels.pos import PyPos
| StarcoderdataPython |
1761710 | <gh_stars>0
# Copyright (C) 2018 SCARV project <<EMAIL>>
#
# Use of this source code is restricted per the MIT license, a copy of which
# can be found at https://opensource.org/licenses/MIT (or should be included
# as LICENSE.txt within the associated archive or repository).
# Per
#
# https://www.python.org/dev/peps/pep-0420/#id22
#
# we want to support a "split" (per component) sca3s namespace: by default,
# Python uses the first one in ${PYTHONPATH} exclusively. This is achieved
# by extending the search path.
from pkgutil import extend_path
__path__ = extend_path( __path__, __name__ )
__all__ = [ 'backend' ]
from . import backend
| StarcoderdataPython |
125578 | <gh_stars>100-1000
import os, sys
import numpy as np
from scipy.io.wavfile import write
folder = sys.argv[1]
for file in os.listdir(folder):
if file.endswith(".npy"):
print(file, file.split(".")[0])
a = np.load(folder+file)
write(folder+file.split(".")[0]+".wav", 22050, a) | StarcoderdataPython |
3536 | <reponame>wbprice/ojimoji
import numpy
h = .25
s = 1
bitmap = numpy.array([
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,0,1,0,1,1,1,0,0,1,0,0],
[0,0,1,1,0,1,0,1,0,1,1,0,0,1,0,0],
[0,0,1,1,1,0,1,0,1,1,1,0,1,0,0,0],
[0,0,1,1,0,1,0,1,0,1,1,1,0,0,0,0],
[0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0],
[0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0],
[0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
| StarcoderdataPython |
40350 | # Let's make an database
#
# this is like the worst code ever
#
# just to make an test DB for burn the subs
import time
from datetime import datetime
from bts.dataBaseClass import Sub
def main():
fileName = open("subscriberListTest.txt")
print("making:")
for entry in fileName:
entry = entry.strip()
dateTime = datetime.utcnow()
dbEntry = Sub.create(
userName = entry,
entryTime = dateTime,
status = 2,
# userName = 'Yfj',
fontSize = 72,
positionX = 1000,
positionY = 1000
)
print(entry)
time.sleep(0.2)
dbEntry.save()
print("done, you fuck.")
fileName.close()
if __name__ == "__main__":
main() | StarcoderdataPython |
1637080 | from pytg import sender
from pytg.exceptions import IllegalResponseException
import os
import logging
import yaml
import datetime
import time
logging.basicConfig(level=logging.INFO)
# Ugly hack: increate timeout for document reception
# Sub hack: use a list to assign a new value
tmp_f = list(sender.functions["load_document"])
tmp_f[sender.FUNC_TIME] = 3600.0
sender.functions["load_document"] = tuple(tmp_f)
x = sender.Sender("127.0.0.1", 4458)
def build_dialogs_list():
"""Return the list of all dialogs"""
base_list = []
res = True
while res:
res = x.dialog_list(100, len(base_list))
base_list += res
return base_list
def work_on_dialog(d):
"""Backup a particular dialog"""
logging.info("Working on %s %s %s", d['type'], d['print_name'], d['id'])
if not d['print_name']:
logging.error("%s has no print_name, cannot continue.", d['id'])
return
working_dir = "logs/by_ids/{}/".format(d['id'])
if not os.path.isdir(working_dir):
logging.debug("Creating working_dir %s", working_dir)
os.mkdir(working_dir)
symlink = "logs/{},{}".format(d['type'], d['print_name'].replace('/', ''))
if not os.path.exists(symlink):
logging.debug("Creating symlink %s", symlink)
os.symlink(working_dir[5:], symlink)
# "Eat" history until the last message, but stop at the last checkpoint
checkpoint_file = "{}/_checkpoint.yaml".format(working_dir)
last_checkpoint = None
if os.path.exists(checkpoint_file):
logging.debug("Loading checkpoing")
with open(checkpoint_file, 'r') as checkpoint_f:
data = yaml.load(checkpoint_f)
last_checkpoint = data.get('checkpoint', None)
logging.info("Last checkpoint is %s", last_checkpoint)
messages = {}
last_messages = True
while last_messages and last_checkpoint not in messages:
try:
last_messages = x.history(d['print_name'], 250, len(messages), retry_connect=-1)
except IllegalResponseException as e:
last_messages = []
if str(e) == "Result parser does not allow exceptions.":
logging.warning("Slowing down...")
time.sleep(5)
last_messages = True
if last_messages and last_messages != True:
for message in last_messages:
messages[message['id']] = message
logging.info("Loading, offset %s", len(messages))
logging.info("Found %s messages to process", len(messages))
# Save messages by date
loaded_data = {}
for id, message in messages.items():
if 'date' not in message:
logging.error("Not date in message %s", message['id'])
continue
date = datetime.datetime.fromtimestamp(message['date'])
file_key = '{}.{}.yaml'.format(date.year, date.month)
if file_key not in loaded_data:
file_key_name = '{}{}'.format(working_dir, file_key)
if os.path.isfile(file_key_name):
with open(file_key_name, 'r') as file_key_f:
loaded_data[file_key] = yaml.load(file_key_f)
logging.info("Loaded datafile %s", file_key)
else:
loaded_data[file_key] = {}
logging.info("Created datafile %s", file_key)
if message['id'] not in loaded_data[file_key]:
if message['event'] == 'message':
loaded_data[file_key][message['id']] = {'from': message['from']['print_name'], 'text': message.get('text', ''), 'date': message['date']}
if 'media' in message:
if message['media']['type'] not in ['webpage', 'contact']:
result = x.load_document(message['id'])
if os.path.exists(result['result']):
file_dir = "files_{}_{}/".format(date.year, date.month)
file_dir_full = "{}/{}/".format(working_dir, file_dir)
if not os.path.isdir(file_dir_full):
os.mkdir(file_dir_full)
media_file = "{}/{}.{}".format(file_dir_full, message['id'], result['result'].split('.')[-1].replace('/', ''))
os.rename(result['result'], media_file)
loaded_data[file_key][message['id']]['media'] = '{}{}.{}'.format(file_dir, message['id'], result['result'].split('.')[-1].replace('/', ''))
else:
loaded_data[file_key][message['id']]['media'] = result['result']
elif message['event'] == 'service':
pass
else:
logging.error("Unknow type %s", message['event'])
if not last_checkpoint or last_checkpoint < message['id']:
last_checkpoint = message['id']
# Save messages
for file_key, data in loaded_data.items():
with open('{}/{}'.format(working_dir, file_key), 'w') as file_key_f:
yaml.dump(data, file_key_f, default_flow_style=False)
logging.info("Saved datafile %s", file_key)
# Save checkpoint
with open(checkpoint_file, 'w') as checkpoint_f:
yaml.dump({'checkpoint': last_checkpoint}, checkpoint_f)
logging.info("Saved checkpoint")
return True
for d in build_dialogs_list():
work_on_dialog(d)
| StarcoderdataPython |
90112 | #There's so many way to improve these code, but for learning sake. This is good enough. I will revisit these code again in a month or two. to improve it by making it shorter, easier to read
import random
print("Hey! It's time to duel! Let's go!\n ROCK... \n PAPER... \n SCISSOR!!")
round = ''
your_score = 0
computer_score = 0
count = 0
while round != "end":
your_hand = input ("What is your pick? Rock, Paper or Scissor: ").lower()
computer_hand = random.choice(["rock","paper","scissor"])
if computer_hand == your_hand:
print("It's a tie. Pick again: ")
elif (computer_hand == "rock" and your_hand == "scissor") or (computer_hand == "paper" and your_hand == "rock") or (computer_hand == "scissor" and your_hand == "paper"):
print("Haha, I win!. ")
computer_score += 1
count +=1
# R>S, S>P, P>R
elif (computer_hand == "scissor" and your_hand == "rock") or (computer_hand == "scissor" and your_hand == "paper") or (computer_hand == "paper" and your_hand == "rock"):
print("F%$#, you win this time.")
your_score += 1
count +=1
else:
print("That's a wrong input!")
if count == 3:
if computer_score > your_score:
print("Yup, It's me the winner.")
else:
print("S%#t, i need to upgrade my cpu. you won, player.")
break
#improvement idea: I should try to use def,return. more, break the game into two big part, user input and winning condition. add these to def and combine them together. it will make code shorter and more portable.
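

# Editor sketch (not part of the original script): one possible refactor along the
# lines of the improvement note above. The helpers below are only defined for
# illustration and are never called by the game loop; the names are made up.
def get_user_hand():
    """Keep asking until the player enters rock, paper or scissor."""
    while True:
        hand = input("What is your pick? Rock, Paper or Scissor: ").lower()
        if hand in ("rock", "paper", "scissor"):
            return hand
        print("That's a wrong input!")


def decide_winner(your_hand, computer_hand):
    """Return 'tie', 'you' or 'computer' for a single round."""
    beats = {"rock": "scissor", "paper": "rock", "scissor": "paper"}
    if your_hand == computer_hand:
        return "tie"
    return "you" if beats[your_hand] == computer_hand else "computer"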
| StarcoderdataPython |
3351053 | <filename>python/CodingInterviews/offer27.py
'''
Function:
    Mirror of a binary tree
Author:
Charles
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def mirrorTree(self, root: TreeNode) -> TreeNode:
def recur(root):
if not root: return
root.left, root.right = root.right, root.left
recur(root.left)
recur(root.right)
recur(root)
return root | StarcoderdataPython |
1660572 | """
The :mod:`tslearn.datasets` module provides simplified access to standard time
series datasets.
"""
import zipfile
import tempfile
import shutil
import os
import warnings
from urllib.request import urlretrieve
__author__ = '<NAME> <EMAIL>.tavenard[at]univ-rennes2.fr'
def extract_from_zip_url(url, target_dir=None, verbose=False):
"""Download a zip file from its URL and unzip it.
A `RuntimeWarning` is printed on failure.
Parameters
----------
url : string
URL from which to download.
target_dir : str or None (default: None)
Directory to be used to extract unzipped downloaded files.
verbose : bool (default: False)
Whether to print information about the process (cached files used, ...)
Returns
-------
str or None
Directory in which the zip file has been extracted if the process was
successful, None otherwise
"""
fname = os.path.basename(url)
tmpdir = tempfile.mkdtemp()
local_zip_fname = os.path.join(tmpdir, fname)
urlretrieve(url, local_zip_fname)
os.makedirs(target_dir, exist_ok=True)
try:
with zipfile.ZipFile(local_zip_fname, "r") as f:
f.extractall(path=target_dir)
if verbose:
print("Successfully extracted file %s to path %s" %
(local_zip_fname, target_dir))
return target_dir
except zipfile.BadZipFile:
warnings.warn("Corrupted or missing zip file encountered, aborting",
category=RuntimeWarning)
return None
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
def in_file_string_replace(filename, old_string, new_string):
"""String replacement within a text file. It is used to fix typos in
downloaded csv file.
The code was modified from "https://stackoverflow.com/questions/4128144/"
Parameters
----------
filename : str
Path to the file where strings should be replaced
old_string : str
The string to be replaced in the file.
new_string : str
The new string that will replace old_string
"""
with open(filename) as f:
s = f.read()
with open(filename, 'w') as f:
s = s.replace(old_string, new_string)
f.write(s)
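

# Editor sketch (not part of tslearn): illustrative use of the two helpers above.
# The URL, paths and strings are placeholders, and the function is intentionally
# never called from this module.
def _example_usage():
    target = extract_from_zip_url(
        "https://example.com/SomeDataset.zip",  # hypothetical archive URL
        target_dir="/tmp/tslearn_example",
        verbose=True,
    )
    if target is not None:
        # Fix a hypothetical typo in one of the extracted files.
        in_file_string_replace(os.path.join(target, "data.csv"), "N/A", "NaN")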
| StarcoderdataPython |
3339024 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Transpiler pass to remove reset gate when the qubit is in zero state
"""
from qiskit.circuit import Reset
from qiskit.transpiler.basepasses import TransformationPass
class RemoveResetInZeroState(TransformationPass):
"""Remove reset gate when the qubit is in zero state"""
def run(self, dag):
"""Return a new circuit that has been optimized."""
resets = dag.op_nodes(Reset)
for reset in resets:
predecessor = next(dag.predecessors(reset))
if predecessor.type == 'in':
dag.remove_op_node(reset)
return dag
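

# Editor sketch (illustrative, not part of the original Qiskit module): how this
# pass is typically exercised through a PassManager. Imports are kept inside the
# helper to sidestep circular-import concerns, and it is never called here.
def _example_remove_initial_reset():
    from qiskit import QuantumCircuit
    from qiskit.transpiler import PassManager

    circuit = QuantumCircuit(1)
    circuit.reset(0)  # redundant: the qubit already starts in the zero state
    circuit.h(0)

    pass_manager = PassManager(RemoveResetInZeroState())
    return pass_manager.run(circuit)  # same circuit, with the leading reset removed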
| StarcoderdataPython |
164610 | <gh_stars>0
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from numpy.polynomial import Polynomial, polynomial
import algorithm.spectropyrometer_constants as sc
from scipy.interpolate import splev
from scipy.optimize import minimize
from algorithm.goal_function import goal_function
from algorithm.statistics import tukey_fence
def calculate_logR(data_spl, wl_v0, wl_v1):
logR_array = []
for wl0, wl1 in zip(wl_v0, wl_v1):
# Corresponding data from the filtered data
res0 = np.exp(splev(wl0, data_spl))
res1 = np.exp(splev(wl1, data_spl))
# Ratio of intensities
R = res0/res1
logR = np.log(R)
logR_array.append(logR)
logR_array = np.array(logR_array)
return logR_array
def ce_temperature(logR, wl_v0, wl_v1):
'''
Function: ce_temperature
Calculates the temperature based on the averaging of multiple
two-wavelength predictions and Constant Emissivity (CE)
Inputs:
- logR The logarithm of the intensity ratio I_0 / I_1, computed
upstream
- wl_v0, wl_v1 Vector of wavelengths chosen
Ouputs:
- Predicted temperature from averaging (K)
- Standard deviation (K)
- Standard deviation (%)
- Natural logarithm of ratio of intensities of two wavelengths. Useful for
the non-constant emissivity case as well and avoids having to recalculate
it.
'''
Tout = []
### Standard computation
# For each pair of wavelengths (wl0,wl1), calculate constant emissivity
# temperature
try:
invT = logR - 5 *np.log(wl_v1/wl_v0)
### Temperature
Tout = 1/invT
Tout *= sc.C2 * (1/wl_v1 - 1/wl_v0)
### Returns
# Tout = Tout[Tout>0]
Tave, Tstd, Tmetric, Tleft = tukey_fence(Tout)
# If there is some issue with the computation, avoid this data point
except:
Tave,Tstd,Tmetric = 1e5 * np.ones(3)
return Tave, Tstd, Tmetric
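

def _example_two_color_sanity_check(T_true=2000.0, C2=1.4388e-2):
    '''
    Editor sketch (not part of the original module): sanity check of the
    constant-emissivity relation implemented in ce_temperature above, using
    synthetic Wien-law intensities. Wavelengths here are in meters and C2 is
    the second radiation constant in m*K; the module itself may use different
    units through sc.C2.
    '''
    wl_v0 = np.array([500e-9, 600e-9])
    wl_v1 = np.array([900e-9, 950e-9])
    # Wien approximation: I(wl) ~ eps * wl**-5 * exp(-C2 / (wl * T))
    logR = 5 * np.log(wl_v1 / wl_v0) + (C2 / T_true) * (1 / wl_v1 - 1 / wl_v0)
    invT = logR - 5 * np.log(wl_v1 / wl_v0)
    T_recovered = C2 * (1 / wl_v1 - 1 / wl_v0) / invT
    return T_recovered  # approximately T_true for every wavelength pair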
def nce_temperature(poly_coeff,logR,
wl_v0,wl_v1,
wl_binm,wl_binM,
wl_min,
wl_max):
'''
Function: nce_temperature
Calculates the temperature based on a Non-Constant Emissivity (NCE).
The emissivity is modeled with a Chebyshev polynomial of order N where N
is determined by a separate routine
Inputs:
-
Outputs:
- Predicted temperature from averaging (K)
- Standard deviation (K)
- Standard deviation (%)
'''
# Create a polynomial representation with the proposed coefficients
# Rescaling is done internally by providing the bounds l_min and l_max
domain = np.array([wl_min,wl_max])
pol = Polynomial(poly_coeff,domain)
# Calculate the emissivities at the corresponding wavelengths
eps1 = polynomial.polyval(wl_v1,pol.coef)
eps0 = polynomial.polyval(wl_v0,pol.coef)
### Inverse temperature
try:
invT = logR - 5 *np.log(wl_v1/wl_v0) - np.log(eps0/eps1)
### Temperature
Tout = 1/invT
Tout *= sc.C2 * (1/wl_v1 - 1/wl_v0)
### Returns
# Tout = Tout[Tout>0]
Tave, Tstd, Tmetric, Tleft = tukey_fence(Tout)
# print('Coeffs: ', poly_coeff, '\t p-value:',normaltest(Tleft)[1])
except:
Tave, Tstd, Tmetric = 1e5 * np.ones(3)
return Tave, Tstd, Tmetric
def optimum_temperature(data_spl, cmb_pix, pix_vec, wl_vec, order):
'''
Function: optimum_temperature
Calculates the temperature based on the assumption of a polynomial order
Inputs:
- data_spl Spline representation of the filtered intensity data
- cmb_pix Pixels chosen for each pixel bin
- pix_vec Overall pixel vector
- wl_vec Vector of wavelengths (nm)
Ouputs:
- Predicted temperature from averaging (K)
- Standard deviation (K)
- Standard deviation (%)
- Flag indicating if advanced method was used
'''
bins = pix_vec[0::sc.pix_slice]
wl_sub_vec = wl_vec[pix_vec]
# Minimum and maximum wavelengths
wl_min = np.min(wl_sub_vec)
wl_max = np.max(wl_sub_vec)
# Which wavelengths are associated with the pixel combinations?
wl_v0 = wl_vec[cmb_pix[:,0]]
wl_v1 = wl_vec[cmb_pix[:,1]]
# Create the [lambda_min,lambda_max] pairs that delimit a "bin"
wl_binm = wl_vec[bins]
wl_binM = wl_vec[bins[1::]]
wl_binM = np.append(wl_binM,wl_vec[-1])
### Calculate intensity ratio
logR = calculate_logR(data_spl, wl_v0, wl_v1)
### Which order are we using?
if order == 0:
# If emissivity is constant, calculate the temperature with the simple model
sol = None
Tave, Tstd, Tmetric = ce_temperature(logR,wl_v0,wl_v1)
else:
# Otherwise, optimization routine on the coefficients of epsilon
# Define the goal function
f = lambda pc: goal_function(pc, logR, wl_v0, wl_v1, wl_min, wl_max)
# Initial values of coefficients
pc0 = np.zeros(order+1)
pc0[0] = sc.eps0
# Minimization
min_options = {'xatol':1e-15, 'fatol':1e-15, 'maxfev':20000} # Nelder-Mead
sol = minimize(f, pc0, method = 'Nelder-Mead', options = min_options)
# Calculate temperature from solution
Tave, Tstd, Tmetric = nce_temperature(sol.x,logR,
wl_v0,wl_v1,
wl_binm,wl_binM,
wl_min,
wl_max)
return Tave, Tstd, Tmetric, sol
| StarcoderdataPython |
4825153 | import webbrowser
class Movie(object):
def __init__(self, movie_title, movie_storyline,
poster_image, trailer_youtube):
"""
This can initialize the specific details of the movie,
such as title, storyline, poster image url, and trailer url.
"""
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
def show_trailer(self):
"""
This function can open the url on the browser and
show the movie trailer on it.
"""
webbrowser.open(self.trailer_youtube_url)
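

if __name__ == "__main__":
    # Editor sketch (not part of the original module): the title, storyline and
    # URLs below are placeholders used only to demonstrate the class.
    example_movie = Movie(
        "Example Movie",
        "A placeholder storyline used only to demonstrate the Movie class.",
        "https://example.com/poster.jpg",
        "https://www.youtube.com/watch?v=example",
    )
    example_movie.show_trailer()  # opens the trailer URL in the default browser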
| StarcoderdataPython |
70568 | <gh_stars>10-100
"""
DataLoader class
"""
import math
from galaxy.args import str2bool
from galaxy.data.batch import batch
from galaxy.data.sampler import RandomSampler
from galaxy.data.sampler import SequentialSampler
from galaxy.data.sampler import SortedSampler
class DataLoader(object):
""" Implement of DataLoader. """
@classmethod
def add_cmdline_argument(cls, group):
group.add_argument("--shuffle", type=str2bool, default=True)
group.add_argument("--sort_pool_size", type=int, default=0)
return group
def __init__(self, dataset, hparams, collate_fn=None, sampler=None, is_test=False):
self.dataset = dataset
self.collate_fn = collate_fn
self.sort_pool_size = hparams.sort_pool_size
if sampler is None:
if hparams.shuffle and not is_test:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
if self.sort_pool_size > 0 and not is_test:
sampler = SortedSampler(sampler, self.sort_pool_size)
def reader():
for idx in sampler:
yield idx
self.reader = batch(reader, batch_size=hparams.batch_size, drop_last=False)
self.num_batches = math.ceil(len(dataset) / hparams.batch_size)
return
def __len__(self):
return self.num_batches
def __iter__(self):
"""
1. Sampler -> batch data index:[1, 2, 3]
2. Dataset -> batch data:[[x1, y1], [x2, y2], [x3, y3]]
3. collate_fn -> batch data: [[x1, x2, x3], [y1, y2, y3]]
"""
for batch_indices in self.reader():
samples = [self.dataset[idx] for idx in batch_indices]
yield self.collate_fn(samples)
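

# Editor sketch (not part of the original module): a minimal collate_fn of the
# shape described in the __iter__ docstring above. Real Galaxy collate functions
# also pad and tensorize fields; this only shows the transposition step.
def _transpose_collate(samples):
    """Turn [[x1, y1], [x2, y2], [x3, y3]] into ([x1, x2, x3], [y1, y2, y3])."""
    xs, ys = zip(*samples)
    return list(xs), list(ys)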
| StarcoderdataPython |
21245 | <filename>app/core/tests/test_admin.py
import pytest
from django.urls import reverse
@pytest.mark.skip(reason="WIP moving to pytest tests")
def test_with_authenticated_client(client, django_user_model):
email = '<EMAIL>'
password = '<PASSWORD>'
admin_user = django_user_model.objects.create_superuser(
email, password)
client.force_login(user=admin_user)
user = django_user_model.objects.create_user('<EMAIL>', password='<PASSWORD>',
name='Test user full name')
url = reverse('admin:core_user_changelist')
res = client.get(url)
assert user.name in res
assert user.email in res
def test_user_page_change(client, django_user_model):
"""Test that the user edit page works"""
email = '<EMAIL>'
password = '<PASSWORD>'
admin_user = django_user_model.objects.create_superuser(
email, password)
client.force_login(user=admin_user)
user = django_user_model.objects.create_user('<EMAIL>', password='<PASSWORD>',
name='Test user full name')
url = reverse('admin:core_user_change', args=[user.id])
res = client.get(url)
assert res.status_code == 200
def test_create_user_page(client, django_user_model):
"""Test that the create user page works"""
email = '<EMAIL>'
password = '<PASSWORD>'
admin_user = django_user_model.objects.create_superuser(
email, password)
client.force_login(user=admin_user)
url = reverse('admin:core_user_add')
res = client.get(url)
assert res.status_code == 200
'''
@pytest.mark.django_db
def test_user_create():
User.objects.create_user('<EMAIL>', password='<PASSWORD>', name='Test user full name')
assert User.objects.count() == 1
@pytest.mark.parametrize(
'admin, user, client',
get_user_model().objects.create_superuser(
'<EMAIL>', password='<PASSWORD>'),
get_user_model().objects.create_user(
'<EMAIL>', password='<PASSWORD>', name='Test user full name'),
Client()
)
@pytest.mark.db
def test_users_listed(admin, user, client):
"""Test that users are listed on the user page """
url = reverse('admin:core_user_changelist')
res = client.get(url)
assert user.name in res
assert user.email in res
'''
| StarcoderdataPython |
1742729 | """Example Airflow DAG that creates a Cloud Dataproc cluster, runs the Hadoop
wordcount example, and deletes the cluster.
This DAG relies on three Airflow variables
https://airflow.apache.org/concepts.html#variables
* gcp_project - Google Cloud Project to use for the Cloud Dataproc cluster.
* gce_zone - Google Compute Engine zone where Cloud Dataproc cluster should be
created.
* gcs_bucket - Google Cloud Storage bucket to use for result of Hadoop job.
"""
import datetime
import os
from airflow import models
from airflow.contrib.operators import dataproc_operator
from airflow.utils import trigger_rule
# Output file for Cloud Dataproc job.
output_file = os.path.join(
models.Variable.get('gcs_bucket'), 'wordcount',
datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) + os.sep
# Path to Hadoop wordcount example available on every Dataproc cluster.
WORDCOUNT_JAR = (
'file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
)
# Arguments to pass to Cloud Dataproc job.
wordcount_args = ['wordcount', 'gs://pub/shakespeare/rose.txt', output_file]
yesterday = datetime.datetime.combine(
datetime.datetime.today() - datetime.timedelta(1),
datetime.datetime.min.time())
default_dag_args = {
# Setting start date as yesterday starts the DAG immediately when it is
# detected in the Cloud Storage bucket.
'start_date': yesterday,
# To email on failure or retry set 'email' arg to your email and enable
# emailing here.
'email_on_failure': False,
'email_on_retry': False,
# If a task fails, retry it once after waiting at least 5 minutes
'retries': 1,
'retry_delay': datetime.timedelta(minutes=5),
'project_id': models.Variable.get('gcp_project')
}
with models.DAG(
'composer_sample_quickstart',
# Continue to run DAG once per day
schedule_interval=datetime.timedelta(days=1),
default_args=default_dag_args) as dag:
# Create a Cloud Dataproc cluster.
create_dataproc_cluster = dataproc_operator.DataprocClusterCreateOperator(
task_id='create_dataproc_cluster',
# Give the cluster a unique name by appending the date scheduled.
# See https://airflow.apache.org/code.html#default-variables
cluster_name='quickstart-cluster-{{ ds_nodash }}',
num_workers=2,
zone=models.Variable.get('gce_zone'),
master_machine_type='n1-standard-1',
worker_machine_type='n1-standard-1')
# Run the Hadoop wordcount example installed on the Cloud Dataproc cluster
# master node.
run_dataproc_hadoop = dataproc_operator.DataProcHadoopOperator(
task_id='run_dataproc_hadoop',
main_jar=WORDCOUNT_JAR,
cluster_name='quickstart-cluster-{{ ds_nodash }}',
arguments=wordcount_args)
# Delete Cloud Dataproc cluster.
delete_dataproc_cluster = dataproc_operator.DataprocClusterDeleteOperator(
task_id='delete_dataproc_cluster',
cluster_name='quickstart-cluster-{{ ds_nodash }}',
# Setting trigger_rule to ALL_DONE causes the cluster to be deleted
# even if the Dataproc job fails.
trigger_rule=trigger_rule.TriggerRule.ALL_DONE)
# Define DAG dependencies.
create_dataproc_cluster >> run_dataproc_hadoop >> delete_dataproc_cluster | StarcoderdataPython |
169637 | """
LP Files
https://www.ibm.com/support/knowledgecenter/SSSA5P_12.5.0/ilog.odms.cplex.help/CPLEX/FileFormats/topics/LP.html
http://www.gurobi.com/documentation/8.0/refman/lp_format.html
"""
from math import isinf
from os import path
import pyflip as flp
def write_lp_file(model, filename, directory='.'):
full_filename = path.join(directory, filename)
with open(full_filename, 'w') as fp:
# Title section
fp.write(f'\ {model.name}\n')
# Objective function
fp.write(f'{model.objective.dir}\n')
if model.objective.expr.var_dict:
fp.write(f' {model.objective.name}: {model.objective.expr}\n')
else:
fp.write(f' {model.objective.name}:\n') # omit the constant (because it confuses gurobi_cl)
# Constraints (printed in rearranged form)
fp.write('subject to\n')
for constraint in model.constraints.values():
fp.write(f' {constraint.name}: {constraint._lhs} {constraint.mid} {constraint._rhs}\n')
# Variables
# Sort variables
bound_statements = []
bound_free_statements = []
general_statements = []
binary_statements = []
for variable in model.variables.values():
# Integer variables
if not variable.continuous:
if variable.lower_bound == 0 and variable.upper_bound == 1:
binary_statements.append(f' {variable.name}')
# Binary variables need not appear in bounds section
continue
else:
general_statements.append(f' {variable.name}')
# Bounds section
if isinf(variable.lower_bound) and isinf(variable.upper_bound):
bound_free_statements.append(f' {variable.name} free')
else:
bound_statements.append(f' {variable.lower_bound} <= {variable.name} <= {variable.upper_bound}')
fp.write('bounds\n')
fp.write('\n'.join(bound_statements) + '\n')
fp.write('\n'.join(bound_free_statements) + '\n')
fp.write('general\n')
fp.write('\n'.join(general_statements) + '\n')
fp.write('binary\n')
fp.write('\n'.join(binary_statements) + '\n')
fp.write('end\n')
return full_filename
def read_lp_file(model):
#TODO
raise NotImplementedError | StarcoderdataPython |
1710759 | <filename>balancehistory.py
#!/usr/local/bin/python3
"""
Created on 14 Mar 2018
@author: adeelkhan
"""
import argparse
import csv
import datetime
from finance.financedb import FinanceDB
from finance.textreport import TextReport
def load_accounts_from_file(filename):
accounts = dict()
with open(filename, 'rU') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if len(row) == 0:
continue
bank, account, reverse = row
accounts['{} {}'.format(bank, account)] = reverse
return accounts
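

# Editor note (illustrative, not from the original project): rows in the
# position file are expected to look like "Bank,Account,ReverseFlag", e.g.
#
#   MyBank,Everyday,N
#   MyBank,Credit Card,Y
#
# where a literal 'Y' in the third column marks accounts whose balance is
# subtracted rather than added in main() below.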
def main(file_name):
db = FinanceDB()
_ = db.connect('finance.db')
accounts = load_accounts_from_file(file_name)
monthly_balances = db.calculate_monthly_balances()
logger.debug('{} months.'.format(len(monthly_balances)))
for month, balances in sorted(monthly_balances.items(), key=lambda x: x[0]):
# print('{}: {}'.format(month, ', '.join(['{}:{}'.format(a, b) for a, b in balances.items() if b != 0])))
calc_balance = 0
for acc, bal in balances.items():
if acc in accounts:
if accounts[acc] == 'Y':
calc_balance -= bal
else:
calc_balance += bal
rollovers = db.get_rollovers(month)
rollover = sum([r['amount'] for r in rollovers.values()])
rollover2 = sum([r['amount'] for r in rollovers.values() if r['amount'] > 0])
print('{},{:13,.2f},{:13,.2f},{:13,.2f}'.format(month, calc_balance, rollover, rollover2))
db.disconnect()
if __name__ == '__main__':
import logging
from finance import logutil
root_logger = logutil.setup_logging()
logger = logging.getLogger('balancehistory')
parser = argparse.ArgumentParser(description='Show the past months balances.')
parser.add_argument('-f', '--file', metavar='position-filename',
help='the file that defines which accounts to include',
default='metadata/position.csv')
args = parser.parse_args()
# print 'Loading allocations...'
file = args.file
logger.info("File: %s", file)
main(file)
| StarcoderdataPython |
4822145 | from MongoDataSource import *
#from SolrDataSource import * | StarcoderdataPython |
3370484 | <reponame>ur001/sociation_corpus
# coding: utf-8
def print_results(title, results):
print(title)
print ("=" * 20)
for word_name, similarity in results:
print("{:0.3f}\t{}".format(similarity, word_name))
print("")
def query_model_print_results(model, query, count=10):
results = model.get_top_similar_to_words(query.lower().split(','), count=count)
print_results(query, results) | StarcoderdataPython |
3374491 | from flask import render_template, url_for, flash, redirect, request
from steganographer import app, db, bcrypt
from flask_login import login_user, current_user, logout_user, login_required
import secrets, os
from PIL import Image
import PIL, numpy
# FORMS
from steganographer.forms import RegistrationForm, LoginForm, UpdateAccountForm, StegoHide, StegoReveal
# MODELS
from steganographer.models import User, InputInformation, Final_Stego, CovertInput, RevealedInfo
# STEGANOGRAPHER
from stego_proj import Hide, Reveal
# ERRORS
@app.errorhandler(404)
def error_404(error):
return render_template('404.html'), 404
@app.errorhandler(403)
def error_403(error):
return render_template('403.html'), 403
@app.errorhandler(500)
def error_500(error):
return render_template('500.html'), 500
# ROUTES
@app.route('/test')
def test():
return render_template('500.html',title = 'TEST!!!!')
@app.route('/')
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/register', methods = ['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash('Your account has been created!!\nYou can now log in!!', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route('/login', methods = ['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('home'))
else:
flash('Login Unsuccessful!!\nPlease check email and password!!', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
@app.route("/account-update", methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
return render_template('account.html', title='Account', image_file=image_file, form=form)
def save_img_from_form(form_picture, direc):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/' + str(direc), picture_fn)
i = Image.open(form_picture)
i.save(picture_path)
return picture_fn
def save_img_from_path(form_picture, direc):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/' + str(direc), picture_fn)
i = Image.open(form_picture)
i.save(picture_path)
return picture_fn
@app.route('/hide', methods=['GET', 'POST'])
@login_required
def hide():
form = StegoHide()
if form.validate_on_submit():
inpOvtImg, proc_image, inpHideImg = '0', '0', '0'
if form.inputImage.data:
inpOvtImg = save_img_from_form(form.inputImage.data, 'ovt_to_cvt/inpimg')
dir_inp = str(str(app.root_path) + '/static/ovt_to_cvt/inpimg/')
if form.typeStego.data == 'IMG':
if form.hideImg.data:
inpHideImg = save_img_from_form(form.hideImg.data, 'ovt_to_cvt/hideimg')
else:
flash('Some problem with secret image upload!', 'danger')
dir_hid = str(str(app.root_path) + '/static/ovt_to_cvt/hideimg/')
proc_image = Hide.run_FLASK(str(dir_inp+inpOvtImg),
stego_type=form.typeStego.data,
dataToHide=str(dir_hid+inpHideImg),
lsb_bits=int(form.lsb.data),
resultFileName=form.fileName.data)
proc_image = save_img_from_path(proc_image, 'ovt_to_cvt/otpimg')
else:
proc_image = Hide.run_FLASK(str(dir_inp+inpOvtImg), stego_type=form.typeStego.data,
dataToHide=form.hideText.data,
lsb_bits=int(form.lsb.data),
resultFileName=form.fileName.data)
proc_image = save_img_from_path(proc_image, 'ovt_to_cvt/otpimg')
inp_stuff = InputInformation(inputImage=inpOvtImg,
typeStego=form.typeStego.data,
hideText=form.hideText.data, hideImg=inpHideImg,
lsb=int(form.lsb.data), fileName=form.fileName.data,
hidder=current_user)
db.session.add(inp_stuff)
inp_extra = Final_Stego(covertImg=proc_image, mainInfo=inp_stuff)
db.session.add(inp_extra)
db.session.commit()
flash('Your secret has been hidden!', 'success')
inp_id = inp_stuff.id
return redirect(url_for('hide_success', inp_id=inp_id))
else:
flash('Some problem with overt file upload!', 'danger')
return render_template('hide.html', title='Hide', form=form)
@app.route('/hide_success/<int:inp_id>/')
@login_required
def hide_success(inp_id):
INP = InputInformation.query.filter_by(id = inp_id).first_or_404()
OTP = Final_Stego.query.filter_by(mainInfo=INP).first_or_404()
return render_template('hide_success.html', INP=INP, OTP=OTP)
@app.route('/reveal', methods=['GET', 'POST'])
@login_required
def reveal():
form = StegoReveal()
if form.validate_on_submit():
inpCvtImg, rvldimg, rvlmsg, inp_extra = '0', '0', '0', '0'
if form.rvlimage.data:
inpCvtImg = save_img_from_form(form.rvlimage.data, 'cvt_to_ovt/inpimg')
dir_inp = str(str(app.root_path) + '/static/cvt_to_ovt/inpimg/')
inp_stuff = CovertInput(rvlimage=inpCvtImg, typeStego=form.typeStego.data,
lsb=form.lsb.data, revealer=current_user,
fileNameIfImage=form.hiddenFileName.data )
if form.typeStego.data == 'IMG':
rvldimg = Reveal.run_FLASK(str(dir_inp+inpCvtImg), stego_type=form.typeStego.data,
resultFileName=form.hiddenFileName.data,
lsb_bits=int(form.lsb.data))
rvldimg = save_img_from_path(rvldimg, 'cvt_to_ovt/rvldimg')
inp_extra = RevealedInfo(rvlImg=rvldimg, mainInfo=inp_stuff)
else:
rvlmsg = Reveal.run_FLASK(str(dir_inp+inpCvtImg), stego_type=form.typeStego.data,
lsb_bits=int(form.lsb.data))
inp_extra = RevealedInfo(rvlMsg=rvlmsg, mainInfo=inp_stuff)
db.session.add(inp_stuff)
db.session.add(inp_extra)
db.session.commit()
inp_id = inp_stuff.id
flash('Your secrets are now open...', 'success')
return redirect(url_for('reveal_success', inp_id=inp_id))
return render_template('reveal.html', title='Reveal', form=form)
@app.route('/reveal_success/<int:inp_id>/')
@login_required
def reveal_success(inp_id):
INP = CovertInput.query.filter_by(id = inp_id).first_or_404()
OTP = RevealedInfo.query.filter_by(mainInfo=INP).first_or_404()
return render_template('reveal_success.html', INP=INP, OTP=OTP)
@app.route('/profile/<string:username>')
@login_required
def profile(username):
user = User.query.filter_by(username=username).first_or_404()
hide_list = []
HIDES = InputInformation.query.filter_by(hidder=user)
for pic in HIDES:
hide_list.append(Final_Stego.query.filter_by(mainInfo=pic).first())
return render_template('profile.html', user=user, HIDES=HIDES,
hide_list=hide_list) | StarcoderdataPython |
1729713 | <reponame>ovv/sir-bot-a-lot
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from pathlib import Path
import sys
from setuptools import setup, convert_path
if sys.version_info < (3, 5):
raise RuntimeError('SirBot requires Python 3.5+')
def load_package_meta():
meta_path = convert_path('./sirbot/core/__meta__.py')
meta_ns = {}
with open(meta_path) as f:
exec(f.read(), meta_ns)
return meta_ns['DATA']
PKG_META = load_package_meta()
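# Editor note (illustrative): __meta__.py is expected to define a DATA dict
# whose keys are the ones consumed by setup() below, roughly
#
#   DATA = dict(name='sirbot', version='...', description='...', license='...',
#               author='...', author_email='...', url='...')
#
# plus the commented-out docker_name/docker_tag/copyright entries.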
def parse_reqs(req_path='./requirements/requirements.txt'):
"""Recursively parse requirements from nested pip files."""
install_requires = []
with codecs.open(req_path, 'r') as handle:
# remove comments and empty lines
lines = (line.strip() for line in handle
if line.strip() and not line.startswith('#'))
for line in lines:
# check for nested requirements files
if line.startswith('-r'):
# recursively call this function
install_requires += parse_reqs(req_path=line[3:])
else:
# add the line as a new requirement
install_requires.append(line)
return install_requires
def parse_readme():
"""Parse contents of the README."""
# Get the long description from the relevant file
readme_file = str(Path(__file__).parent / 'README.rst')
with codecs.open(readme_file, encoding='utf-8') as handle:
long_description = handle.read()
return long_description
setup(
long_description=parse_readme(),
keywords=[
'sirbot',
'chatbot',
'bot',
'slack',
],
packages=[
'sirbot',
'sirbot.core',
'sirbot.utils',
'sirbot.cli',
'sirbot.registry'
],
package_dir={
'sirbot': 'sirbot',
'sirbot.core': 'sirbot/core',
'sirbot.utils': 'sirbot/utils',
'sirbot.cli': 'sirbot/cli',
'sirbot.registry': 'sirbot/registry'
},
package_data={
'sirbot.core': ['config.yml'],
'sirbot.cli': ['sirbot.yml.mako', 'plugin.py.mako']
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and
# allow pip to create the appropriate form of executable for the
# target platform.
# entry_points={
# 'console_scripts': [
# 'sirbot=sirbot.cli:main'
# ]
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
entry_points={
'console_scripts': [
'sirbot=sirbot.cli:main'
]
},
include_package_data=True,
install_requires=parse_reqs('./requirements/requirements.txt'),
python_requires='~=3.5',
zip_safe=False,
tests_require=[
'pytest-runner',
'pytest-cov',
'pytest-aiohttp',
'pytest',
],
extras_require={
'dev': parse_reqs('./requirements/requirements_dev.txt')
},
# See: http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Environment :: Console',
],
author=PKG_META['author'],
# docker_name=PKG_META['docker_name'],
# docker_tag=PKG_META['docker_tag'],
author_email=PKG_META['author_email'],
# copyright=PKG_META['copyright'],
description=PKG_META['description'],
license=PKG_META['license'],
name=PKG_META['name'],
url=PKG_META['url'],
version=PKG_META['version'],
maintainer="pythondev slack community",
maintainer_email=PKG_META['author_email']
)
| StarcoderdataPython |
3349331 | <gh_stars>1-10
import bleach
from django.apps import apps
from django.conf import settings
from django.template import Library
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
app_templates_cache = {}
register = Library()
@register.simple_tag
def appearance_app_templates(template_name):
result = []
for app in apps.get_app_configs():
template_id = '{}.{}'.format(app.label, template_name)
if settings.DEBUG or template_id not in app_templates_cache:
try:
app_templates_cache[template_id] = get_template(
'{}/app/{}.html'.format(app.label, template_name)
).render()
except TemplateDoesNotExist:
"""Non fatal"""
app_templates_cache[template_id] = ''
result.append(app_templates_cache[template_id])
return mark_safe('\n'.join(result))
@register.filter
def appearance_get_choice_value(field):
try:
return dict(field.field.choices)[field.value()]
except TypeError:
return ', '.join([subwidget.data['label'] for subwidget in field.subwidgets if subwidget.data['selected']])
except KeyError:
return _('None')
@register.filter
def appearance_get_form_media_js(form):
return [form.media.absolute_path(path) for path in form.media._js]
@register.simple_tag
def appearance_get_icon(icon_path):
return import_string(dotted_path=icon_path).render()
@register.simple_tag
def appearance_get_user_theme_stylesheet(user):
if user and user.is_authenticated:
theme = user.theme_settings.theme
if theme:
return bleach.clean(
text=user.theme_settings.theme.stylesheet,
tags=('style',)
)
return ''
@register.simple_tag
def appearance_icon_render(icon_class, enable_shadow=False):
return icon_class.render(extra_context={'enable_shadow': enable_shadow})
@register.filter
def appearance_object_list_count(object_list):
try:
return object_list.count()
except TypeError:
return len(object_list)
| StarcoderdataPython |
3291568 | <filename>src/lib/datasets/sample/pano_det.py<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import pandas as pd
import torch
import json
import cv2
import os
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
import math
class PanoDataset(data.Dataset):
def is_true(self, s):
if 'T' in s or 't' in s:
return True
return False
def pano_center_crop_and_resize(self, img):
h, w = img.shape
new_w = int(h * 1.5)
margin = (w - new_w) // 2
eq = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))
img = eq.apply(img)
img = img[:, margin: margin + new_w]
img = cv2.resize(img, dsize=(self.default_resolution[1], self.default_resolution[0]))
img = (np.float64(img) - np.mean(img)) / np.std(img)
return img
def change_coords(self, x, y, H, W):
new_w = int(H * 1.5)
margin = (W - new_w) // 2
x -= margin
x /= new_w
y /= H
return (x, y)
def process_anno(self, anno_file_name, H, W):
w = W // 2
h = H // 2
annos = []
df = pd.read_csv(os.path.join(self.data_dir, anno_file_name), header=None)
for idx, row in df.iterrows():
if self.num_classes == 1 and not self.is_true(row[1]):
continue
tooth_num = int(row[0])
tooth_class = (tooth_num // 10) * 8 + tooth_num % 10 - 9
x_max = y_max = -math.inf
x_min = y_min = math.inf
j = 3
while j < 19:
x = int(row[j]) + w
y = int(row[j+1]) + h
j += 2
x_max = max(x_max, x)
y_max = max(y_max, y)
x_min = min(x_min, x)
y_min = min(y_min, y)
x_center = (x_min + x_max) // 2
y_center = (y_min + y_max) // 2
x_alveolar = w + int(row[27])
y_alveolar = h + int(row[28])
x_crown = w + (int(row[3]) + int(row[17])) // 2
y_crown = h + (int(row[4]) + int(row[18])) // 2
x_root = w + (int(row[9]) + int(row[11])) // 2
y_root = h + (int(row[10]) + int(row[12])) // 2
tooth_width = (x_max - x_min) / (H * 1.5)
tooth_height = (y_max - y_min) / H
x_center, y_center = self.change_coords(x_center, y_center, H, W)
x_alveolar, y_alveolar = self.change_coords(x_alveolar, y_alveolar, H, W)
x_crown, y_crown = self.change_coords(x_crown, y_crown, H, W)
x_root, y_root = self.change_coords(x_root, y_root, H, W)
annos.append({
'tooth_class': tooth_class,
'tooth_size': (tooth_width, tooth_height),
'extreme_points': [
[x_center, y_center],
[x_alveolar, y_alveolar],
[x_crown, y_crown],
[x_root, y_root]
]
})
return annos
def __getitem__(self, index):
img_file_name = self.img_file_names[index]
img_path = os.path.join(self.data_dir, img_file_name)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
H, W = img.shape
inp = self.pano_center_crop_and_resize(img)
inp = np.expand_dims(inp, 0)
output_h = self.opt.output_h
output_w = self.opt.output_w
hm_center = np.zeros((self.num_classes, output_h, output_w), dtype=np.float32)
ind_center = np.zeros((self.max_objs), dtype=np.int64)
tooth_wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
if not self.opt.test:
anno_file_name = img_file_name[:-3] + 'txt'
annos = self.process_anno(anno_file_name, H, W)
num_objs = min(len(annos), self.max_objs)
draw_gaussian = draw_umich_gaussian
for k in range(num_objs):
anno = annos[k]
cls_id = anno['tooth_class']
pts = np.array(anno['extreme_points'], dtype=np.float32) * [output_w, output_h]
pt_int = pts.astype(np.int32)
tooth_w, tooth_h = anno['tooth_size']
tooth_wh[k] = tooth_w, tooth_h
radius = gaussian_radius((math.ceil(tooth_h * output_h), math.ceil(tooth_w * output_w)))
radius = max(0, int(radius))
if self.num_classes == 1:
draw_gaussian(hm_center[0], pt_int[0], radius)
else:
draw_gaussian(hm_center[cls_id], pt_int[0], radius)
ind_center[k] = pt_int[0, 1] * output_w + pt_int[0, 0]
reg_mask[k] = 1
ret = {
'input': inp,
'img_id': img_file_name[:-4],
'original_wh': (W, H),
'hm_center': hm_center,
'reg_mask': reg_mask,
'tooth_wh': tooth_wh,
'ind_center': ind_center
}
return ret
| StarcoderdataPython |