id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (7 classes) |
---|---|---|
1667201 | #====================================================
# text_reply.py
## decide the response according to the input text
# YIHAN LINE BOT
# Created by <NAME> on May 21, 2021.
# Copyright © 2021 <NAME>. All rights reserved.
#====================================================
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
import json
import requests
import re
import os, sys
#---------------- custom module ----------------
import text_push as text_push
import RSSfeed as RSSfeed
import tools as tools
import bot_functions as bot_functions
import user_db_manipulate as user_db_manipulate
from config import *
#---------------- global variables ----------------
# keywords for detecting which action the message matches
action_key_word = [".*文章列表.*", ".*查看追蹤列表.*", ".*取消追蹤.*"]
#---------------------------------------------------
def text_reply_message(user_message, userId):
#---------------------- Info recording ---------------------
## website name = crawling: <title>
## site url = user_message
## article titles = web_info[nth article]['title']
## published dates = web_info[nth article]['published']
## articles' urls = web_info[nth article]['links'][0]['href']
## image to show = crawling: <og:image> || <icon>
#-----------------------------------------------------------
return_message_array = []
repeat_tracker = False
# get user's data(DB)
with open("./json/userDB/"+userId+".json", "r") as data:
userData = json.load(data)
try:
### 加入追蹤 Add_new_tracker
if requests.get( user_message ).status_code == 200:
if len(userData["tracker_list"]) == 0:
return_message_array = bot_functions.add_new_tracker( user_message, userId )
else:
                # check whether the URL has already been added to the tracker list
for element in userData["tracker_list"]:
if element["web_url"] == user_message:
                        # remind the user that this URL is already being tracked
return_message_array.append( TextSendMessage(text="這個網誌您已有追蹤囉!") )
### show tracker_list (carousel)
FlexMessage = bot_functions.show_tracker_list( userId )
return_message_array.append( FlexSendMessage( 'trackers', FlexMessage ) )
repeat_tracker = True
break
                # if the URL was not found in the list, add a new tracker
if repeat_tracker == False:
return_message_array = bot_functions.add_new_tracker( user_message, userId )
# if the user is in "tutorial status", then also reply the guiding text
if (userData["status"] == "tutorial"):
return_message_array.append( TextSendMessage(text="已成功將網誌加入追蹤!") )
return_message_array.append( TextSendMessage(text="請按上上則訊息中的「按我看文章列表」以查看最新文章") )
    except requests.exceptions.RequestException as e:  # user_message is not a valid URL
### 查看最新文章 Show_articles_card =================================
if( tools.analyze_text(user_message, action_key_word[0]) ):
            # split user_message to get which website the user asked for
            ### split user_message
str_array = user_message.split('#')
web_name = str_array[0]
# call function to get articles' cards
return_message_array = bot_functions.show_articles_card( web_name, userId )
# if the user is in "tutorial status", then also reply the guiding text
if (userData["status"] == "tutorial"):
return_message_array.append( TextSendMessage(text="成功看到這個網誌的最新文章列表囉!") )
return_message_array.append( TextSendMessage(text="請按以下按鈕以查看追蹤清單",
quick_reply=QuickReply(items=[
QuickReplyButton(
action=MessageAction(
label="查看追蹤列表",
text="查看追蹤列表"))
])) )
### 查看追蹤列表 Show_tracker_list =================================
elif( tools.analyze_text(user_message, action_key_word[1]) ):
if len(userData["tracker_list"]) == 0:
return_message_array.append( TextSendMessage(text="還沒有追蹤任何文章唷,現在就開始追蹤吧~") )
return_message_array.append( TextSendMessage(text="請以鍵盤輸入想追蹤的網誌URL:") )
else:
### show tracker_list (carousel)
FlexMessage = bot_functions.show_tracker_list( userId )
return_message_array.append( FlexSendMessage( 'trackers', FlexMessage ) )
# if the user is in "tutorial status", then also reply the guiding text
if (userData["status"] == "tutorial"):
return_message_array.append( TextSendMessage(text="成功看到列表了!以上這些就是您目前已追蹤的網誌唷~") )
return_message_array.append( TextSendMessage(text="請按上則訊息中的「取消追蹤」以取消追蹤此網誌") )
### 取消追蹤 Delete_tracker =================================
elif( tools.analyze_text(user_message, action_key_word[2]) ):
### split "web_name" string from user_message
str_array = user_message.split('#')
web_name = str_array[1]
bot_functions.delete_tracker( userId, web_name )
# if the user is in "tutorial status", then also reply the guiding text
if (userData["status"] == "tutorial"):
user_db_manipulate.modify_db(userId, "status", "general") # Finish tutorial
return_message_array.append( TextSendMessage(text="已成功刪除一個追蹤項目!") )
return_message_array.append( TextSendMessage(text="恭喜呀~您已完成試用!現在,試著加入自己想追蹤的網誌吧~") )
else:
return_message_array.append( TextSendMessage(text="已成功刪除一個追蹤項目!",
quick_reply=QuickReply(items=[
QuickReplyButton(
action=MessageAction(
label="查看追蹤列表",
text="查看追蹤列表")),
])) )
### 不認識的指令 Exception Handler
else:
return_message_array.append( TextSendMessage(text="咦這個指令沒看過耶🤔"))
return_message_array.append( TextSendMessage(text="請點選以下指令、或直接輸入網址唷!",
quick_reply=QuickReply(items=[
QuickReplyButton(
action=MessageAction(
label="查看追蹤列表",
text="查看追蹤列表")),
])) )
    return return_message_array  # there may be more than one reply message, so return a list
# ### Add_new_tracker
# if( requests.get(user_message).status_code == 200 ):
# # user send new URL
# return_message_array = bot_functions.add_new_tracker( user_message )
# # if the user is in "tutorial status", then also reply the guiding text
# if (userData["status"] == "tutorial"):
# return_message_array.append( TextSendMessage(text="已成功將網誌加入追蹤!請按上則訊息中的「按我看文章列表」以查看最新文章 ") )
| StarcoderdataPython |
3313286 | <filename>sphinx_js/nodes.py
from docutils import nodes
from docutils.nodes import Node
class automodulestoctree(nodes.comment):
pass
def automodules_noop(self: nodes.NodeVisitor, node: Node) -> None:
pass
def automodules_toc_visit_html(self: nodes.NodeVisitor, node: automodulestoctree) -> None:
"""Hide automodules toctree list in HTML output."""
raise nodes.SkipNode
| StarcoderdataPython |
3215711 | from floodsystem.stationdata import build_station_list as stations
def test_1D():
#Task 1D (1) Produce a list of rivers with stations without reiteration and in alphabetical order (SZ)
def rivers_with_station(stations):
"""takes station object list.
        returns a sorted list of the rivers which have stations."""
set_rivers = set()
for i in stations:
set_rivers.add(i.river)
        assert isinstance(set_rivers, set)
return sorted(set_rivers)
# Task 1D (2) (EE)
def stations_by_river(stations):
"""takes list of station objects
returns dictionary of format {river: list of stations on this river}"""
stations_by_river_dictionary = {}
# if station is in dictionary, add to to appropriate list, else create new key and item
for object in stations:
if object.river in stations_by_river_dictionary:
stations_by_river_dictionary[object.river].append(object)
else:
stations_by_river_dictionary[object.river] = [object]
        assert isinstance(stations_by_river_dictionary, dict)
return stations_by_river_dictionary
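    # Illustrative check (added sketch): exercise both helpers with a minimal
    # stand-in for the real station objects, since build_station_list() needs
    # live data. _FakeStation is a hypothetical helper, not part of floodsystem.
    class _FakeStation:
        def __init__(self, river):
            self.river = river
    fakes = [_FakeStation("Cam"), _FakeStation("Thames"), _FakeStation("Cam")]
    assert rivers_with_station(fakes) == ["Cam", "Thames"]
    assert len(stations_by_river(fakes)["Cam"]) == 2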
| StarcoderdataPython |
34970 | <reponame>codernayeem/python-cheat-sheet
# Functions
print("************* Function ***********")
# Simple function without any arguments/parameters
def say_welcome():
    return print('Welcome')
# Simple function with arguments/parameters
def say_helo(name, age):
print('Helo', name, age)
# this function returns None
say_helo('Nayeem', 18) # passing args as positional args
say_helo(age=19, name='Sami') # passing args as keyword args (use keywords if you do not follow the positional order)
def check_odd_number(n):
return True if n % 2 else False
if check_odd_number(43):
print(43, " is a odd number")
print("********* Default parameter **********")
# Simple function with a default arguments/parameters
def say_somethings(name, message="Welcome"):
print(message, name)
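# Example calls (added): the default value is used when the argument is omitted.
say_somethings('Nayeem')             # prints "Welcome Nayeem"
say_somethings('Nayeem', 'Hello')    # prints "Hello Nayeem"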
# Type hint:
print("********* Type hint **********")
def greeting(name: str) -> str:
# Type hints improve IDEs and linters. They make it much easier to statically reason about your code
# The Python runtime does not enforce function and variable type annotations. They can be used by third party tools such as type checkers, IDEs, linters, etc
# here we defined name should be str and a str will be returned
return 'Hello ' + name
greeting("Nayeem")
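# Added sketch: annotations also work on variables and on parameters with defaults.
# A static checker such as mypy would flag repeat(5), but Python itself still runs it.
count: int = 0
def repeat(text: str, times: int = 2) -> str:
    return text * times
print(repeat("ab"))   # abab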
# scope
print("************ Scope *************")
parent_name = "Anything" # this is a global variable
def show_parent1():
print(parent_name) # this will print the global variable
def show_parent2():
parent_name = "Lovely" # this will not change global variable. it will create a new local variable
print(parent_name) # print local variable
def show_parent3():
# we can use global variable in function
# but cannot modify them directly
# TO modify:
# method 1:
global parent_name
parent_name = "Something" # this will change the global variable
print(parent_name)
# method 2:
globals()['parent_name'] = "Something_Nothing" # this will change the global variable
print(globals()['parent_name'])
def show_parent4(parent_name):
print(parent_name) # this parent_name is a local variable
# to use the global variable here
print(globals()['parent_name']) # this will print the global variable, not the local one
# A variable can not be both : parameter and global
# So you can not do that here:
# global parent_name
# print(parent_name)
show_parent1()
show_parent2()
show_parent3()
show_parent4("Long Lasting")
l1 = [56, 87, 89, 45, 57]
d1 = {'Karim': 50, 'Rafiq': 90, 'Sabbir': 60}
# Lambda function
print("************ Lambda function *************")
# A lambda function is just a simple one-line anonymous function.
# Its definition ==> lambda parameter_list: expression
# A lambda function is useful when we need a function only once, e.g. as an argument to another function
print(min(d1.items(), key=lambda item: item[1]))  # returns the (key, value) pair with the smallest value
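# Added example: a lambda assigned to a name behaves like a normal one-line function.
square = lambda x: x * x   # equivalent to: def square(x): return x * x
print(square(4))           # 16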
# Python built-in functions/methods
print("************ Some Built-in functions *************")
print(len(l1)) # returns the length of that iterable
print(sum(l1)) # return the sum of an iterable
print(max(l1)) # returns the biggest element
print(min(l1)) # returns the smallest element
print(max(d1, key=lambda k: d1[k])) # returns the key with the biggest value
print(min(d1.items(), key=lambda item: item[1])) # returns the (key, value) pair with the smallest value
print(all([0, 1, 5])) # returns True if all the elements is True, otherwise False
print(any([0, 1, 5])) # returns True if any of the elements is True, otherwise False
print(repr('hi')) # call __repr__() for that object. Represent object
print(id(l1)) # returns a unique integer number which represents identity
print(type(56)) # returns the class type of that object
print(dir(567)) # Returns a list of the specified object's properties and methods
print(ord('A')) # 65 : Return the Unicode code point for a one-character string
print(chr(65)) # 'A' : Return a Unicode string of one character with ordina
print(abs(-62)) # 62 : Return a absolute value of a number
eval('print("hi")') # Evaluates and executes an expression
print(eval('(58*9)+3**2')) # Evaluates and executes an expression
print("************ All Built-in functions *************")
# abs() Returns the absolute value of a number
# all() Returns True if all items in an iterable object are true
# any() Returns True if any item in an iterable object is true
# ascii() Returns a readable version of an object. Replaces none-ascii characters with escape character
# bin() Returns the binary version of a number
# bool() Returns the boolean value of the specified object
# bytearray() Returns an array of bytes
# bytes() Returns a bytes object
# callable() Returns True if the specified object is callable, otherwise False
# chr() Returns a character from the specified Unicode code.
# classmethod() Converts a method into a class method
# compile() Returns the specified source as an object, ready to be executed
# complex() Returns a complex number
# delattr() Deletes the specified attribute (property or method) from the specified object
# dict() Returns a dictionary (Array)
# dir() Returns a list of the specified object's properties and methods
# divmod() Returns the quotient and the remainder when argument1 is divided by argument2
# enumerate() Takes a collection (e.g. a tuple) and returns it as an enumerate object
# eval() Evaluates and executes an expression
# exec() Executes the specified code (or object)
# filter() Use a filter function to exclude items in an iterable object
# float() Returns a floating point number
# format() Formats a specified value
# frozenset() Returns a frozenset object
# getattr() Returns the value of the specified attribute (property or method)
# globals() Returns the current global symbol table as a dictionary
# hasattr() Returns True if the specified object has the specified attribute (property/method)
# hash() Returns the hash value of a specified object
# help() Executes the built-in help system
# hex() Converts a number into a hexadecimal value
# id() Returns the id of an object
# input() Allowing user input
# int() Returns an integer number
# isinstance() Returns True if a specified object is an instance of a specified object
# issubclass() Returns True if a specified class is a subclass of a specified object
# iter() Returns an iterator object
# len() Returns the length of an object
# list() Returns a list
# locals() Returns an updated dictionary of the current local symbol table
# map() Returns the specified iterator with the specified function applied to each item
# max() Returns the largest item in an iterable
# memoryview() Returns a memory view object
# min() Returns the smallest item in an iterable
# next() Returns the next item in an iterable
# object() Returns a new object
# oct() Converts a number into an octal
# open() Opens a file and returns a file object
# ord() Converts a one-character string into its Unicode code point
# pow() Returns the value of x to the power of y
# print() Prints to the standard output device
# property() Gets, sets, deletes a property
# range() Returns a sequence of numbers, starting from 0 and increments by 1 (by default)
# repr() Returns a readable version of an object
# reversed() Returns a reversed iterator
# round() Rounds a number
# set() Returns a new set object
# setattr() Sets an attribute (property/method) of an object
# slice() Returns a slice object
# sorted() Returns a sorted list
# @staticmethod() Converts a method into a static method
# str() Returns a string object
# sum() Sums the items of an iterator
# super() Returns an object that represents the parent class
# tuple() Returns a tuple
# type() Returns the type of an object
# vars() Returns the __dict__ property of an object
# zip() Returns an iterator, from two or more iterators
# Decorators
print('*********** Decorators ************')
from functools import wraps
def star(func):
    @wraps(func)  # preserve the wrapped function's name and docstring
    def inner(*args, **kwargs):
        print("*" * 30)
        func(*args, **kwargs)
        print("*" * 30)
    return inner
@star
def printer1(msg):
print(msg)
def percent(func):
    @wraps(func)  # preserve the wrapped function's name and docstring
    def inner(*args, **kwargs):
        print("%" * 30)
        func(*args, **kwargs)
        print("%" * 30)
    return inner
@star
@percent
def printer2(msg):
print(msg)
printer1("Hello")
printer2("Hello")
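# Expected output of printer2("Hello") (added note): decorators apply bottom-up,
# so percent wraps printer2 first and star wraps the result:
# ******************************
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Hello
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# ******************************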
# Function caching
print('*********** Function caching ************')
import time
from functools import lru_cache
@lru_cache(maxsize=32)
def some_work(n):
time.sleep(3)
return n * 2
print('Running work')
some_work(5)
print('Calling again ..')
some_work(5)  # same argument as before, so the cached result returns immediately
print('finished')
# Coroutines
print('*********** Coroutines ************')
import time
def searcher():
time.sleep(3)
    book = "This is ok"
while True:
        text = (yield)  # receiving values via yield makes this a coroutine
if text in book:
print(f'"{text}" found')
else:
print(f'"{text}" not found')
search = searcher()
next(search) # this runs until that while loop
search.send('ok')
print('Going for next')
search.send('okk')
print('Going for next')
search.send('is')
print('Finished')
search.close()
| StarcoderdataPython |
3393682 | from slack_sms_gw.config import (
LoggingConfig,
SlackConfig
)
from slack_sms_gw.slack.client import SlackClient
from requests import Response, PreparedRequest
from requests.structures import CaseInsensitiveDict
class SlackClientHelper:
def __init__(self, log_config: LoggingConfig, config: SlackConfig):
self.log_config = log_config
self.config = config
self.slack_client = SlackClient(
log_config=self.log_config,
config=self.config,
)
@staticmethod
def mock_send_ok(request: PreparedRequest) -> Response:
resp = Response()
resp.status_code = 200
resp.url = request.url
headers = CaseInsensitiveDict()
headers["content-type"] = "text/html"
headers["content-encoding"] = "gzip"
resp.encoding = headers["content-encoding"]
resp.headers = headers
resp._content = b"ok"
return resp
| StarcoderdataPython |
3335221 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0018_person_last_name_current'),
]
operations = [
migrations.AlterField(
model_name='timelineitem',
name='families',
field=models.ManyToManyField(help_text='Sind hier Familien ausgew\xe4hlt, so wird der Eintrag nur bei den ausgew\xe4hlten Familien angezeigt, sonst bei allen Familien', to='genealogio.Family', verbose_name='Familien', blank=True),
preserve_default=True,
),
]
| StarcoderdataPython |
42988 | <gh_stars>0
from datadog import initialize, api
options = {
'api_key': 'api_key',
'app_key': 'app_key'
}
initialize(**options)
start_time = 1419436850
end_time = 1419436870
api.Event.query(start=start_time, end=end_time, priority="normal", tags=["application:web"])
| StarcoderdataPython |
3212434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
##-------- [PPC] Jobshop Scheduling ---------
# * Author: <NAME>
# * Date: Apr 30th, 2020
# * Description:
# Using the event-driven scheduling method
# to solve the JSS prob. Here is a sample
# code with the style of OOP. Feel free to
# modify it as you like.
##--------------------------------------------
#
import os
import numpy as np
import pandas as pd
from gantt_plot import Gantt
#entity
class Order:
def __init__(self, ID, AT, DD, routing, PT):
self.ID = ID
self.AT = AT #AT: arrival time
self.DD = DD #DD: due date
self.PT = PT #PT: processing time
self.routing = routing
self.progress = 0
#resource in factory
class Source:
def __init__(self, order_info):
self.order_info = order_info
self.output = 0
def arrival_event(self, fac):
raise NotImplementedError
class Machine:
def __init__(self, ID, DP_rule):
self.ID = ID
self.state = 'idle'
self.buffer = []
self.wspace = [] #wspace: working space
self.DP_rule = DP_rule
def start_processing(self, fac):
raise NotImplementedError
def end_process_event(self, fac):
raise NotImplementedError
class Factory:
def __init__(self, order_info, DP_rule):
self.order_info = order_info
self.DP_rule = DP_rule
self.event_lst = pd.DataFrame(columns=["event_type", "time"])
#statistics
self.throughput = 0
self.order_statistic = pd.DataFrame(columns = ["ID", "release_time",
"complete_time", "due_date",
"flow_time", "lateness",
"tardiness"])
#[Plug in] tool of gantt plotting
self.gantt_plot = Gantt()
#build ur custom factory
self.__build__()
def __build__(self):
raise NotImplementedError
def initialize(self, order_info):
raise NotImplementedError
def next_event(self, stop_time):
raise NotImplementedError
def event(self, event_type):
raise NotImplementedError
def update_order_statistic(self, order):
raise NotImplementedError
# some parameters
M = float('inf')
LOG = True
stop_time = 500
if __name__ == '__main__':
#read the input data sheet
data_dir = os.getcwd() + "/data/"
order_info = pd.read_excel(data_dir + "order_information.xlsx")
#data preprocessing
order_info = order_info.sort_values(['arrival_time']).reset_index(drop=True)
DP_rule = 'SPT' #'EDD'
    # build the factory (Factory.__init__ already calls self.__build__())
    fac = Factory(order_info, DP_rule)
#start the simulation
fac.next_event(stop_time)
#output result
print(fac.order_statistic)
fac.gantt_plot.draw_gantt()
| StarcoderdataPython |
65967 | <reponame>kainstan/stealer
import re
from typing import Optional
from django.http import HttpResponse
from core.interface import Service
from core.model import Result, ErrorResult
from tools import http_utils
from core import config
from core.type import Video
headers = {
"user-agent": config.user_agent
}
info_headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": config.user_agent
}
download_headers = {
    "accept": "*/*",
    "accept-encoding": "identity;q=1, *;q=0",
    "accept-language": "zh-CN,zh;q=0.9,ja;q=0.8,en;q=0.7,zh-TW;q=0.6,de;q=0.5,fr;q=0.4,ca;q=0.3,ga;q=0.2",
    "range": "bytes=0-",
    "sec-fetch-dest": "video",
    "sec-fetch-mode": "no-cors",
    "sec-fetch-site": "cross-site",
    "user-agent": config.user_agent
}
vtype = Video.HUOSHAN
class HuoshanService(Service):
@classmethod
def get_prefix_pattern(cls) -> str:
return 'com/hotsoon/s\/'
@classmethod
def make_url(cls, index) -> str:
return 'https://share.huoshan.com/hotsoon/s/' + index
@classmethod
def index(cls, url) -> Optional[str]:
index = re.findall(r'(?<=s\/)\w+', url)
try:
return index[0]
except IndexError:
return None
@classmethod
def fetch(cls, url: str, mode=0) -> Result:
url = cls.get_url(url)
if url is None:
return ErrorResult.URL_NOT_INCORRECT
        # request the short link to get the itemId
res = http_utils.get(url, header=headers)
if http_utils.is_error(res):
return Result.error(res)
try:
item_id = re.findall(r"(?<=item_id=)\d+(?=\&)", res.url)[0]
except IndexError:
return Result.failed(res.reason)
        # video info URL
infourl = "https://share.huoshan.com/api/item/info?item_id=" + item_id
        # request the long link to get the video address (play_addr)
url_res = http_utils.get(infourl, header=info_headers)
if http_utils.is_error(url_res):
return Result.error(url_res)
vhtml = str(url_res.text)
try:
video_id = re.findall(r'(?<=video_id\=)\w+(?=\&)', vhtml)[0]
except IndexError:
return Result.failed(url_res.reason)
if not video_id:
return ErrorResult.VIDEO_ADDRESS_NOT_FOUNT
link = "https://api.huoshan.com/hotsoon/item/video/_source/?video_id=" + video_id + "&line=0&app_id=0&vquality=normal"
result = Result.success(link)
if mode != 0:
result.ref = res.url
return result
@classmethod
def download(cls, url) -> HttpResponse:
return cls.proxy_download(vtype, url, download_headers)
if __name__ == '__main__':
HuoshanService.fetch('http://share.huoshan.com/hotsoon/s/eVDEDNYXu78')
| StarcoderdataPython |
181360 | <reponame>ADrozdova/ASR
import random
import librosa as lr
import torch
from torch import Tensor
from hw_asr.augmentations.base import AugmentationBase
class PitchShift(AugmentationBase):
def __init__(self, **kwargs):
self.steps = kwargs.get("steps")
self.sampling_rate = kwargs.get("sampling_rate", 16000)
def __call__(self, data: Tensor):
n_steps = float(random.randint(-self.steps, self.steps))
data = data.squeeze(0).numpy()
        return torch.from_numpy(lr.effects.pitch_shift(y=data, sr=self.sampling_rate, n_steps=n_steps)).unsqueeze(0)  # keyword args work across librosa versions
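# Usage sketch (added; assumes the torch and librosa dependencies are installed):
#   aug = PitchShift(steps=2, sampling_rate=16000)
#   wave = torch.zeros(1, 16000)   # 1 second of silence as a dummy input
#   print(aug(wave).shape)         # torch.Size([1, 16000])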
| StarcoderdataPython |
1694662 | # Generated by Django 3.0 on 2020-03-16 19:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('administration', '0020_auto_20200316_1325'),
]
operations = [
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(help_text='key of the key-value pair', max_length=255, unique=True, verbose_name='key')),
('value', models.TextField(help_text='value of the key-value pair', verbose_name='value')),
],
options={
'verbose_name': 'variable',
'verbose_name_plural': 'variables',
},
),
]
| StarcoderdataPython |
3364807 | import tensorflow as tf
import tensorflow.keras as K
import tensorflow_probability as tfp
class UNet(K.Model):
def __init__(self,
base_channels=64,
fixed_size=False,
in_channels=3,
in_size=(512, 512),
classes=21,
aux=False,
variational=False,
activation='relu',
**kwargs):
super().__init__(**kwargs)
assert (in_channels > 0)
assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0))
self.base_channels = base_channels
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
self.in_conv = K.layers.Conv2D(base_channels,
3,
activation=activation,
padding='same',
kernel_initializer='he_normal')
self.conv1 = [K.layers.Conv2D(base_channels,
3,
activation=activation,
padding='same',
kernel_initializer='he_normal') for _ in range(3)]
self.conv2 = [K.layers.Conv2D(base_channels * 2,
3,
activation=activation,
padding='same',
kernel_initializer='he_normal') for _ in range(4)]
self.conv3 = [K.layers.Conv2D(base_channels * 4,
3,
activation=activation,
padding='same',
kernel_initializer='he_normal') for _ in range(4)]
self.conv4 = [K.layers.Conv2D(base_channels * 8,
3,
activation=activation,
padding='same',
kernel_initializer='he_normal') for _ in range(4)]
if not variational:
self.conv5 = [K.layers.Conv2D(base_channels * 16,
3,
activation=activation,
padding='same',
kernel_initializer='he_normal') for _ in range(2)]
else:
self.conv5 = [tfp.layers.Convolution2DReparameterization(base_channels * 16,
3,
activation=activation,
padding='same'),
K.layers.Conv2D(base_channels * 16,
3,
activation=activation,
padding='same',
kernel_initializer='he_normal')
]
self.dropout1 = K.layers.Dropout(0.1)
self.dropout2 = K.layers.Dropout(0.2)
self.dropout3 = K.layers.Dropout(0.3)
self.pool = K.layers.MaxPool2D(pool_size=(2, 2))
self.bn = []
for _ in range(18):
self.bn.append(K.layers.BatchNormalization())
self.up6 = K.layers.Conv2DTranspose(base_channels * 8, (3, 3), strides=(2, 2), padding="same")
self.up7 = K.layers.Conv2DTranspose(base_channels * 4, (3, 3), strides=(2, 2), padding="same")
self.up8 = K.layers.Conv2DTranspose(base_channels * 2, (3, 3), strides=(2, 2), padding="same")
self.up9 = K.layers.Conv2DTranspose(base_channels, (3, 3), strides=(2, 2), padding="same")
self.conv10 = K.layers.Conv2D(classes, 1, strides=1, padding="same")
def call(self, inputs, training=None, mask=None):
c1 = self.in_conv(inputs)
c1 = self.bn[0](c1, training=training)
c1 = self.dropout1(c1)
c1 = self.conv1[0](c1)
c1 = self.bn[1](c1, training=training)
p1 = self.pool(c1)
c2 = self.conv2[0](p1)
c2 = self.bn[2](c2, training=training)
c2 = self.dropout1(c2)
c2 = self.conv2[1](c2)
c2 = self.bn[3](c2, training=training)
p2 = self.pool(c2)
c3 = self.conv3[0](p2)
c3 = self.bn[4](c3, training=training)
c3 = self.dropout2(c3)
c3 = self.conv3[1](c3)
c3 = self.bn[5](c3, training=training)
p3 = self.pool(c3)
c4 = self.conv4[0](p3)
c4 = self.bn[6](c4, training=training)
c4 = self.dropout2(c4)
c4 = self.conv4[1](c4)
c4 = self.bn[7](c4, training=training)
p4 = self.pool(c4)
c5 = self.conv5[0](p4)
c5 = self.bn[8](c5, training=training)
c5 = self.dropout3(c5)
c5 = self.conv5[1](c5)
c5 = self.bn[9](c5, training=training)
u6 = self.up6(c5)
u6 = K.layers.concatenate([u6, c4])
c6 = self.conv4[2](u6)
c6 = self.bn[10](c6, training=training)
c6 = self.dropout2(c6)
c6 = self.conv4[3](c6)
c6 = self.bn[11](c6, training=training)
u7 = self.up7(c6)
u7 = K.layers.concatenate([u7, c3])
c7 = self.conv3[2](u7)
c7 = self.bn[12](c7, training=training)
c7 = self.dropout2(c7)
c7 = self.conv3[3](c7)
c7 = self.bn[13](c7, training=training)
u8 = self.up8(c7)
u8 = K.layers.concatenate([u8, c2])
c8 = self.conv2[2](u8)
c8 = self.bn[14](c8, training=training)
c8 = self.dropout1(c8)
c8 = self.conv2[3](c8)
c8 = self.bn[15](c8, training=training)
u9 = self.up9(c8)
u9 = K.layers.concatenate([u9, c1])
c9 = self.conv1[1](u9)
c9 = self.bn[16](c9, training=training)
c9 = self.dropout1(c9)
c9 = self.conv1[2](c9)
c9 = self.bn[17](c9, training=training)
final = self.conv10(c9)
return final
class VUNet(K.Model):
def __init__(self,
base_channels=32,
fixed_size=False,
in_channels=3,
in_size=(512, 512),
classes=21,
aux=False,
activation='relu',
**kwargs):
super().__init__(**kwargs)
assert (in_channels > 0)
assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0))
self.base_channels = base_channels
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
self.in_conv = tfp.layers.Convolution2DReparameterization(base_channels,
3,
activation=activation,
padding='same')
self.conv1 = [tfp.layers.Convolution2DReparameterization(base_channels,
3,
activation=activation,
padding='same') for _ in range(3)]
self.conv2 = [tfp.layers.Convolution2DReparameterization(base_channels * 2,
3,
activation=activation,
padding='same') for _ in range(4)]
self.conv3 = [tfp.layers.Convolution2DReparameterization(base_channels * 4,
3,
activation=activation,
padding='same') for _ in range(4)]
self.conv4 = [tfp.layers.Convolution2DReparameterization(base_channels * 8,
3,
activation=activation,
padding='same') for _ in range(4)]
self.conv5 = [tfp.layers.Convolution2DReparameterization(base_channels * 16,
3,
activation=activation,
padding='same') for _ in range(2)]
self.dropout1 = K.layers.Dropout(0.1)
self.dropout2 = K.layers.Dropout(0.2)
self.dropout3 = K.layers.Dropout(0.3)
self.pool = K.layers.MaxPool2D(pool_size=(2, 2))
self.bn = []
for _ in range(18):
self.bn.append(K.layers.BatchNormalization())
self.up6 = K.layers.Conv2DTranspose(base_channels * 8, (3, 3), strides=(2, 2), padding="same")
self.up7 = K.layers.Conv2DTranspose(base_channels * 4, (3, 3), strides=(2, 2), padding="same")
self.up8 = K.layers.Conv2DTranspose(base_channels * 2, (3, 3), strides=(2, 2), padding="same")
self.up9 = K.layers.Conv2DTranspose(base_channels, (3, 3), strides=(2, 2), padding="same")
self.conv10 = tfp.layers.Convolution2DReparameterization(classes, 1, strides=1, padding="same")
def call(self, inputs, training=None, mask=None):
c1 = self.in_conv(inputs)
c1 = self.bn[0](c1, training=training)
c1 = self.dropout1(c1)
c1 = self.conv1[0](c1)
c1 = self.bn[1](c1, training=training)
p1 = self.pool(c1)
c2 = self.conv2[0](p1)
c2 = self.bn[2](c2, training=training)
c2 = self.dropout1(c2)
c2 = self.conv2[1](c2)
c2 = self.bn[3](c2, training=training)
p2 = self.pool(c2)
c3 = self.conv3[0](p2)
c3 = self.bn[4](c3, training=training)
c3 = self.dropout2(c3)
c3 = self.conv3[1](c3)
c3 = self.bn[5](c3, training=training)
p3 = self.pool(c3)
c4 = self.conv4[0](p3)
c4 = self.bn[6](c4, training=training)
c4 = self.dropout2(c4)
c4 = self.conv4[1](c4)
c4 = self.bn[7](c4, training=training)
p4 = self.pool(c4)
c5 = self.conv5[0](p4)
c5 = self.bn[8](c5, training=training)
c5 = self.dropout3(c5)
c5 = self.conv5[1](c5)
c5 = self.bn[9](c5, training=training)
u6 = self.up6(c5)
u6 = K.layers.concatenate([u6, c4])
c6 = self.conv4[2](u6)
c6 = self.bn[10](c6, training=training)
c6 = self.dropout2(c6)
c6 = self.conv4[3](c6)
c6 = self.bn[11](c6, training=training)
u7 = self.up7(c6)
u7 = K.layers.concatenate([u7, c3])
c7 = self.conv3[2](u7)
c7 = self.bn[12](c7, training=training)
c7 = self.dropout2(c7)
c7 = self.conv3[3](c7)
c7 = self.bn[13](c7, training=training)
u8 = self.up8(c7)
u8 = K.layers.concatenate([u8, c2])
c8 = self.conv2[2](u8)
c8 = self.bn[14](c8, training=training)
c8 = self.dropout1(c8)
c8 = self.conv2[3](c8)
c8 = self.bn[15](c8, training=training)
u9 = self.up9(c8)
u9 = K.layers.concatenate([u9, c1])
c9 = self.conv1[1](u9)
c9 = self.bn[16](c9, training=training)
c9 = self.dropout1(c9)
c9 = self.conv1[2](c9)
c9 = self.bn[17](c9, training=training)
final = self.conv10(c9)
return final
if __name__ == "__main__":
x = tf.random.uniform((1, 512, 512, 3))
unet = UNet(32)
unet.build(input_shape=(None, None, None, 3))
print(unet.summary())
print(unet(x).shape)
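    # Added sketch: the variational variant can be exercised the same way; the
    # tfp reparameterization layers contribute KL terms, exposed via model.losses.
    vunet = VUNet(16)
    print(vunet(x).shape)      # (1, 512, 512, 21)
    print(len(vunet.losses))   # one KL divergence term per reparameterization layer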
| StarcoderdataPython |
15901 | from os.path import abspath, join, dirname
from colibris.conf import settings
STATIC_PATH = abspath(join(dirname(__file__), 'swagger'))
UI_URL = settings.API_DOCS_URL
STATIC_URL = '{}/static'.format(UI_URL)
APISPEC_URL = '{}/apispec'.format(UI_URL)
| StarcoderdataPython |
3329915 | <reponame>S73ph4n/octavvs<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Atmospheric and scattering correction
"""
import gc
import os.path
from time import monotonic
import numpy as np
import sklearn.linear_model
import sklearn.cluster
#import statsmodels.multivariate.pca
from scipy.interpolate import PchipInterpolator
from scipy.signal import hilbert, savgol_filter, tukey
from scipy.io import loadmat, savemat
import matplotlib.pyplot as plt
from . import baseline
def load_reference(wn, what=None, matfilename=None):
"""
Loads and normalizes a spectrum from a Matlab file, interpolating at the given points.
The reference is assumed to cover the entire range of wavenumbers.
Parameters:
wn: array of wavenumbers at which to get the spectrum
what: A string defining what type of reference to get, corresponding to a file in the
'reference' directory
matfilename: the name of an arbitrary Matlab file to load data from; the data must be
in a matrix called AB, with wavenumbers in the first column.
Returns: spectrum at the points given by wn
"""
if (what is None) == (matfilename is None):
raise ValueError("Either 'what' or 'matfilename' must be specified")
if what is not None:
matfilename = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__),
'reference', what + '.mat'))
ref = loadmat(matfilename)['AB']
# Handle the case of high-to-low since the interpolator requires low-to-high
d = 1 if ref[0,0] < ref[-1,0] else -1
ref = PchipInterpolator(ref[::d,0], ref[::d,1])(wn)
return ref #/ ref.max()
def nonnegative(y, fracspectra=.02, fracvalues=.02):
"""
Make a matrix of spectral data nonnegative by shifting all the spectra up by the same computed
amount, followed by setting negative values to 0. The shift is chosen such that at most
fracspectra of the spectra get more than fracvalues of their intensities set to zero.
Parameters:
y: array of intensities for (pixel, wavenumber)
fracspectra: unheeded fraction of the spectra
fracvalues: maximal fraction of points to clip at 0
Returns: shifted spectra in the same format as y
"""
s = int(fracspectra * y.shape[0])
v = int(fracvalues * y.shape[1])
if s == 0 or v == 0:
return y - np.min(y.min(), 0)
if s >= y.shape[0] or v >= y.shape[1]:
return np.maximum(y, 0)
yp = np.partition(y, v, axis=1)[:,v]
a = np.partition(yp, s)[s]
return np.maximum(y - a if a < 0 else y, 0)
def find_wn_ranges(wn, ranges):
"""
Find indexes corresponding to the beginning and end of a list of ranges of wavenumbers. The
wavenumbers have to be sorted in either direction.
Parameters:
wn: array of wavenumbers
ranges: numpy array of shape (n, 2) with desired wavenumber ranges in order [low,high]
Returns: numpy array of shape (n, 2) with indexes of the wavenumbers delimiting those ranges
"""
if isinstance(ranges, list):
ranges = np.array(ranges)
if(wn[0] < wn[-1]):
return np.stack((np.searchsorted(wn, ranges[:,0]),
np.searchsorted(wn, ranges[:,1], 'right')), 1)
return len(wn) - np.stack((np.searchsorted(wn[::-1], ranges[:,1], 'right'),
np.searchsorted(wn[::-1], ranges[:,0])), 1)
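# Example (added for illustration, not part of the original module):
#   wn = np.linspace(1000, 1010, 11)
#   find_wn_ranges(wn, [[1002, 1005]])   ->  array([[2, 6]])
#   i.e. wn[2:6] covers 1002-1005 cm-1 (the end index is exclusive).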
def cut_wn(wn, y, ranges):
"""
Cut a set of spectra, leaving only the given wavenumber range(s).
Parameters:
wn: array of wavenumbers, sorted in either direction
y: array of spectra, shape (..., wavenumber)
ranges: list or numpy array of shape (..., 2) with desired wavenumber ranges in pairs (low, high)
Returns: (wavenumbers, spectra) with data in the given wavenumber ranges
"""
if isinstance(ranges, list):
ranges = np.array(ranges)
inrange = lambda w: ((w >= ranges[...,0]) & (w <= ranges[...,1])).any()
ix = np.array([inrange(w) for w in wn])
return wn[ix], y[...,ix]
def atmospheric(wn, y, atm=None, cut_co2 = True, extra_iters=5, extra_factor=0.25,
smooth_win=9, progressCallback = None):
"""
    Apply atmospheric correction to multiple spectra, subtracting as much of the atmospheric
spectrum as needed to minimize the sum of squares of differences between consecutive points
in the corrected spectra. Each supplied range of wavenumbers is corrected separately.
Parameters:
wn: array of wavenumbers, sorted in either direction
y: array of spectra in the order (pixel, wavenumber), or just one spectrum
atm: atmospheric spectrum; if None, load the default
cut_co2: replace the CO2 region with a neatly fitted spline
extra_iters: number of iterations of subtraction of a locally reshaped atmospheric spectrum
(needed if the relative peak intensities are not always as in the atmospheric reference)
extra_factor: how much of the reshaped atmospheric spectrum to remove per iteration
smooth_win: window size (in cm-1) for smoothing of the spectrum in the atm regions
progressCallback(int a, int b): callback function called to indicated that the processing
is complete to a fraction a/b.
Returns:
tuple of (spectra after correction, array of correction factors; shape (spectra,ranges))
"""
squeeze = False
yorig = y
if y.ndim == 1:
y = y[None,:]
squeeze = True
else:
y = y.copy()
if atm is None or (isinstance(atm, str) and atm == ''):
atm = load_reference(wn, what='water')
elif isinstance(atm, str):
atm = load_reference(wn, matfilename=atm)
else:
atm = atm.copy()
# ranges: numpy array (n, 2) of n non-overlapping wavenumber ranges (typically for H2O only), or None
# extra_winwidth: width of the window (in cm-1) used to locally reshape the atm spectrum
ranges = [[1300, 2100], [3410, 3850], [2190, 2480]]
extra_winwidth = [30, 150, 40]
corr_ranges = 2 if cut_co2 else 3
# ranges = ranges[:2]
# extra_winwidth = extra_winwidth[:2]
if ranges is None:
ranges = np.array([0, len(wn)])
else:
ranges = find_wn_ranges(wn, ranges)
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < 2: continue
atm[p:q] -= baseline.straight(wn[p:q], atm[p:q]);
savgolwin = 1 + 2 * int(smooth_win * (len(wn) - 1) / np.abs(wn[0] - wn[-1]))
if progressCallback:
progressA = 0
progressB = 1 + corr_ranges * (extra_iters + (1 if savgolwin > 1 else 0))
progressCallback(progressA, progressB)
dh = atm[:-1] - atm[1:]
dy = y[:,:-1] - y[:,1:]
dh2 = np.cumsum(dh * dh)
dhdy = np.cumsum(dy * dh, 1)
az = np.zeros((len(y), corr_ranges))
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < 2: continue
r = q-2 if q <= len(wn) else q-1
az[:, i] = ((dhdy[:,r] - dhdy[:,p-1]) / (dh2[r] - dh2[p-1])) if p > 0 else (dhdy[:,r] / dh2[r])
y[:, p:q] -= az[:, i, None] @ atm[None, p:q]
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
for pss in range(extra_iters):
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < 2: continue
window = 2 * int(extra_winwidth[i] * (len(wn) - 1) / np.abs(wn[0] - wn[-1]))
winh = (window+1)//2
dy = y[:,:-1] - y[:,1:]
dhdy = np.cumsum(dy * dh, 1)
aa = np.zeros_like(y)
aa[:,1:winh+1] = dhdy[:,1:window:2] / np.maximum(dh2[1:window:2], 1e-8)
aa[:,1+winh:-winh-1] = (dhdy[:,window:-1] - dhdy[:,:-1-window]) / np.maximum(dh2[window:-1] - dh2[:-1-window], 1e-8)
aa[:,-winh-1:-1] = (dhdy[:,-1:] - dhdy[:,-1-window:-1:2]) / np.maximum(dh2[-1] - dh2[-1-window:-1:2], 1e-8)
aa[:, 0] = aa[:, 1]
aa[:, -1] = aa[:, -2]
aa = savgol_filter(aa, window + 1, 3, axis=1)
y[:, p:q] -= extra_factor * aa[:, p:q] * atm[p:q]
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
if savgolwin > 1:
for i in range(corr_ranges):
p, q = ranges[i]
if q - p < savgolwin: continue
y[:, p:q] = savgol_filter(y[:, p:q], savgolwin, 3, axis=1)
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
if cut_co2:
rng = np.array([[2190, 2260], [2410, 2480]])
rngm = rng.mean(1)
rngd = rngm[1] - rngm[0]
cr = find_wn_ranges(wn, rng).flatten()
if cr[1] - cr[0] > 2 and cr[3] - cr[2] > 2:
a = np.empty((4, len(y)))
a[0:2,:] = np.polyfit((wn[cr[0]:cr[1]]-rngm[0])/rngd, y[:,cr[0]:cr[1]].T, deg=1)
a[2:4,:] = np.polyfit((wn[cr[2]:cr[3]]-rngm[1])/rngd, y[:,cr[2]:cr[3]].T, deg=1)
P,Q = find_wn_ranges(wn, rngm[None,:])[0]
t = np.interp(wn[P:Q], wn[[Q,P] if wn[0] > wn[-1] else [P,Q]], [1, 0])
tt = np.array([-t**3+t**2, -2*t**3+3*t**2, -t**3+2*t**2-t, 2*t**3-3*t**2+1])
pt = a.T @ tt
y[:, P:Q] += (pt - y[:, P:Q]) * tukey(len(t), .3)
corrs = np.zeros(2)
ncorrs = np.zeros_like(corrs)
for i in range(len(ranges)):
p, q = ranges[i]
if q - p < 2: continue
corr = np.abs(yorig[:, p:q] - y[:, p:q]).sum(1) / np.maximum(np.abs(yorig[:, p:q]), np.abs(y[:, p:q])).sum(1)
gas = int(i > 1)
corrs[gas] += corr.mean()
ncorrs[gas] += 1
if ncorrs[0] > 1:
corrs[0] = corrs[0] / ncorrs[0]
return (y.squeeze() if squeeze else y), corrs
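# Usage sketch (added; wn and spectra are assumed to be loaded elsewhere):
#   y_corr, corr_factors = atmospheric(wn, spectra, cut_co2=True)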
def kkre(wn, ref):
wn2 = wn ** 2.
wa = wn * ref
kk = np.empty_like(wn)
for i in range(len(wn)):
with np.errstate(divide='ignore', invalid='ignore'):
fg = wa / (wn2 - wn[i] ** 2.)
if i == 0 or i == len(wn) - 1:
fg[i] = 0
else:
fg[i] = (fg[i-1] + fg[i+1]) / 2
kk[i] = 2/np.pi * np.trapz(x=wn, y=fg)
if wn[0] < wn[-1]:
return kk
return -kk
def hilbert_n(wn, ref, zeropad=500):
"""
Compute the Kramers-Kronig relations by Hilbert transform, extending the absorption spectrum
with 0 to either size and resampling the spectrum at evenly spaced intervals if it is found to
be unevenly sampled.
"""
# Cache some data structures to avoid having to reinitialize them on every call.
if not hasattr(hilbert_n, "wn") or hilbert_n.wn is not wn or hilbert_n.zp != zeropad:
hilbert_n.wn = wn
even = (wn[-1] - wn[0]) / (len(wn) - 1)
diff = np.abs((np.diff(wn) - even) / even).mean()
hilbert_n.zp = zeropad
hilbert_n.evenspaced = diff < 1e-3
hilbert_n.increasing = wn[0] < wn[-1]
# print('hilbert',hilbert_n.evenspaced,hilbert_n.increasing)
if not hilbert_n.evenspaced:
hilbert_n.lin = np.linspace(min(wn[0], wn[-1]), max(wn[0], wn[-1]), len(wn))
hilbert_n.npad = int(len(wn) / abs(wn[-1] - wn[0]) * zeropad)
hilbert_n.nim = np.zeros((len(wn) + 2 * hilbert_n.npad))
if hilbert_n.evenspaced:
if hilbert_n.npad == 0:
if hilbert_n.increasing:
hilbert_n.nim = ref
else:
hilbert_n.nim = ref[::-1]
elif hilbert_n.increasing:
hilbert_n.nim[hilbert_n.npad:hilbert_n.npad+len(wn)] = ref
else:
hilbert_n.nim[hilbert_n.npad:hilbert_n.npad+len(wn)] = ref[::-1]
else:
if hilbert_n.increasing:
hilbert_n.nim[hilbert_n.npad:hilbert_n.npad+len(wn)] = PchipInterpolator(wn, ref)(hilbert_n.lin)
else:
hilbert_n.nim[hilbert_n.npad:hilbert_n.npad+len(wn)] = PchipInterpolator(wn[::-1], ref[::-1])(hilbert_n.lin)
nreal = -np.imag(hilbert(hilbert_n.nim))
if hilbert_n.npad:
nreal = nreal[hilbert_n.npad:-hilbert_n.npad]
if hilbert_n.evenspaced:
return nreal if hilbert_n.increasing else nreal[::-1]
return PchipInterpolator(hilbert_n.lin, nreal)(wn)
def pca_nipals(x, ncomp, tol=1e-5, max_iter=1000, copy=True, explainedvariance=None):
"""
    NIPALS algorithm for PCA, based on the code in statsmodels.multivariate
but with optimizations as in Bassan's Matlab implementation to
sacrifice some accuracy for speed.
x: ndarray of data, will be altered
ncomp: number of PCA components to return
tol: tolerance
copy: If false, destroy the input matrix x
explainedvariance: If >0, stop after this fraction of the total variance is explained
returns: PCA loadings as rows
"""
if copy:
x = x.copy()
if explainedvariance is not None and explainedvariance > 0:
varlim = (x * x).sum() * (1. - explainedvariance)
else:
varlim = 0
npts, nvar = x.shape
vecs = np.empty((ncomp, nvar))
for i in range(ncomp):
factor = np.ones(npts)
for j in range(max_iter):
vec = x.T @ factor #/ (factor @ factor)
vec = vec / np.sqrt(vec @ vec)
f_old = factor
factor = x @ vec #/ (vec @ vec)
f_old = factor - f_old
if tol > np.sqrt(f_old @ f_old) / np.sqrt(factor @ factor):
break
vecs[i, :] = vec
if i < ncomp - 1:
x -= factor[:,None] @ vec[None,:]
if varlim > 0 and (x * x).sum() < varlim:
return vecs[:i+1, :]
return vecs
def compute_model(wn, ref, n_components, a, d, bvals, konevskikh=True, linearcomponent=False,
variancelimit = None):
"""
Support function for rmiesc_miccs. Compute the extinction matrix for Bassan's algorithm,
then PCA transform it.
Parameters:
wn: array of wavenumbers
ref: reference spectrum
n_components: number of PCA components to use
a: array of values for the parameter a (index of refraction)
d: array of values for the parameter d*4pi (sphere size)
bvals: number of values for parameter b (mixing of a and real part of n from absorption ref)
konevskikh: if True, use the faster method by Konevskikh et al.
variancelimit: if a number (around 0.9996), use as many PCA components as needed to
explain this fraction of the variance of the extinction matrix
"""
# Compute the scaled real part of the refractive index by Kramers-Kronig transform:
# We skip a factor 2pi because it's normalized away anyway.
# n_im = ref / wn
# nkk = -np.imag(hilbert(n_im)) if wn[0] < wn[-1] else -np.imag(hilbert(n_im[::-1]))[::-1]
# I'm a bit confused about the division/multiplication by wn.
# Bassan's matlab code uses wn*ref in kkre
# bassan2010 says k (=n_im) is proportional to ref
# but various sources say k is ref/wn * const
# My kkre reproduces's Bassan with the same wn*ref
# My hilbert_n gives the same output with Hilbert transform of ref
# Causin's python code Hilbert transforms ref for Bassan's algorith but ref/wn for Konevskikh's!
# Solheim's matlab code Hilbert transforms ref/wn
if konevskikh:
# nim = ref / ref.max() / (wn * 100)
nim = ref / (wn * 100)
nre = hilbert_n(wn, nim, 300)
nmin = nre.min()
# print('refmax', ref.max(), 'nrange', nmin, nre.max())
if nmin < -1:
nre = nre / -nmin
nim = nim / -nmin
# My revised distribution of alpha_0 and gamma
alpha_0 = 1e-2 * np.linspace(d[0] * (a[0]-1), d[-1] * (a[-1]-1), len(a))
gamma = .25 * 2 * np.log(10) / np.pi * np.linspace(1 / alpha_0[0], 1 / alpha_0[-1], len(alpha_0))
# Solheim's distributions of alpha_0 and gamma
# alpha_0 = 1e-2 * d * (a - 1)
# gamma = .25 * 2 * np.log(10) / np.pi / alpha_0
Q = np.empty((len(alpha_0) * len(gamma), len(wn))) # Initialize the extinction matrix
# print('alpha_0', alpha_0)
# print('gamma', gamma)
# Build the extinction matrix
n_row = 0
for a0 in alpha_0:
for g in gamma:
rho = a0 * (1. + g * nre) * wn * 100
denom = 1. / g + nre
tanbeta = nim / denom
beta = np.arctan2(nim, denom)
cosb = np.cos(beta)
cosbrho = cosb / rho
# Following Konevskikh et al 2016
Q[n_row] = 2. - 4. * cosbrho * (np.exp(-rho * tanbeta) *
(np.sin(rho - beta) + cosbrho * np.cos(rho - 2 * beta)) -
cosbrho * np.cos(2 * beta))
n_row += 1
# savemat('everything-p.mat', locals())
else:
nkk = kkre(wn, ref/wn) # should divide by wn here (or not multiply by wn in the function)
# nkk = hilbert_n(wn, ref / wn, 300) # should divide by wn
nkk = nkk / abs(nkk.min())
# Build the extinction matrix
Q = np.empty((len(a) * bvals * len(d), len(wn))) # Initialize the extinction matrix
n_row = 0
for i in range(len(a)):
b = np.linspace(0.0, a[i] - 1.01, bvals) # Range of amplification factors of nkk
for j in range(len(b)):
n = a[i] + b[j] * nkk # Compute the real refractive index
for k in range(len(d)):
rho = d[k] * (n - 1.) * wn
# Compute the extinction coefficients for each combination of a, b and d:
Q[n_row] = 2. - 4. / rho * np.sin(rho) + \
4. / (rho * rho) * (1. - np.cos(rho))
n_row += 1
n_nonpca = 3 if linearcomponent else 2
# Orthogonalization of the model to improve numeric stability
refn = ref / np.sqrt(ref@ref)
Q = Q - (Q @ refn)[:,None] @ refn[None,:]
# Perform PCA of the extinction matrix
pca = pca_nipals(Q, ncomp=n_components, tol=1e-5, copy=False, explainedvariance=variancelimit)
model = np.empty((n_nonpca + pca.shape[0], pca.shape[1]))
model[n_nonpca:, :] = pca
# # This method is up to 50% slower but gives additional info
# pca = statsmodels.multivariate.pca.PCA(Q, ncomp=n_components,
# method='nipals', tol=1e-5, demean=False, standardize=False)
# esum = pca.eigenvals.cumsum()
# n_components = np.min((np.searchsorted(esum, esum[-1] * variancelimit) + 1, len(esum)))
# model = np.zeros((n_nonpca + n_components, len(wn)))
# model[n_nonpca:, :] = pca.loadings[:,:n_components].T
model[0,:] = ref
model[1,:] = 1
    if linearcomponent:
        model[2,:] = np.linspace(0., 1., len(wn))
# for i in range(2, n_nonpca):
# w = model[i, :] - np.sum(np.dot(model[i, :], b) * b for b in model[0:i, :])
# model[i,:] = w / np.sqrt(w @ w)
# savemat('everything_after-p.mat', locals())
# killmenow
# Orthogonalization of the model to improve numeric stability (doing it after PCA is only
# marginally slower)
# for i in range(len(model)):
# v = model[i, :]
# w = v - np.sum(np.dot(v, b) * b for b in model[0:i, :])
# model[i,:] = w / np.linalg.norm(w)
return model
def stable_rmiesc_clusters(iters, clusters):
"""
Make a cluster size scheme for reliable convergence in rmiesc_miccs.
Parameters:
    iters: The number of basic iterations to be used, preferably at least about 12-20
    clusters: the number of clusters to use during the clustered iterations
    Returns:
    array of cluster sizes (or zeros) for each iteration; this will be longer than
    the number of input iterations
"""
iters = max(2, iters) * 2
cc = np.zeros(iters, dtype=np.int)
cc[:iters//3] = 1
cc[iters//2:iters*3//4] = clusters
return cc
def rmiesc(wn, app, ref, n_components=7, iterations=10, clusters=None,
pcavariancelimit=None,
verbose=False, a=np.linspace(1.1, 1.5, 10), d=np.linspace(2.0, 8.0, 10),
bvals=10, plot=False, progressCallback = None, progressPlotCallback=None,
konevskikh=False, linearcomponent=True, weighted=False, renormalize=False,
autoiterations=False, targetrelresiduals=0.95):
"""
Correct scattered spectra using Bassan's algorithm. This implementation does no orthogonalization
of the extinction matrix or PCA components relative to the reference, nor is the reference smoothed
or filtered through a sum of gaussians as in the original Matlab implementation.
Parameters:
wn: sorted array of wavenumbers (high-to-low or low-to-high)
app: apparent spectrum, shape (pixels, wavenumbers)
ref: reference spectrum; array (wavenumbers)
n_components: number of principal components to be calculated for the extinction matrix
iterations: number of iterations of the algorithm
clusters: if not None, cluster pixels into this many clusters in each iteration and use
a common reference spectrum for each cluster. May be given as a list with one value per
iteration, in which case 0 means to reuse clusters from the previous iteration and mix
new/old references for stable convergence.
If clusters is negative, use stable_rmiesc_clusters to generate the list.
verbose: print progress information
a: indexes of refraction to use in model
d: sphere sizes to use in model, in micrometers
bvals: number of values for the model parameter b
plot: produce plots of the cluster references, if in cluster mode
progressCallback(int a, int b): callback function called to indicated that the processing
is complete to a fraction a/b.
konevskikh: if True, use the faster method by Konevskikh et al.
linearcomponent: if True, include a linear term in the model (used in Bassan's paper only).
weighted: if true, downweight the 1800-2800 region when fitting the model.
renormalize: if True, renormalize spectra against reference in each generation.
autoiterations; if True, iterate until residuals stop improving
targetrelresiduals: if autoiterations, stop when this relative change in residuals is seen
Return: corrected apparent spectra (the best encountered if autoiterations, else the final ones)
"""
# Make a rescaled copy of d and include the factor 4*pi
d = d * 4e-4 * np.pi;
# The input can be a single spectrum or a matrix of spectra. If the former, squeeze at the end.
squeeze = False
if app.ndim == 1:
app = app[None,:]
squeeze = True
if weighted:
weights = np.ones_like(wn)
weights[range(*find_wn_ranges(wn, [[1800, 2800]])[0])] = .001 ** .5
weights = weights[:, None]
else:
weights = None
if plot:
plt.figure()
color=plt.cm.jet(np.linspace(0, 1, iterations))
plt.plot(wn, app.mean(0), 'k', linewidth=.5)
if np.isscalar(clusters):
if clusters == 0:
clusters = None
elif clusters < 0:
clusters = stable_rmiesc_clusters(iterations, -clusters)
iterations = len(clusters)
else:
clusters = np.repeat(clusters, iterations)
elif clusters is not None:
if len(clusters) != iterations:
raise ValueError('len(clusters) must match iterations')
clusters = clusters.copy()
if progressCallback:
# Compute the number of progress steps
progressA = 0
if clusters is None:
progressB = 1 + (iterations > 1) * len(app)
else:
progressB = 0
prev = 1
for cl in clusters:
if cl > 0:
prev = cl
progressB += prev
startt = monotonic()
corrected = None # Just to get rid of warnings in the editor; will be set on iteration 0
# Set parameters for automatic iteration control
if renormalize:
autoupadd = 3 # Residual going up counts as residual going down too little this many times
automax = 3 # Stop when residual has gone down too little this many times
else:
autoupadd = 1
automax = 5
if clusters is not None:
# Cluster mode: In each iteration, after correcting all the spectra, cluster them. Then take the
# mean of the corrected spectra in each cluster as the new reference for that cluster in the next
# iteration.
ref = ref.copy()[None, :] # One reference per cluster
ref = ref / (np.abs(ref).mean() / np.abs(app).mean())
labels = np.zeros(len(app)) # Cluster labels; initially all in cluster 0
# clusters[-1] = 0
progstep = 1 # Current progress bar step size
for iteration in range(iterations):
gc.collect() # Because my old laptop was unhappy with RAM usage otherwise
curc = clusters[iteration] # Current cluster size setting
if curc > 0:
progstep = curc
# Skip this iteration if every spectrum has stopped improving and the cluster settings
# are unchanged
if autoiterations:
if not iteration or curc != clusters[iteration-1]:
unimproved = np.zeros(len(app), dtype=int)
elif (unimproved <= automax).sum() == 0:
progressA += progstep
if progressCallback:
progressCallback(progressA, progressB)
# print('progX',progressA,progressB)
continue
# Possibly recluster the spectra and compute reference spectra
if iteration == 0:
pass
elif curc > 0:
if autoiterations:
notdone = unimproved <= automax
nds = notdone.sum()
curc = min(curc, int(nds))
labels = np.zeros(len(app)) - 1
if curc == nds:
labels[notdone] = range(0, nds)
elif curc > 1:
kmeans = sklearn.cluster.MiniBatchKMeans(curc)
labels[notdone] = kmeans.fit_predict(corrected[notdone,:])
else:
labels[notdone] = 0
else:
if curc > 1:
kmeans = sklearn.cluster.MiniBatchKMeans(curc)
labels = kmeans.fit_predict(corrected)
else:
labels = np.zeros(len(app), dtype=int)
if(len(ref) != curc):
ref = np.zeros((curc, len(wn)))
for cl in range(curc):
sel = labels == cl
if sel.sum() == 0:
print('Info: empty cluster at %d, %d' % (iteration, cl))
else:
ref[cl,:] = corrected[sel].mean(0)
else:
# Mix old reference and corrected spectrum. This requires the clusters
# to remain unchanged.
if autoiterations:
labels[unimproved > automax] = -1 # Exclude all that are done already
for cl in range(len(ref)):
sel = labels == cl
if sel.sum() > 0:
ref[cl,:] = .5 * corrected[sel].mean(0) + .5 * ref[cl,:]
if plot:
plt.plot(wn, ref.T, c=color[iteration], linewidth=.5)
if progressPlotCallback:
progressPlotCallback(ref, (iteration, iterations))
ref[ref < 0] = 0
if iteration == 0 :
projs = [np.dot(app[i], ref[0].T)*(ref[0]/(ref[0] @ ref[0])) for i in range(len(app))]
else :
projs = [np.dot(app[i], corrected[i].T)*(corrected[i]/(corrected[i] @ corrected[i])) for i in range(len(app))]
projs = np.array(projs)
app_deref = app - projs
for cl in range(len(ref)):
ix = np.where(labels == cl)[0] # Indexes of spectra in this cluster
if autoiterations:
ix = ix[unimproved[ix] <= automax]
if ix.size:
model0 = compute_model(wn, ref[cl], n_components, a, d, bvals,
konevskikh=konevskikh, linearcomponent=linearcomponent,
variancelimit=pcavariancelimit)
#print(np.shape(corrected), np.shape(app))
if plot:
plt.figure()
plt.plot(projs[0], label="Proj")
plt.plot(app[0, :] - projs[0], label='Difference')
plt.plot(app[0, :], label='App')
plt.plot(model0[0, :], label='Reference')
if iteration :
plt.plot(corrected[0, :], label='Prev')
plt.legend()
plt.show()
model = model0[1:, :] #Then we don't need the reference part of the model
if weights is None:
cons = np.linalg.lstsq(model.T, app_deref[ix].T, rcond=None)[0]
else:
cons = np.linalg.lstsq(model.T * weights, app_deref[ix].T * weights, rcond=None)[0]
corrs = app[ix] - cons.T @ model
if renormalize:
corrs = corrs / cons[0, :, None]
resids = ((corrs - projs[ix])**2).sum(1) #We compare to the previous correction, not the reference
if iteration == 0:
corrected = corrs
residuals = resids
nimprov = len(resids)
else:
improved = resids < residuals[ix]
iximp = ix[improved] # Indexes of improved spectra
if autoiterations:
impmore = resids[improved] < residuals[iximp] * targetrelresiduals
unimproved[iximp[impmore]] = 0
unimproved[iximp[np.logical_not(impmore)]] += 1
unimproved[ix[np.logical_not(improved)]] += autoupadd
corrected[iximp, :] = corrs[improved, :]
residuals[iximp] = resids[improved]
nimprov = improved.sum()
if verbose:
print("iter %3d, cluster %3d (%5d px): avgres %7.3g imprvd %4d time %f" %
(iteration, cl, len(ix), resids.mean(), nimprov, monotonic()-startt))
if progressCallback:
progressCallback(progressA + cl + 1, progressB)
if progressCallback:
progressA += progstep
if len(ref) < progstep:
progressCallback(progressA, progressB)
# print('progY',progressA,progressB)
else:
# For efficiency, compute the model from the input reference spectrum only once
model = compute_model(wn, ref, n_components, a, d, bvals, konevskikh=konevskikh,
linearcomponent=linearcomponent, variancelimit=pcavariancelimit)
if weights is None:
cons = np.linalg.lstsq(model.T, app.T, rcond=None)[0]
else:
cons = np.linalg.lstsq(model.T * weights, app.T * weights, rcond=None)[0]
corrected = app - cons[1:, :].T @ model[1:, :]
if renormalize:
corrected = corrected / cons[0, :, None]
if autoiterations:
residuals = ((corrected - model[0, :])**2).sum(1)
if progressPlotCallback:
progressPlotCallback(ref, (0, len(app) + 1))
if verbose:
print("all pixels, iter %2d time %f" % (0, monotonic()-startt))
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
if iterations > 1:
for s in range(len(app)):
gc.collect()
unimproved = 0
ref = corrected[s, :] # Corrected spectrum as new reference
for iteration in range(1, iterations):
ref[ref < 0] = 0. # No negative values in reference spectrum
model = compute_model(wn, ref, n_components, a, d, bvals,
konevskikh=konevskikh, linearcomponent=linearcomponent,
variancelimit=pcavariancelimit)
if weights is None:
cons = np.linalg.lstsq(model.T, app[s], rcond=None)[0]
else:
cons = np.linalg.lstsq(model.T * weights, app[s] * weights[:, 0], rcond=None)[0]
corr = app[s] - cons[1:] @ model[1:, :]
if renormalize:
corr = corr / cons[0]
print("pixel %5d: iter %3d residual %7.3g " %
(s, iteration+1, ((corr - model[0, :])**2).sum()))
if autoiterations:
residual = ((corr - model[0, :])**2).sum()
if residual < residuals[s]:
corrected[s, :] = corr
unimproved = unimproved + 1 if residual > residuals[s] * targetrelresiduals else 0
residuals[s] = residual
else:
unimproved += autoupadd
if unimproved > automax:
break
ref = corr
if not autoiterations:
corrected[s, :] = corr
residual = ((corr / cons[0] - model[0, :])**2).sum()
if verbose:
print("pixel %5d: iter %3d residual %7.3g time %f" %
(s, iteration+1, residual, monotonic()-startt))
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
if progressPlotCallback and len(app) < 50:
progressPlotCallback(ref, (s + 1, len(app) + 1))
return corrected.squeeze() if squeeze else corrected
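# Illustrative sketch (not part of the original file): the projection step used above,
# projs[i] = (app[i] . ref) * ref / (ref . ref), is the orthogonal projection of each
# apparent spectrum onto the current reference spectrum. The vectors below are
# arbitrary values chosen only to demonstrate the operation.
def _projection_onto_reference_sketch():
    ref = np.array([1.0, 2.0, 2.0])
    spectrum = np.array([3.0, 0.0, 4.0])
    # Component of `spectrum` along `ref`; subtracting it (as in app - projs above)
    # leaves only the part of the spectrum orthogonal to the reference.
    return np.dot(spectrum, ref) * (ref / (ref @ ref))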
| StarcoderdataPython |
1781182 | from splinter import Browser
from time import sleep
b = Browser()
b.visit('http://ddg.gg')
print(f'Title: {b.title}')
# print(f'html: {b.html}')
print(f'URL: {b.url}')
b.visit('http://google.com')
b.back()
sleep(3)
b.forward()
sleep(2)
b.quit()
| StarcoderdataPython |
1603791 | #
#
# Copyright (c) 2013, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Georgia Tech Research Corporation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# \authors: <NAME> (Healthcare Robotics Lab, Georgia Tech.)
# \adviser: <NAME> (Healthcare Robotics Lab, Georgia Tech.)
import subprocess
import roslib
roslib.load_manifest('hrl_dynamic_mpc')
import sys
import time
import hrl_lib.util as ut
import numpy as np
import os
from hrl_dynamic_mpc.srv import LogData
import threading
import rospy
import signal
import darci_client as dc
class BatchRunner():
def __init__(self, num_trials, f_threshes, delta_t_s, goal_reset=None):
self.num_trials = num_trials
self.lock = threading.RLock()
self.first_reach = True
goal_reset[2] = goal_reset[2] - 0.15
self.goal_reset = goal_reset
rospy.init_node('batch_trials_reaching')
rospy.wait_for_service('rosbag_data')
rospy.wait_for_service('log_skin_data')
rospy.wait_for_service('log_data')
self.rosbag_srv = rospy.ServiceProxy('rosbag_data', LogData)
self.ft_and_humanoid_record_srv = rospy.ServiceProxy('log_data', LogData)
self.skin_record_srv = rospy.ServiceProxy('log_skin_data', LogData)
self.robot_state = dc.DarciClient()
self.f_threshes = f_threshes
self.delta_t_s = delta_t_s
self.reaching_left_results = []
self.reaching_right_results = []
def run_trial(self, i, side, f_thresh, t_impulse, goal):
self.rosbag_srv('first_impact_'+str(i).zfill(3)+'_'+side+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
self.skin_record_srv('first_impact_'+str(i).zfill(3)+'_'+side+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
self.ft_and_humanoid_record_srv('first_impact_'+str(i).zfill(3)+'_'+side+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
goal_ls = goal[0].A1.tolist()
goal_str_buf = [str(goal_ls[0])+', '+str(goal_ls[1])+', '+str(goal_ls[2])]
goal_str = ''.join(goal_str_buf)
controller = subprocess.call(['python',
'run_controller_debug.py',
'--darci',
'--t_impulse='+str(t_impulse),
'--f_thresh='+str(f_thresh),
"--goal="+goal_str])
time.sleep(1.0)
self.rosbag_srv('')
self.skin_record_srv('')
self.ft_and_humanoid_record_srv('')
data = ut.load_pickle('./result.pkl')
if side == 'left':
self.reaching_left_results.append(data['result'])
else:
self.reaching_right_results.append(data['result'])
return data['result']
def run_slip_trial(self, i, f_thresh, t_impulse, goal):
self.rosbag_srv('slip_impact_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
self.skin_record_srv('slip_impact_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
self.ft_and_humanoid_record_srv('slip_impact_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
goal_ls = goal[0].A1.tolist()
goal_str_buf = [str(goal_ls[0])+', '+str(goal_ls[1])+', '+str(goal_ls[2])]
goal_str = ''.join(goal_str_buf)
controller = subprocess.call(['python',
'run_controller_debug.py',
'--darci',
'--t_impulse='+str(t_impulse),
'--f_thresh='+str(f_thresh),
"--goal="+goal_str])
time.sleep(1.0)
self.rosbag_srv('')
self.skin_record_srv('')
self.ft_and_humanoid_record_srv('')
data = ut.load_pickle('./result.pkl')
self.reaching_right_results.append(data['result'])
return data['result']
def run_canonical_trial(self, i, f_thresh, t_impulse, goal, num_can):
self.rosbag_srv('canonical_'+str(num_can).zfill(2)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3))
self.skin_record_srv('canonical_'+str(num_can).zfill(2)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
self.ft_and_humanoid_record_srv('canonical_'+str(num_can).zfill(2)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
goal_ls = goal[0].A1.tolist()
goal_str_buf = [str(goal_ls[0])+', '+str(goal_ls[1])+', '+str(goal_ls[2])]
goal_str = ''.join(goal_str_buf)
controller = subprocess.call(['python',
'run_controller_debug.py',
'--darci',
'--t_impulse='+str(t_impulse),
'--f_thresh='+str(f_thresh),
"--goal="+goal_str])
time.sleep(1.0)
self.rosbag_srv('')
self.skin_record_srv('')
self.ft_and_humanoid_record_srv('')
data = ut.load_pickle('./result.pkl')
self.reaching_right_results.append(data['result'])
return data['result']
def run_canonical(self, goals, q_configs, num_canonical):
for cmd in q_configs['start']:
self.robot_state.setDesiredJointAngles(list(cmd))
self.robot_state.updateSendCmd()
time.sleep(2.)
for f_thresh in self.f_threshes:
for t_impulse in self.delta_t_s:
for i in xrange(self.num_trials):
self.robot_state.setDesiredJointAngles(list(q_configs['right_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(1.)
result = self.run_canonical_trial(i, f_thresh, t_impulse, goals, num_canonical)
for cmd in q_configs['restart']:
self.robot_state.setDesiredJointAngles(list(cmd))
self.robot_state.updateSendCmd()
time.sleep(2.)
self.robot_state.setDesiredJointAngles(list(q_configs['right_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(1.)
data2 = {}
data2['reaching_straight'] = self.reaching_right_results
        ut.save_pickle(data2, './combined_results_for_canonical'+str(num_canonical)+'.pkl')
def run_foliage_trial(self, i, f_thresh, t_impulse, goal, num_reach, record = True):
if record == True:
self.rosbag_srv('foliage_goal_'+str(num_reach).zfill(3)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
self.skin_record_srv('foliage_goal_'+str(num_reach).zfill(3)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
self.ft_and_humanoid_record_srv('foliage_goal_'+str(num_reach).zfill(3)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
goal_ls = goal
goal_str_buf = [str(goal_ls[0])+', '+str(goal_ls[1])+', '+str(goal_ls[2])]
goal_str = ''.join(goal_str_buf)
controller = subprocess.call(['python',
'run_controller_debug.py',
'--darci',
'--t_impulse='+str(t_impulse),
'--f_thresh='+str(f_thresh),
"--goal="+goal_str])
time.sleep(1.0)
if record == True:
self.rosbag_srv('')
self.skin_record_srv('')
self.ft_and_humanoid_record_srv('')
data = ut.load_pickle('./result.pkl')
return data['result']
def run_foliage_reach(self, goals, q_configs, num_reach):
if self.first_reach == True:
self.first_reach = False
for cmd in q_configs['start']:
self.robot_state.setDesiredJointAngles(list(cmd))
self.robot_state.updateSendCmd()
time.sleep(2.)
for f_thresh in self.f_threshes:
for t_impulse in self.delta_t_s:
for i in xrange(self.num_trials):
self.robot_state.setDesiredJointAngles(list(q_configs['trial_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(1.)
result = self.run_foliage_trial(i, f_thresh, t_impulse, goals, num_reach)
offset = 0.20 - goals[2]
goals[2] = goals[2]+offset
counter = (str(i)+'_up').zfill(6)
reset_result = self.run_foliage_trial(counter, f_thresh, t_impulse, goals, num_reach)
reset_result = self.run_foliage_trial(i, f_thresh, t_impulse, self.goal_reset, num_reach, record = False)
if result != 'success':
raw_input('Help me a bit please ..')
for cmd in q_configs['restart']:
self.robot_state.setDesiredJointAngles(list(cmd))
self.robot_state.updateSendCmd()
time.sleep(2.)
self.robot_state.setDesiredJointAngles(list(q_configs['trial_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(1.)
data2 = {}
data2['reaching_straight'] = self.reaching_right_results
        ut.save_pickle(data2, './combined_results_for_foliage.pkl')
def run_first_impact(self, goals, q_configs):
for cmd in q_configs['start']:
self.robot_state.setDesiredJointAngles(list(cmd))
self.robot_state.updateSendCmd()
time.sleep(2.)
for f_thresh in self.f_threshes:
for t_impulse in self.delta_t_s:
for i in xrange(self.num_trials):
self.robot_state.setDesiredJointAngles(list(q_configs['left_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(1.)
side = 'left'
result = self.run_trial(i, side, f_thresh, t_impulse, goals[side])
if result == 'success':
self.robot_state.setDesiredJointAngles(list(q_configs['right_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(2.)
else:
for cmd in q_configs['left_to_right_restart']:
self.robot_state.setDesiredJointAngles(list(cmd))
self.robot_state.updateSendCmd()
time.sleep(2.)
self.robot_state.setDesiredJointAngles(list(q_configs['right_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(1.)
side = 'right'
result = self.run_trial(i, side, f_thresh, t_impulse, goals[side])
if result == 'success':
self.robot_state.setDesiredJointAngles(list(q_configs['left_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(2.)
else:
for cmd in q_configs['right_to_left_restart']:
self.robot_state.setDesiredJointAngles(list(cmd))
self.robot_state.updateSendCmd()
time.sleep(2.)
self.robot_state.setDesiredJointAngles(list(q_configs['left_start'][0]))
self.robot_state.updateSendCmd()
time.sleep(1.)
data2 = {}
data2['reaching_left'] = self.reaching_left_results
data2['reaching_right'] = self.reaching_right_results
        ut.save_pickle(data2, './combined_results_for_first_impact.pkl')
def in_hull(self, p, hull):
"""
Test if points in `p` are in `hull`
`p` should be a `NxK` coordinates of `N` points in `K` dimension
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimension for which a Delaunay triangulation
will be computed
"""
from scipy.spatial import Delaunay
if not isinstance(hull,Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p)>=0
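    # Illustrative sketch (not part of the original file): in_hull() expects N query
    # points of dimension K and the MxK vertices of the hull. The unit square and the
    # two query points below are arbitrary values used only for demonstration.
    def _in_hull_example(self):
        square = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])  # MxK hull vertices
        points = np.array([[0.5, 0.5], [2.0, 2.0]])          # NxK query points
        return self.in_hull(points, square)                  # -> array([ True, False])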
if __name__ == '__main__':
num_trials = 1
#f_threshes = [10.] #, 15.]
f_threshes = [5.]
#delta_t_s = [2., 4., 16., 48.]
delta_t_s = [8.]
#delta_t_s = [16., 48.]
data = ut.load_pickle('./joint_and_ee_data.pkl')
goal = data['ee_positions']['restart']
goal_reset = goal[0].A1.tolist()
runner = BatchRunner(num_trials, f_threshes, delta_t_s, goal_reset)
# goals = {'left':data['ee_positions']['right_start'],
# 'right':data['ee_positions']['left_start']}
# runner.run_first_impact(goals, data['q_configs'])
# data = ut.load_pickle('./starting_configs.pkl')
# goals = data['ee_positions']['goal']
# #runner.run_slip_impact(goals, data['q_configs'])
# runner.run_canonical(data['ee_positions']['goal'], data['q_configs'], 5)
range_pos = np.array(data['ee_positions']['range']).reshape(7,3)
z_max = -0.05
z_min = -0.25
x_max = np.max(range_pos[:,0])
x_min = np.min(range_pos[:,0])
y_max = np.max(range_pos[:,1])
y_min = np.min(range_pos[:,1])
goals = []
for i in xrange(120):
flag = False
while flag == False:
x_rand, y_rand, z_rand = np.random.rand(3)
x = x_rand*(x_max-x_min)+x_min
y = y_rand*(y_max-y_min)+y_min
z = z_rand*(z_max-z_min)+z_min
flag = runner.in_hull(np.array([x, y]), range_pos[:, 0:2].reshape(7,2))
if np.sqrt(x**2+(y-0.185)**2) < 0.30:
flag = False
goal_ls = [x, y, z]
goals.append(goal_ls)
ut.save_pickle(goals, './goal_positions.pkl')
runner.run_foliage_reach(goal_ls, data['q_configs'], i)
| StarcoderdataPython |
90357 | <filename>macord/bot.py
import aiohttp
import asyncio
import json
import requests
from typing import Any, Callable
from .message import *
class Bot(object):
def __init__(self, token: str) -> None:
self.__token: str = token
self.__heartbeat_interval: float = 0.0
self.__gateway_url: str = None
self.__ws: aiohttp.ClientWebSocketResponse = None
self.__message_create_callback: Callable[[Bot, Message], Any] = None
self.__message_update_callback: Callable[[Bot, Message], Any] = None
def run(self):
resp = requests.get(
'https://discord.com/api/v9/gateway/bot',
headers={
"Authorization": "Bot " + self.__token
}
)
if resp.status_code != 200:
raise requests.RequestException('failed to get gateway url')
resp_json = resp.json()
if 'url' not in resp_json:
raise KeyError('invalid response when get gateway url')
self.__gateway_url = resp_json['url'] + "?v=9&encoding=json"
try:
asyncio.run(self.__run())
except KeyboardInterrupt:
print("EXIT!!!")
def on_message_create(self, callback: Callable[['Bot', Message], Any]):
self.__message_create_callback = callback
def on_message_update(self, callback: Callable[['Bot', Message], Any]):
self.__message_update_callback = callback
def send_message(self, channel_id: str, message: MessageSend) -> Message:
resp = requests.post(
f"https://discord.com/api/v9/channels/{channel_id}/messages",
data=message.to_json(),
headers={
"Authorization": "Bot " + self.__token
}
)
if resp.status_code != 200:
raise requests.RequestException('failed to send message')
resp_json = resp.json()
return Message(resp_json)
async def __heartbeat(self):
heartbeat_payload = {"op": 1, "d": None}
while True:
await asyncio.sleep(self.__heartbeat_interval / 1000)
await self.__ws.send_json(heartbeat_payload)
async def __run(self):
session = aiohttp.ClientSession()
self.__ws = await session.ws_connect(self.__gateway_url)
resp = await self.__ws.receive_json()
if 'op' not in resp or resp['op'] != 10 or 'd' not in resp or 'heartbeat_interval' not in resp['d']:
raise KeyError('invalid response when connected to gateway')
self.__heartbeat_interval = resp['d']['heartbeat_interval']
heartbeat_task: asyncio.Task = asyncio.create_task(self.__heartbeat())
await self.__ws.send_json({
"op": 2,
"d": {
"token": <PASSWORD>.__token,
"intents": 513,
"properties": {
"$os": "linux",
"$browser": "disco",
"$device": "pc"
}
}
})
try:
while True:
resp = await self.__ws.receive()
if resp.type == aiohttp.WSMsgType.TEXT:
respJson = resp.json()
if respJson['op'] == 0:
if respJson['t'] == 'MESSAGE_CREATE' and self.__message_create_callback != None:
self.__message_create_callback(self, Message(respJson['d']))
elif respJson['t'] == 'MESSAGE_UPDATE' and self.__message_update_callback != None:
self.__message_update_callback(self, Message(respJson['d']))
except:
pass
heartbeat_task.cancel()
await self.__ws.close()
await session.close()
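# Illustrative usage sketch (not part of the original file): registering callbacks and
# starting the bot. The token is a placeholder supplied by the caller, and the printed
# output is an assumption for demonstration; only APIs defined in this class are used.
def _run_bot_example(token: str) -> None:
    bot = Bot(token)
    bot.on_message_create(lambda b, msg: print("message created:", msg))
    bot.on_message_update(lambda b, msg: print("message updated:", msg))
    bot.run()  # blocks: connects to the gateway, sends heartbeats and dispatches events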
| StarcoderdataPython |
62951 | <gh_stars>1-10
# Copyright (C) 2013-2015 Ragpicker Developers.
# This file is part of Ragpicker Malware Crawler - http://code.google.com/p/malware-crawler/
from yapsy.IPlugin import IPlugin
from core.abstracts import Report
class MySQL(IPlugin, Report):
"""Stores data from long-run analysis in MySQL."""
def run(self, results, objfile):
        # The import must happen here; otherwise configurations without MySQL raise an error
from core.databaseMysql import DatabaseMySQL
"""Writes report.
@param results: analysis results dictionary.
@param objfile: file object
"""
database = DatabaseMySQL()
print "mysql.py Methode Run"
"""
# Count query using URL hash and file hash
count = database.countRagpickerDB(results["Info"]["file"]["md5"], results["Info"]["url"]["md5"])
# If report available for the file and url -> not insert
if count == 0:
# Create a copy of the dictionary. This is done in order to not modify
# the original dictionary and possibly compromise the following
# reporting modules.
report = dict(results)
# Store the report
database.insertRagpickerDB(report)
"""
def deleteAll(self):
"""Deletes all reports.
"""
print "mysql.py Methode DeleteAll"
"""
        # Delete all Ragpicker data from MongoDB
count = Database().deleteRagpickerDB()
print "*** MongoDB (Ragpicker)***"
print "deleted documents:" + str(count)
print ""
""" | StarcoderdataPython |
4804383 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.resources.properties.ListDuplicatesAllowed import ListDuplicatesAllowed # pylint: disable=E0401
class TestListDuplicatesAllowed(BaseRuleTestCase):
"""Test Allowed Value Property Configuration"""
def setUp(self):
"""Setup"""
super(TestListDuplicatesAllowed, self).setUp()
self.collection.register(ListDuplicatesAllowed())
self.success_templates = [
'test/fixtures/templates/good/resources/properties/list_duplicates_allowed.yaml'
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative(
'test/fixtures/templates/bad/resources/properties/list_duplicates_allowed.yaml', 3)
| StarcoderdataPython |
106531 | import torch.nn as nn
import torch
import numpy as np
class VNet(nn.Module):
def __init__(self, nb_classes, in_channels=1, depth=5,
start_filters=16, batchnorm=True, mode="AE", input_size=None):
assert mode in ['AE', 'classifier'], "Unknown mode selected, currently supported are: 'AE' and 'classifier'"
if mode == 'classifier' and (input_size is None or len(input_size) != 3):
raise ValueError('The input size must be set as HxWxD')
super(VNet, self).__init__()
self.mode = mode
self.input_size = input_size
self.nb_classes = nb_classes
self.in_channels = in_channels
self.start_filters = start_filters
if self.mode == "AE":
self.up = []
self.down = []
nconvs = [min(cnt+1, 3) for cnt in range(depth)] # Nb of convs in each Down module
# Create the encoder pathway
for cnt in range(depth):
in_channels = self.in_channels if cnt == 0 else out_channels
out_channels = self.start_filters * (2 ** cnt)
dconv = False if cnt == 0 else True # apply a down conv ?
self.down.append(
Down(in_channels, out_channels,
nconv=nconvs[cnt], dconv=dconv,
batchnorm=batchnorm))
if self.mode == "AE":
# Create the decoder pathway
# - careful! decoding only requires depth-1 blocks
for cnt in range(depth - 1):
in_channels = out_channels
out_channels = in_channels // 2
self.up.append(
Up(in_channels, out_channels,
nconv=nconvs[-1-cnt],
batchnorm=batchnorm))
# Add the list of modules to current module
self.down = nn.ModuleList(self.down)
if self.mode == "AE":
self.up = nn.ModuleList(self.up)
# Get ouptut segmentation
if self.mode == "AE":
self.final_layer = nn.Conv3d(out_channels, self.nb_classes, kernel_size=1, groups=1, stride=1)
else: # Classification
(h, w, d) = np.array(self.input_size) // 2**(depth -1)
self.final_layer = nn.Sequential(
nn.Linear(out_channels*h*w*d, 128),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(128, 128),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(128, self.nb_classes))
# Weight initialization
self.weight_initializer()
def weight_initializer(self):
for module in self.modules():
if isinstance(module, nn.ConvTranspose3d) or isinstance(module, nn.Conv3d):
nn.init.xavier_normal_(module.weight)
nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm3d):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, 0, 0.01)
nn.init.constant_(module.bias, 0)
    def forward(self, x):
        encoder_outs = []
        for module in self.down:
            x = module(x)
            encoder_outs.append(x)
        if self.mode == "AE":
            # Decoder path: walk back up through the skip connections
            encoder_outs = encoder_outs[:-1][::-1]
            for cnt, module in enumerate(self.up):
                x_up = encoder_outs[cnt]
                x = module(x, x_up)
        else:
            # Classifier head: flatten the deepest feature map for the fully-connected layers
            x = x.view(x.size(0), -1)
        x = self.final_layer(x)
        return x
class LUConv(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size=5, stride=1, padding=0,
batchnorm=True, bias=True, mode="conv"):
super(LUConv, self).__init__()
if mode == "conv": # Usual Conv
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
elif mode == "transpose": # UpConv
self.conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias)
if batchnorm:
self.bn = nn.BatchNorm3d(out_channels)
self.relu = nn.ReLU(True)
self.ops = nn.Sequential(self.conv, self.bn, self.relu)
def forward(self, x):
x = self.ops(x)
return x
class NConvs(nn.Module):
def __init__(self, in_channels, out_channels, nconv=3,
kernel_size=5, stride=1, padding=0,
batchnorm=True, bias=True, mode="conv"):
super(NConvs, self).__init__()
self.ops = nn.Sequential(LUConv(in_channels, out_channels, kernel_size, stride, padding, batchnorm, bias, mode),
*[LUConv(out_channels, out_channels, kernel_size, stride, padding, batchnorm, bias, mode)
for _ in range(nconv-1)])
def forward(self, x):
x = self.ops(x)
return x
class Down(nn.Module):
def __init__(self, in_channels, out_channels, nconv=3, dconv=True, batchnorm=True):
super(Down, self).__init__()
self.dconv = dconv
self.in_channels = in_channels
if dconv:
self.down_conv = NConvs(in_channels, out_channels, 1, kernel_size=2, stride=2, batchnorm=batchnorm)
self.nconvs = NConvs(out_channels, out_channels, nconv, kernel_size=5, stride=1,
padding=2, batchnorm=batchnorm)
else:
self.nconvs = NConvs(in_channels, out_channels, nconv,
kernel_size=5, stride=1, padding=2, batchnorm=batchnorm)
def forward(self, x):
if self.dconv:
x_down = self.down_conv(x)
else:
x_down = x
x_out = self.nconvs(x_down)
# Add the input in order to learn only the residual
if self.in_channels == 1 or self.dconv:
x = x_out + x_down
else:
x = x_out
return x
class Up(nn.Module):
def __init__(self, in_channels, out_channels, nconv=3, batchnorm=True):
super(Up, self).__init__()
self.up_conv = NConvs(in_channels, out_channels, 1, kernel_size=2, stride=2, batchnorm=batchnorm, mode="transpose")
self.nconvs = NConvs(in_channels, out_channels, nconv, kernel_size=5, stride=1, padding=2, batchnorm=batchnorm)
def forward(self, x_down, x_up):
x_down = self.up_conv(x_down)
xcat = torch.cat((x_up, x_down), dim=1)
x = self.nconvs(xcat)
x = x + x_down
return x
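# Illustrative sketch (not part of the original file): building the VNet in both modes
# and running a dummy forward pass. The 32^3 volume, depth, filter count and class
# count are assumptions chosen only so the example is small enough to run on CPU.
def _vnet_usage_example():
    x = torch.randn(1, 1, 32, 32, 32)  # (batch, channels, H, W, D)
    autoencoder = VNet(nb_classes=2, in_channels=1, depth=3, start_filters=4, mode="AE")
    print(autoencoder(x).shape)        # segmentation-style output: (1, 2, 32, 32, 32)
    classifier = VNet(nb_classes=2, in_channels=1, depth=3, start_filters=4,
                      mode="classifier", input_size=(32, 32, 32))
    print(classifier(x).shape)         # class logits: (1, 2)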
| StarcoderdataPython |
87624 | import os
config = {
'project_path': os.getcwd() + '/../openfoam/run/Airfoil2D_full/'
} | StarcoderdataPython |
7741 |
import torch
import logging
# Transformer version 4.9.1 - Newer versions may not work.
from transformers import AutoTokenizer
from trained_gpt_model import get_inference2
def t5_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def t5_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 't5-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/t5_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_supp_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_supporting_facts_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
def bart_full_inference(review_text):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check.
# device = torch.device('cpu')
print('Using device:' + str(device))
PRETRAINED_MODEL = 'facebook/bart-base'
SEQ_LENGTH = 600
tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<answer>', '<context>']}
)
model = torch.load("../trained_models/bart_model_hotpot_full_context_last.pth")
model.eval()
encoded_text = tokenizer(
review_text,
padding=True,
max_length=SEQ_LENGTH,
truncation=True,
return_tensors="pt"
).to(device)
input_ids = encoded_text['input_ids']
with torch.no_grad():
output = model.generate(input_ids)
decoded_string = tokenizer.decode(output[0], skip_special_tokens=True)
logging.debug("Decoded string" + decoded_string)
print(decoded_string)
# device.empty_cache()
del model
del tokenizer
return decoded_string
# if __name__ == "__main__":
# review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)."
# t5_supp_inference(review_text, md2, device)
def get_inference(answer, context, model_name):
valuation_text = "<answer> " + answer + " <context> " + context
if model_name == 't5_supp':
return t5_supp_inference(valuation_text)
elif model_name == 't5_full':
return t5_full_inference(valuation_text)
elif model_name == 'bart_supp':
return bart_supp_inference(valuation_text)
elif model_name == 'bart_full':
return bart_full_inference(valuation_text)
elif model_name == 'gpt2':
return get_inference2(answer, context)
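# Illustrative sketch (not part of the original file): calling the dispatcher above.
# The answer/context strings are taken from the commented-out example further up, and
# 't5_supp' is one of the model names handled by get_inference.
def _get_inference_example():
    answer = "a fusional language"
    context = ("Typologically, Estonian represents a transitional form from an "
               "agglutinating language to a fusional language.")
    return get_inference(answer, context, model_name="t5_supp")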
| StarcoderdataPython |
120099 | <gh_stars>1-10
from threading import Thread
from time import sleep
from os import _exit as kill
from _thread import interrupt_main as terminate
import logging
_log = logging.getLogger(__name__)
class Watchdog(object):
def __init__(self, timeout_seconds):
# type: (float) -> Watchdog
"""
A watchdog service that shuts down the application, should it become unresponsive.
The application must call Watchdog.alive() at least every timeout_seconds to signal
it's alive. Failing to do so will prompt the watchdog to shut down all threads
and then kill the application.
Recommended usage is inside a "with" block.
"""
self._interval = timeout_seconds
self._thread = None
self._alive_flag = True
self._do_watch = False
def is_watching(self):
# type: () -> bool
return self._do_watch or self._thread is not None
def start(self):
if self.is_watching():
return
_log.debug('starting')
self._do_watch = True
self._thread = Thread(target=self._watch)
self._thread.start()
_log.debug('started')
def stop(self):
if not self.is_watching():
return
_log.debug('stopping')
self._do_watch = False
self._thread.join(self._interval * 2)
self._thread = None
_log.debug('stopped')
def alive(self):
self._alive_flag = True
_log.debug('got alive signal')
def _watch(self):
while self._do_watch:
sleep(self._interval)
if self._alive_flag:
self._reset_alive()
else:
_log.debug('no alive signal received for more than ' + str(self._interval) + ' seconds')
self.shutdown()
def _reset_alive(self):
_log.debug('resetting alive flag')
self._alive_flag = False
def shutdown(self):
_log.debug('terminating threads')
terminate()
# give the threads time to shutdown gracefully
sleep(self._interval * 2)
_log.debug('kill')
kill(1)
def __enter__(self):
self.start()
return self # important
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
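# Illustrative usage sketch (not part of the original module): the watchdog is meant to
# be used as a context manager, with alive() called more often than timeout_seconds.
# The 2-second timeout and the dummy work loop are assumptions for demonstration only.
def _watchdog_usage_example():
    with Watchdog(timeout_seconds=2.0) as dog:
        for _ in range(3):
            sleep(0.5)   # simulate a unit of work
            dog.alive()  # signal liveness before the timeout expires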
| StarcoderdataPython |
1703987 | from typing import AsyncIterable, Iterable, Any, List
from os import scandir, DirEntry, stat_result
from .wrap import to_thread
class EntryWrapper:
__slots__ = "entry",
def __init__(self, entry: DirEntry):
self.entry = entry
def __getattr__(self, attr: str) -> Any:
return getattr(self.entry, attr)
def __repr__(self) -> str:
name = type(self).__name__
return f"{name}<{self.entry}>"
async def inode(self) -> int:
return await to_thread(self.entry.inode)
async def is_dir(self, *, follow_symlinks: bool = True) -> bool:
return await to_thread(self.entry.is_dir, follow_symlinks=follow_symlinks)
async def is_file(self, *, follow_symlinks: bool = True) -> bool:
return await to_thread(self.entry.is_file, follow_symlinks=follow_symlinks)
async def is_symlink(self) -> bool:
return await to_thread(self.entry.is_symlink)
async def stat(self, *, follow_symlinks: bool = True) -> stat_result:
return await to_thread(self.entry.stat, follow_symlinks=follow_symlinks)
def wrapped_scandir(*args, **kwargs) -> Iterable[EntryWrapper]:
entries = scandir(*args, **kwargs)
yield from map(EntryWrapper, entries)
def _scandir_results(*args, **kwargs) -> List[EntryWrapper]:
return list(wrapped_scandir(*args, **kwargs))
async def scandir_async(*args, **kwargs) -> AsyncIterable[EntryWrapper]:
results = await to_thread(_scandir_results, *args, **kwargs)
for result in results:
yield result | StarcoderdataPython |
101187 | <reponame>KenWoo/Algorithm<filename>Algorithms/Easy/1309. Decrypt String from Alphabet to Integer Mapping/answer.py<gh_stars>0
from typing import List
class Solution:
def freqAlphabets(self, s: str) -> str:
dict = {}
for i in range(1, 10):
dict[str(i)] = chr(97+i-1)
for i in range(10, 27):
dict[str(i)+'#'] = chr(97+i-1)
N = len(s)
i = 0
res = []
while i < N:
if s[i] != '#':
if i == N - 1:
prev = i
while s[prev] != '#' and prev > -1:
prev -= 1
for j in range(prev+1, N):
res.append(dict[s[j]])
else:
prev = i-1
while s[prev] != '#' and prev > -1:
prev -= 1
for j in range(prev+1, i-2):
res.append(dict[s[j]])
res.append(dict[s[i-2: i+1]])
i += 1
return ''.join(res)
if __name__ == "__main__":
s = Solution()
result = s.freqAlphabets(
"10#11#12")
print(result)
| StarcoderdataPython |
3363839 | <reponame>BenjaminAllardEngineer/Adversarial-Attacks-on-Neural-Networks
# Generate adversarial examples for the FC model
# Save some of them in a file
filename = 'data/pickle/fc_adv_examples'
# Create and export 200 adversarial examples for later with epsilon=0.05
fc_model = torch.load(model_2_file)
acc, ex = test_attack_bis(fc_model, test_loader, 0.05, size_limit=200)
with open(filename, 'wb') as file:
pickle.dump(ex, file) | StarcoderdataPython |
1755228 | <gh_stars>1-10
"""
===================
Mirai configuration
===================
"""
from pydantic import BaseModel
class Config(BaseModel):
"""
    Mirai configuration class. It is mixed into the bot's main configuration when the adapter is loaded.
"""
__config_name__ = 'mirai'
"""
    Name of the configuration.
"""
adapter_type: str = 'ws'
"""
    Adapter type; must match the mirai-api-http configuration.
:type: str
"""
host: str = '127.0.0.1'
"""
    Hostname of the local machine.
:type: str
"""
port: int = 8080
"""
    Port to listen on.
:type: int
"""
url: str = '/mirai/ws'
"""
    WebSocket path; must match the mirai-api-http configuration.
:type: str
"""
api_timeout: int = 1000
"""
    Timeout for waiting for a response when making API calls.
:type: int
"""
verify_key: str = ''
"""
    Authentication key used when establishing the connection; must match the verifyKey in the mirai-api-http configuration. Leave empty if verification is disabled.
:type: str
"""
qq: int = 10001
"""
    QQ account number of the bot; must be specified.
:type: int
"""
| StarcoderdataPython |
193331 | from setuptools import setup
import sys
import os
import re
IS_PY_2 = (sys.version_info[0] <= 2)
def read_readme():
with open('README.md') as f:
return f.read()
def read_version():
# importing gpustat causes an ImportError :-)
__PATH__ = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(__PATH__, 'gpustat.py')) as f:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
f.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find __version__ string")
install_requires = [
'six',
'nvidia-ml-py>=7.352.0' if IS_PY_2 else \
'nvidia-ml-py3>=7.352.0',
'psutil',
'blessings>=1.6',
]
tests_requires = [
'mock>=2.0.0',
'nose',
'nose-cover3'
]
setup(
name='gpustat',
version=read_version(),
license='MIT',
description='An utility to monitor NVIDIA GPU status and usage',
long_description=read_readme(),
url='https://github.com/wookayin/gpustat',
author='<NAME>',
author_email='<EMAIL>',
keywords='nvidia-smi gpu cuda monitoring gpustat',
classifiers=[
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: System :: Monitoring',
],
#packages=['gpustat'],
py_modules=['gpustat'],
install_requires=install_requires,
extras_require={'test': tests_requires},
tests_require=tests_requires,
test_suite='nose.collector',
entry_points={
'console_scripts': ['gpustat=gpustat:main'],
},
include_package_data=True,
zip_safe=False,
)
| StarcoderdataPython |
3293796 | from Colors_Initialize import color_lookup
def color_pair_to_string(major_color, minor_color):
return f'{major_color}\t\t{minor_color}'
def console_print_ref_manual():
print('################# Reference Manual #################')
print('Major Color\tMinor Color\tPair Number')
pair_id = 1
for major_color in color_lookup().MAJOR_COLORS:
for minor_color in color_lookup().MINOR_COLORS:
print(f'{color_pair_to_string(major_color, minor_color)}\t\t{pair_id}')
pair_id += 1 | StarcoderdataPython |
1695220 | <filename>gapid_tests/command_buffer_tests/vkCmdSetStencilReference_test/vkCmdSetStencilReference.py
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest, get_read_offset_function
from gapit_test_framework import GapidUnsupportedException
from vulkan_constants import *
@gapit_test("vkCmdSetStencilReference_test")
class SetStencilReference(GapitTest):
def expect(self):
first_set_stencil = require(
self.next_call_of("vkCmdSetStencilReference"))
require_not_equal(0, first_set_stencil.int_commandBuffer)
require_equal(VK_STENCIL_FACE_FRONT_BIT,
first_set_stencil.int_faceMask)
require_equal(0, first_set_stencil.int_reference)
second_set_stencil = require(
self.next_call_of("vkCmdSetStencilReference"))
require_not_equal(0, second_set_stencil.int_commandBuffer)
require_equal(VK_STENCIL_FACE_BACK_BIT,
second_set_stencil.int_faceMask)
require_equal(10, second_set_stencil.int_reference)
third_set_stencil = require(
self.next_call_of("vkCmdSetStencilReference"))
require_not_equal(0, third_set_stencil.int_commandBuffer)
require_equal(VK_STENCIL_FRONT_AND_BACK,
third_set_stencil.int_faceMask)
require_equal(0xFFFFFFFF, third_set_stencil.int_reference)
| StarcoderdataPython |
1756280 | import numpy as np
class Statistic:
def __init__(self, data=None):
if data is not None:
self._data = list(data)
else:
self._data = []
def append(self, value):
self._data.append(value)
def extend(self, data):
self._data.extend(data)
def mean(self):
if self.empty:
raise ValueError('no data')
return self.asarray().mean()
def std(self):
if self.empty:
raise ValueError('no data')
return self.asarray().std()
def var(self):
if self.empty:
raise ValueError('no data')
return self.asarray().var()
def moment(self, k):
n = len(self)
if n == 0:
raise ValueError('no data')
if np.abs(np.round(k) - k) > 0 or k <= 0:
raise ValueError('positive integer expected')
return sum((x ** k for x in self._data)) / n
def lag(self, k):
n = len(self)
if n == 0:
raise ValueError('no data')
if np.abs(np.round(k) - k) > 0 or k < 0:
raise ValueError('non-negative integer expected')
if n <= k:
raise ValueError('statistic has too few samples')
ar = self.asarray()
if k == 0:
return 1
return np.corrcoef(ar[k:], ar[:-k])[0, 1]
def __len__(self):
return len(self._data)
@property
def empty(self):
return len(self._data) == 0
def as_list(self):
return list(self._data)
def as_tuple(self):
return tuple(self._data)
def asarray(self):
return np.asarray(self._data)
def pmf(self):
values = {}
for v in self._data:
if v not in values:
values[v] = 1
else:
values[v] += 1
return values
class Trace:
def __init__(self, data=None, mode='auto'):
if data is not None:
try:
valid_as_samples = all(len(item) == 2 for item in data)
valid_as_split = len(data) == 2 and len(data[0]) == len(data[1])
except TypeError as e:
raise ValueError('wrong data shape') from e
else:
if not valid_as_samples and not valid_as_split:
raise ValueError('wrong data shape')
if (mode == 'auto' and valid_as_samples) or mode == 'samples':
_data = [(t, v) for (t, v) in data]
elif mode in {'auto', 'split'}:
_data = [(t, v) for t, v in zip(*data)]
else:
raise ValueError('invalid mode')
ar = np.asarray(_data).transpose()[0]
if np.any((ar[1:] - ar[:-1]) < 0):
raise ValueError('data must be ordered by time')
self._data = _data
else:
self._data = []
def record(self, t, v):
if self._data and t < self._data[-1][0]:
raise ValueError('adding data in past prohibited')
self._data.append((t, v),)
@property
def empty(self):
return len(self._data) == 0
def __len__(self):
return len(self._data)
def pmf(self):
if self.empty:
raise ValueError('expected non-empty values')
values = {}
for i in range(0, len(self._data) - 1):
v, dt = self._data[i][1], self._data[i + 1][0] - self._data[i][0]
if self._data[i][1] not in values:
values[v] = dt
else:
values[v] += dt
total_time = sum(values.values())
values = {v: t / total_time for v, t in values.items()}
return values
def timeavg(self):
return sum(v * p for v, p in self.pmf().items())
def _convert(self, fn, mode):
if mode == 'samples':
return fn(fn([t, v]) for (t, v) in self._data)
elif mode == 'split':
timestamps, values = [], []
for (t, v) in self._data:
timestamps.append(t)
values.append(v)
if timestamps:
return fn([fn(timestamps), fn(values)])
return fn()
else:
raise ValueError('invalid mode')
def as_list(self, mode='samples'):
return self._convert(list, mode)
def as_tuple(self, mode='samples'):
return self._convert(tuple, mode)
def asarray(self, mode='samples'):
return np.asarray(self.as_list(mode))
class Intervals:
def __init__(self, timestamps=None):
if timestamps:
_timestamps = [0] + list(timestamps)
try:
_zipped = zip(_timestamps[:-1], _timestamps[1:])
if any(x > y for x, y in _zipped):
raise ValueError('timestamps must be ascending')
except TypeError as e:
raise TypeError('only numeric values expected') from e
self._timestamps = [0] + list(timestamps)
else:
self._timestamps = [0]
@property
def last(self):
return self._timestamps[-1]
@property
def empty(self):
return len(self._timestamps) == 1
def __len__(self):
return len(self._timestamps) - 1
def record(self, timestamp):
try:
if timestamp < self.last:
raise ValueError('prohibited timestamps from past')
except TypeError as e:
raise TypeError('only numeric values expected') from e
self._timestamps.append(timestamp)
def statistic(self):
return Statistic(self.as_tuple())
def as_tuple(self):
ar = np.asarray(self._timestamps)
return tuple(ar[1:] - ar[:-1])
def as_list(self):
return list(self.as_tuple())
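# Illustrative sketch (not part of the original module): minimal use of the three
# classes above. All sample values are arbitrary and exist only for demonstration.
def _statistics_usage_example():
    stat = Statistic([1.0, 2.0, 3.0])
    stat.append(4.0)
    print(stat.mean(), stat.std(), stat.moment(2))
    trace = Trace([(0.0, 1), (1.0, 3), (2.5, 2)])  # (timestamp, value) samples
    print(trace.timeavg(), trace.pmf())
    arrivals = Intervals([0.5, 1.7, 2.0])          # ascending event timestamps
    print(arrivals.as_tuple(), arrivals.statistic().mean())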
| StarcoderdataPython |
4840096 | <filename>python/paddle/fluid/tests/unittests/test_unique_name.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid as fluid
class TestUniqueName(unittest.TestCase):
def test_guard(self):
with fluid.unique_name.guard():
name_1 = fluid.unique_name.generate('')
with fluid.unique_name.guard():
name_2 = fluid.unique_name.generate('')
self.assertEqual(name_1, name_2)
with fluid.unique_name.guard("A"):
name_1 = fluid.unique_name.generate('')
with fluid.unique_name.guard('B'):
name_2 = fluid.unique_name.generate('')
self.assertNotEqual(name_1, name_2)
def test_generate(self):
with fluid.unique_name.guard():
name1 = fluid.unique_name.generate('fc')
name2 = fluid.unique_name.generate('fc')
name3 = fluid.unique_name.generate('tmp')
self.assertNotEqual(name1, name2)
self.assertEqual(name1[-2:], name3[-2:])
| StarcoderdataPython |
3362410 | <gh_stars>0
from rest_framework import status
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.generics import RetrieveAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rates.api.serializers import FiatRateListSerializer
from rates.models import FiatRate
class FiatRateAPIView(RetrieveAPIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
authentication_classes = [BasicAuthentication, SessionAuthentication, JSONWebTokenAuthentication]
def get(self, request):
try:
user_profile_obj = Profile.objects.get(user=request.user)
user_country = FiatRate.objects.get(country=user_profile_obj.country)
status_code = status.HTTP_200_OK
response = {
'success': True,
'status code': status_code,
'message': 'User Fiat Fetched',
'data': [{
'updated': user_country.updated,
'timestamp': user_country.timestamp,
'country': user_profile_obj.get_country(),
'dollar_rate': user_country.dollar_rate
}]
}
except Exception as e:
user_profile_obj = Profile.objects.get(user=request.user)
user_country = FiatRate.objects.get(country='United States Of America')
status_code = status.HTTP_200_OK
response = {
'success': True,
'status code': status_code,
'message': 'User Fiat Fetched',
'data': [{
'updated': user_country.updated,
'timestamp': user_country.timestamp,
'country': user_profile_obj.get_country(),
'dollar_rate': user_country.dollar_rate
}]
}
return Response(response, status=status_code)
class FiatListView(ListAPIView):
authentication_classes = [BasicAuthentication, SessionAuthentication, JSONWebTokenAuthentication]
serializer_class = FiatRateListSerializer
queryset = FiatRate.objects.all()
permission_classes = (IsAuthenticatedOrReadOnly,)
paginate_by = 15 | StarcoderdataPython |
1620945 | <gh_stars>0
from pyglet import image
import os, sys
base = os.getcwd() + "/Assets/"
icon = image.load(base + 'icon.png')
mario_img = image.load(base + 'mario.png')
luigi_img = image.load(base + 'luigi.png')
| StarcoderdataPython |
1651997 | <reponame>Inch4Tk/label_server<filename>flask_label/api.py
import json
import os
import random
import xml.etree.ElementTree as ET
from xml.dom import minidom
import tensorflow as tf
from flask import (
Blueprint, current_app, send_from_directory, jsonify, request
)
from object_detection.utils import dataset_util
from annotation_predictor import accept_prob_predictor
from annotation_predictor.util.class_reader import ClassReader
from annotation_predictor.util.send_accept_prob_request import send_accept_prob_request
from annotation_predictor.util.util import compute_feature_vector
from flask_label.auth import api_login_required
from flask_label.database import db
from flask_label.models import (
ImageTask, ImageBatch, VideoBatch, image_batch_schema, video_batch_schema, image_task_schema
)
from object_detector import train_od_model
from object_detector.send_od_request import send_od_request
from object_detector.util import parse_class_ids_json_to_pbtxt, update_number_of_classes
from settings import known_class_ids_annotation_predictor, \
class_ids_od, path_to_od_train_record
bp = Blueprint("api", __name__,
url_prefix="/api")
def batch_statistics(batch):
"""
    Computes the total number of tasks and the number of labeled tasks in a batch.
Args:
batch: identifier of a batch
Returns: total number of tasks in batch, number of labeled tasks in batch
"""
lc = 0
for task in batch["tasks"]:
if task["is_labeled"]:
lc += 1
return len(batch["tasks"]), lc
def get_path_to_image(img_id: int):
"""
    Computes the path to an image in the instance directory.
Args:
img_id: identifier of an image
Returns: absolute path to image
"""
img_task = None
while img_task is None:
img_task = ImageTask.query.filter_by(id=img_id).first()
img_path = os.path.join(
current_app.instance_path,
current_app.config["IMAGE_DIR"],
img_task.batch.dirname,
img_task.filename
)
return img_path
def get_path_to_label(img_id: int):
"""
    Computes the path to a label belonging to an image in the instance directory.
Args:
img_id: identifier of an image
Returns: absolute path to label
"""
img_task = None
while img_task is None:
img_task = ImageTask.query.filter_by(id=img_id).first()
path = os.path.join(
current_app.instance_path,
current_app.config['IMAGE_DIR'],
img_task.batch.dirname,
current_app.config['IMAGE_LABEL_SUBDIR'],
img_task.filename
)
base = os.path.splitext(path)[0]
path = base + '.xml'
return path
def get_path_to_prediction(img_id: int):
"""
    Computes the path to a prediction belonging to an image in the instance directory.
Args:
img_id: identifier of an image
Returns: absolute path to prediction
"""
img_task = None
while img_task is None:
img_task = ImageTask.query.filter_by(id=img_id).first()
pred_dir_path = os.path.join(
current_app.instance_path,
current_app.config["IMAGE_DIR"],
img_task.batch.dirname,
current_app.config['IMAGE_PREDICTIONS_SUBDIR'],
img_task.filename
)
base = os.path.splitext(pred_dir_path)[0]
pred_path = base + '.json'
return pred_path
def read_labels_from_xml(path):
"""
    Reads in an xml-file containing labels for an image and parses it into width, height, classes and boxes.
Returns:
width: width of the respective image of the label
height: height of the respective image of the label
classes: classes of annotated objects
boxes: position of annotated objects
"""
width = '-1'
height = '-1'
classes = []
boxes = []
if os.path.exists(path):
tree = ET.parse(path)
root = tree.getroot()
for name in root.findall('./size/width'):
width = name.text
for name in root.findall('./size/height'):
height = name.text
for name in root.findall('./object/name'):
classes.append(name.text)
for i, xmin in enumerate(root.findall('./object/bndbox/xmin')):
boxes.append([])
boxes[i].append(int(xmin.text, 10))
for i, ymin in enumerate(root.findall('./object/bndbox/ymin')):
boxes[i].append(int(ymin.text, 10))
for i, xmax in enumerate(root.findall('./object/bndbox/xmax')):
boxes[i].append(int(xmax.text, 10))
for i, ymax in enumerate(root.findall('./object/bndbox/ymax')):
boxes[i].append(int(ymax.text, 10))
return width, height, classes, boxes
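# Illustrative sketch (not part of the original module): the Pascal-VOC style layout
# that read_labels_from_xml() parses and save_labels_to_xml() writes. The file name,
# class and coordinates are made up for demonstration.
_EXAMPLE_LABEL_XML = """
<annotation>
  <size><width>640</width><height>480</height></size>
  <object>
    <name>person</name>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>110</xmax><ymax>220</ymax></bndbox>
  </object>
</annotation>
"""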
def save_labels_to_xml(data, path):
"""
Save labels in data to xml-file specified by path or deletes the file, when data is empty.
Args:
data: dict containing the label data
path: path to xml-file where the labels will be saved
"""
classes = data['classes']
boxes = data['boxes']
width = data['width']
height = data['height']
if len(classes) != 0:
root = ET.Element('annotation')
size = ET.SubElement(root, 'size')
ET.SubElement(size, 'width').text = str(width)
ET.SubElement(size, 'height').text = str(height)
for i, c in enumerate(classes):
obj = ET.SubElement(root, 'object')
ET.SubElement(obj, 'name').text = c
box = ET.SubElement(obj, 'bndbox')
ET.SubElement(box, 'xmin').text = str(round(boxes[i][0]))
ET.SubElement(box, 'ymin').text = str(round(boxes[i][1]))
ET.SubElement(box, 'xmax').text = str(round(boxes[i][2]))
ET.SubElement(box, 'ymax').text = str(round(boxes[i][3]))
rough_str = ET.tostring(root)
pretty_str = minidom.parseString(rough_str).toprettyxml(indent=" ")
with open(path, 'w') as f:
f.write(pretty_str)
elif os.path.exists(path):
os.remove(path)
def create_tf_example(example: list):
"""
Creates a tf.train.Example object from an image and its labels which can be used in
the training pipeline for the object detector.
Args:
example: list containing information about the image and its labels.
Returns: information of example parsed into a tf.train.Example object
"""
width = int(example[0])
height = int(example[1])
filename = str.encode(example[2])
with tf.gfile.GFile(example[3], 'rb') as f:
encoded_image_data = bytes(f.read())
image_format = b'jpg'
boxes = example[5]
xmins = []
ymins = []
xmaxs = []
ymaxs = []
for b in boxes:
xmins.append(b[0])
ymins.append(b[1])
xmaxs.append(b[2])
ymaxs.append(b[3])
xmins = [x / width for x in xmins]
xmaxs = [x / width for x in xmaxs]
ymins = [y / height for y in ymins]
ymaxs = [y / height for y in ymaxs]
class_reader = ClassReader(known_class_ids_annotation_predictor)
classes_text = example[4][:]
classes = []
none_vals = []
for i, cls in enumerate(classes_text):
if cls is None:
none_vals.append(i)
for index in sorted(none_vals, reverse=True):
classes_text.pop(index)
xmins.pop(index)
ymins.pop(index)
xmaxs.pop(index)
ymaxs.pop(index)
for i, cls in enumerate(classes_text):
classes.append(class_reader.get_index_of_class_from_label(cls))
class_encoded = str.encode(cls)
classes_text[i] = class_encoded
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_image_data),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
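# Illustrative sketch (not part of the original module): the positional layout of the
# `example` list consumed by create_tf_example above, inferred from how it is indexed.
# File names, classes and box coordinates are made up for demonstration.
def _create_tf_example_sketch():
    example = [
        640,                  # example[0]: image width in pixels
        480,                  # example[1]: image height in pixels
        'img_001.jpg',        # example[2]: file name stored in the record
        '/tmp/img_001.jpg',   # example[3]: path to the encoded jpeg on disk
        ['person', 'dog'],    # example[4]: class label text, one entry per box
        [[10, 20, 110, 220],  # example[5]: absolute [xmin, ymin, xmax, ymax] boxes
         [200, 40, 300, 140]],
    ]
    return create_tf_example(example)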
@bp.route("/batches/")
@api_login_required
def batches():
"""Return all image and video directories and their stats."""
img_batches = ImageBatch.query.options(db.joinedload('tasks')).all()
video_batches = VideoBatch.query.all()
image_batch_data = image_batch_schema.dump(img_batches, many=True)
video_batch_data = video_batch_schema.dump(video_batches, many=True)
# Add postprocessing info about statistics
for batch in image_batch_data:
batch["imgCount"], batch["labeledCount"] = batch_statistics(batch)
return jsonify({"imageBatches": image_batch_data, "videoBatches": video_batch_data})
@bp.route("/img_batch/<int:batch_id>")
@api_login_required
def img_batch(batch_id):
"""Return data to a single image batch"""
img_batch = ImageBatch.query.filter_by(id=batch_id).options(db.joinedload('tasks')).first()
batch = image_batch_schema.dump(img_batch)
batch["imgCount"], batch["labeledCount"] = batch_statistics(batch)
return jsonify(batch)
@bp.route("/img_task/<int:task_id>")
@api_login_required
def image_task(task_id):
"""Return data to a single image task"""
img_task = ImageTask.query.filter_by(id=task_id).first()
return image_task_schema.jsonify(img_task)
@bp.route("/img_task/random/<int:batch_id>")
@api_login_required
def image_task_random(batch_id):
"""Return a random image task contained in a batch"""
img_tasks = []
labeled = request.args.get("labeled")
if labeled == "true":
img_tasks = ImageTask.query.filter_by(batch_id=batch_id, is_labeled=True).all()
elif labeled == "false":
img_tasks = ImageTask.query.filter_by(batch_id=batch_id, is_labeled=False).all()
else:
img_tasks = ImageTask.query.filter_by(batch_id=batch_id).all()
if not img_tasks:
return jsonify(dict())
img_task = random.choice(img_tasks)
return image_task_schema.jsonify(img_task)
@bp.route("/serve_image/<int:img_id>/")
@api_login_required
def serve_image(img_id):
"""Serves an image from the instance folder
Args:
img_id (int): Is the same as task id, since every task is matched to one image.
"""
img_path = get_path_to_image(img_id)
current_app.logger.info(img_path)
path, file = os.path.split(img_path)
return send_from_directory(path, file)
@bp.route("/serve_labels/")
@api_login_required
def serve_labels():
"""Serves labels for all images from the instance folder"""
labels = []
img_batches = ImageBatch.query.options(db.joinedload('tasks')).all()
image_batch_data = image_batch_schema.dump(img_batches, many=True)
for batch in image_batch_data:
for task in batch['tasks']:
label_path = get_path_to_label(task['id'])
width, height, classes, boxes = read_labels_from_xml(label_path)
labels.append({'id': str(task['id']),
'classes': classes,
'boxes': boxes,
'width': width,
'height': height})
return jsonify(labels)
@bp.route('/save_labels/<int:img_id>/', methods=['POST'])
@api_login_required
def save_labels(img_id):
""""Saves labels entered by a labeler for an image from the instance folder
Args:
img_id (int): Is the same as task id, since every task is matched to one image.
"""
data = request.get_json()
label_path = get_path_to_label(img_id)
if not os.path.exists(os.path.dirname(label_path)):
os.mkdir(os.path.dirname(label_path))
save_labels_to_xml(data, label_path)
return jsonify(success=True)
@bp.route('/serve_predictions/')
@api_login_required
def serve_predictions():
"""Serves predictions for all images from the instance folder"""
predictions = []
img_batches = ImageBatch.query.options(db.joinedload('tasks')).all()
image_batch_data = image_batch_schema.dump(img_batches, many=True)
for batch in image_batch_data:
for task in batch['tasks']:
img_path = get_path_to_image(task['id'])
pred_path = get_path_to_prediction(task['id'])
if os.path.exists(pred_path):
with open(pred_path, 'r') as f:
predictions.append({'id': str(task['id']), 'predictions': json.load(f)})
else:
prediction = send_od_request(img_path)
prediction = list(prediction.values())[0]
if len(prediction) > 0:
feature_vectors = []
for i, _ in enumerate(prediction):
feature_vectors.append(compute_feature_vector(prediction[i]))
acceptance_prediction = send_accept_prob_request(feature_vectors)
for i, p in enumerate(acceptance_prediction):
prediction[i]['acceptance_prediction'] = p
prediction.sort(key=lambda p: p['acceptance_prediction'], reverse=True)
predictions.append({'id': str(task['id']), 'predictions': prediction})
if not os.path.exists(os.path.dirname(pred_path)):
os.mkdir(os.path.dirname(pred_path))
with open(pred_path, 'w') as f:
json.dump(prediction, f)
return jsonify(predictions)
@bp.route('/update_predictions/<int:batch_id>/')
@api_login_required
def update_predictions(batch_id):
"""
Creates and returns predictions for all images contained in a batch whether predictions already
exist in the instance directory or not.
Args:
batch_id: id of the batch for which the predictions will be generated
"""
predictions = []
batch = ImageBatch.query.filter_by(id=batch_id).all()
batch_data = image_batch_schema.dump(batch, many=True)
for task in batch_data[0]['tasks']:
img_path = get_path_to_image(task['id'])
pred_path = get_path_to_prediction(task['id'])
prediction = send_od_request(img_path)
prediction = list(prediction.values())[0]
if len(prediction) > 0:
feature_vectors = []
for i, _ in enumerate(prediction):
feature_vectors.append(compute_feature_vector(prediction[i]))
acceptance_prediction = send_accept_prob_request(feature_vectors)
for i, p in enumerate(acceptance_prediction):
prediction[i]['acceptance_prediction'] = p
prediction.sort(key=lambda p: p['acceptance_prediction'], reverse=True)
predictions.append({'id': str(task['id']), 'predictions': prediction})
if not os.path.exists(os.path.dirname(pred_path)):
os.mkdir(os.path.dirname(pred_path))
with open(pred_path, 'w') as f:
json.dump(prediction, f)
return jsonify(predictions)
@bp.route('/save_predictions/<int:img_id>/', methods=['POST'])
@api_login_required
def save_predictions(img_id):
"""
Receives prediction data for an image and saves it in instance directory
Args:
img_id: id of the image for which the predictions will be saved
"""
predictions = request.get_json()
pred_path = get_path_to_prediction(img_id)
if not os.path.exists(os.path.dirname(pred_path)):
os.mkdir(os.path.dirname(pred_path))
result = []
for p in predictions:
result.append(p)
with open(pred_path, 'w') as f:
json.dump(result, f)
return jsonify(success=True)
# @bp.route('/update_label_performance_log/', methods=['POST'])
# @api_login_required
# def update_label_performance_log():
# """
# Receives logging data concerning the type and creation-times for newly added labels and
# appends them to the label_performance_log.
# """
# new_log_data = request.get_json()
# log_data = []
#
# if os.path.exists(path_to_label_performance_log):
# with open(path_to_label_performance_log, 'r') as f:
# log_data = json.load(f)
#
# log_data.extend(new_log_data)
#
# with open(path_to_label_performance_log, 'w') as f:
# json.dump(log_data, f)
#
# return jsonify(success=True)
#
# @bp.route('/update_model_performance_log/')
# @api_login_required
# def update_model_performance_log():
# """
# Normally called after retraining the object detector, computes the mean average precision for
# the top 2, top 5 and top 10 detections of the detector, respectively, on a test set defined
# via the global variable path_to_test_data in settings.py
# """
# map_at_2 = compute_map(path_to_test_data, path_to_od_test_data_gt, 2)
# map_at_5 = compute_map(path_to_test_data, path_to_od_test_data_gt, 5)
# map_at_10 = compute_map(path_to_test_data, path_to_od_test_data_gt, 10)
#
# maps = [map_at_2, map_at_5, map_at_10]
#
# log_data = []
#
# if os.path.exists(path_to_map_log):
# with open(path_to_map_log, 'r') as f:
# log_data = json.load(f)
#
# log_data.append(maps)
#
# with open(path_to_map_log, 'w') as f:
# json.dump(log_data, f)
#
# return jsonify(success=True)
@bp.route("/serve_classes/")
@api_login_required
def serve_classes():
"""Serves classes for all images from the instance folder"""
class_reader = ClassReader(class_ids_od)
return jsonify(list(class_reader.class_ids.values()))
@bp.route('/train_models/')
@api_login_required
def train_models():
"""Checks instance folder for new training data and uses it to further train models"""
nr_of_labels = 0
feature_vectors = []
y_ = []
img_batches = ImageBatch.query.options(db.joinedload('tasks')).all()
image_batch_data = image_batch_schema.dump(img_batches, many=True)
class_reader_od = ClassReader(class_ids_od)
class_reader_acc_prob = ClassReader(known_class_ids_annotation_predictor)
writer = tf.python_io.TFRecordWriter(path_to_od_train_record)
for batch in image_batch_data:
for task in batch['tasks']:
label_path = get_path_to_label(task['id'])
if os.path.exists(label_path):
width, height, classes, boxes = read_labels_from_xml(label_path)
for i, cls in enumerate(classes):
nr_of_labels += 1
image_path = get_path_to_image(task['id'])
class_id_od = class_reader_od.get_index_of_class_from_label(cls)
if class_id_od == -1:
class_reader_od.add_class_to_file(cls)
parse_class_ids_json_to_pbtxt()
update_number_of_classes()
class_id_accept_prob = class_reader_acc_prob.get_index_of_class_from_label(
cls)
if class_id_accept_prob == -1:
class_reader_acc_prob.add_class_to_file(cls)
if classes is not None:
tf_example = create_tf_example(
[width, height, task['filename'], image_path, classes, boxes])
writer.write(tf_example.SerializeToString())
pred_path = get_path_to_prediction(task['id'])
if os.path.exists(pred_path):
with open(pred_path, 'r') as f:
predictions = json.load(f)
for i, p in enumerate(predictions):
if 'was_successful' in p:
label = p['LabelName']
predictions[i]['LabelName'] = label
if p['was_successful'] and p['acceptance_prediction'] == 0:
feature_vectors.append(compute_feature_vector(predictions[i]))
y_.append(1.0)
elif not p['was_successful'] and p['acceptance_prediction'] == 1:
feature_vectors.append(compute_feature_vector(predictions[i]))
y_.append(0.0)
with open(pred_path, 'w') as f:
json.dump(predictions, f)
writer.close()
if len(feature_vectors) > 0:
accept_prob_predictor.main(mode='train', user_feedback={'x': feature_vectors, 'y_': y_})
writer.close()
if nr_of_labels > 0:
train_od_model.train()
return jsonify(success=True)
| StarcoderdataPython |
78274 | # TODO: Write a test THAT ACTUALLY WORKS on one of the past Hashcode problem sets.
# TODO: Code a genetic-algorithm solution.
# TODO: See whether splitting Problem into a second class (Solver?) that handles parsing + output would be more practical. It would probably be more readable.
import glob
import os
import collections
import ntpath
from typing import Union
Number = Union[int, float]
PATH_DIR_INPUTS = os.path.join("..", "inputs")
PATH_DIR_OUTPUTS = os.path.join("..", "outputs")
Input = collections.namedtuple("Input", [])
Solution = collections.namedtuple("Solution", [])
class Problem:
def parse_input(self, path_file_input: str) -> Input:
raise NotImplementedError()
def solve(self, inp: Input) -> Solution:
raise NotImplementedError()
def score(self, inp: Input, solution: Solution) -> Number:
raise NotImplementedError()
def write_output(self, solution: Solution, func_convert: callable, score: Number, id_problem: str,
path_dir_outputs: str = PATH_DIR_OUTPUTS):
# Create outputs directory if it does not exists
os.makedirs(path_dir_outputs, exist_ok=True)
path_file_output = os.path.join(path_dir_outputs, id_problem + '_' + str(score)) + ".out"
string = func_convert(solution)
with open(path_file_output, 'w') as fp:
fp.write(string)
def iter_path_files_input(extension: str = ".in", path_dir_inputs: str = PATH_DIR_INPUTS):
""" Iterate through all files located at `path_dir_inputs`.
:param extension: Suffix matching desired files. Empty string to match everything.
"""
for file_input in glob.glob(os.path.join(path_dir_inputs, "*" + extension)):
yield file_input
def get_id_problem(path_file_input: str) -> str:
""" Return the ID of a problem given its filename. """
return ntpath.basename(path_file_input)[0]
def main(problem_class: Problem, func_convert: callable, path_dir_inputs: str, path_dir_outputs: str,
inputs_to_skip=[]):
problem = problem_class()
for path_file_input in iter_path_files_input(path_dir_inputs=path_dir_inputs):
id_problem = get_id_problem(path_file_input)
print("Classe :", id_problem)
if id_problem in inputs_to_skip:
continue
inp = problem.parse_input(path_file_input)
solution = problem.solve(inp)
score = problem.score(inp=inp, solution=solution)
problem.write_output(
solution=solution,
func_convert=func_convert,
score=score,
id_problem=id_problem,
path_dir_outputs=path_dir_outputs
)
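# Example (illustrative only; MyProblem and my_solution_to_str are not part of this module):
# a concrete contest problem subclasses Problem and is run through main(), e.g.
#
#   class MyProblem(Problem):
#       def parse_input(self, path_file_input): ...
#       def solve(self, inp): ...
#       def score(self, inp, solution): ...
#
#   main(MyProblem, func_convert=my_solution_to_str,
#        path_dir_inputs=PATH_DIR_INPUTS, path_dir_outputs=PATH_DIR_OUTPUTS)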
if __name__ == "__main__":
main()
| StarcoderdataPython |
3368324 | <reponame>doersino/handwriting
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Designed to run on Uberspace. Get yourself an account and follow these guides:
# https://wiki.uberspace.de/database:postgresql
# https://gist.github.com/tessi/82981559017f79a06042d2229bfd72a8 (s/9.6/10.4/g)
import cgi
import json
import subprocess
import traceback
import sys
sys.stderr = sys.stdout
# via https://stackoverflow.com/a/14860540
def enc_print(string='', encoding='utf8'):
sys.stdout.buffer.write(string.encode(encoding) + b'\n')
enc_print("Content-type: text/html")
enc_print()
form = cgi.FieldStorage()
if not form or "pen" not in form:
enc_print("no data received :(")
else:
validated = json.loads(form["pen"].value)
pen = str(validated).replace("'", "\"")
try:
enc_print(subprocess.check_output(["bash", "backendhelper.sh", pen]).decode("utf-8").strip())
except subprocess.CalledProcessError as e:
enc_print('something went wrong, sorry about that :(')
raise
| StarcoderdataPython |
4818090 | <gh_stars>1-10
from django.db import models
class Snapshot(models.Model):
snapped_at = models.DateField(unique=True)
href = models.CharField(max_length=55)
completed = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Cryptocurrency(models.Model):
name = models.CharField(max_length=255)
symbol = models.CharField(max_length=20, unique=True)
slug = models.CharField(max_length=50)
added_at = models.DateTimeField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Quote(models.Model):
snapshot = models.ForeignKey(
Snapshot, on_delete=models.CASCADE, related_name='quotes')
cryptocurrency = models.ForeignKey(
Cryptocurrency, on_delete=models.CASCADE, related_name='quotes')
rank = models.IntegerField()
max_supply = models.IntegerField(null=True)
circulating_supply = models.IntegerField()
total_supply = models.IntegerField()
price = models.FloatField()
volume_24h = models.FloatField()
change_7d = models.FloatField()
market_cap = models.FloatField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('snapshot', 'cryptocurrency', 'rank')
| StarcoderdataPython |
4800302 | <filename>tests/test_fields.py
import re
from datetime import datetime
from decimal import Decimal
import pytest
from bson import ObjectId, Decimal128
from aiomongodel import Document, EmbeddedDocument
from aiomongodel.errors import ValidationError
from aiomongodel.fields import (
AnyField, StrField, IntField, FloatField, BoolField, DateTimeField,
ObjectIdField, EmbDocField, ListField, RefField, EmailField,
DecimalField, SynonymField)
from aiomongodel.utils import _Empty
class EmbDoc(EmbeddedDocument):
int_field = IntField(required=True)
class WrongEmbDoc(EmbeddedDocument):
wrong = StrField(required=True)
class RefDoc(Document):
str_field = StrField(required=False)
class WrongRefDoc(Document):
wrong = IntField(required=False)
dt = datetime.strptime('1985-09-14 12:00:00', '%Y-%m-%d %H:%M:%S')
ref_doc = RefDoc(_id=ObjectId('58ce6d537e592254b67a503d'), str_field='xxx')
emb_doc = EmbDoc(int_field=1)
wrong_ref_doc = RefDoc(_id=ObjectId('58ce6d537e592254b67a503d'), wrong=1)
wrong_emb_doc = EmbDoc(wrong='xxx')
FIELD_DEFAULT = [
(AnyField, 'xxx'),
(StrField, 'xxx'),
(IntField, 13),
(FloatField, 1.3),
(BoolField, True),
(DateTimeField, dt),
(ObjectIdField, ObjectId('58ce6d537e592254b67a503d')),
(EmailField, '<EMAIL>'),
(DecimalField, Decimal('0.005')),
]
@pytest.mark.parametrize('field, expected', [
(StrField(required=False), None),
(IntField(required=False), None),
(FloatField(required=False), None),
(BoolField(required=False), None),
(DateTimeField(required=False), None),
(ObjectIdField(required=False), None),
(EmbDocField(EmbDoc, required=False), None),
(ListField(EmbDocField(EmbDoc), required=False), None),
(RefField(RefDoc, required=False), None),
(EmailField(required=False), None),
])
def test_field_not_exist_get_value(field, expected):
class Doc(Document):
value = field
assert Doc().value is expected
@pytest.mark.parametrize('field, default', FIELD_DEFAULT)
def test_field_attributes(field, default):
class Doc(Document):
value = field(required=False)
assert isinstance(Doc.value, field)
assert Doc.value.name == 'value'
assert Doc.value.mongo_name == 'value'
assert Doc.value.s == 'value'
assert Doc.value.required is False
assert Doc.value.default is _Empty
assert Doc.value.choices is None
assert Doc.value.allow_none is False
class DocWithMongo(Document):
value = field(required=True, default=default, mongo_name='val',
choices=[default], allow_none=True)
assert isinstance(DocWithMongo.value, field)
assert DocWithMongo.value.name == 'value'
assert DocWithMongo.value.mongo_name == 'val'
assert DocWithMongo.value.s == 'val'
assert DocWithMongo.value.required is True
assert DocWithMongo.value.default == default
assert DocWithMongo.value.choices == {default}
assert DocWithMongo.value.allow_none is True
@pytest.mark.parametrize('field, default', FIELD_DEFAULT)
def test_field_default(field, default):
class Doc(Document):
value = field()
assert Doc.value.default is _Empty
class DocWithDefault(Document):
value = field(required=True, default=default)
assert DocWithDefault.value.default == default
class DocWithCallableDefault(Document):
value = field(required=True, default=lambda: default)
assert DocWithCallableDefault.value.default == default
def test_compound_field_name():
class EmbDoc(EmbeddedDocument):
int_field = IntField(mongo_name='intf')
class ComplexEmbDoc(EmbeddedDocument):
emb_field = EmbDocField(EmbDoc, mongo_name='emb')
class ComplexListDoc(EmbeddedDocument):
lst_field = ListField(EmbDocField(ComplexEmbDoc))
class Doc(Document):
int_field = IntField()
emb_field = EmbDocField(EmbDoc, mongo_name='emb')
complex_emb_field = EmbDocField(ComplexEmbDoc, mongo_name='cmplx_emb')
lst_field = ListField(EmbDocField(EmbDoc), mongo_name='lst')
lst_int_field = ListField(IntField(), mongo_name='lst_int')
complex_lst_emb_field = EmbDocField(ComplexListDoc, mongo_name='clef')
assert EmbDoc.int_field.s == 'intf'
assert Doc.int_field.s == 'int_field'
assert Doc.emb_field.s == 'emb'
assert Doc.complex_emb_field.s == 'cmplx_emb'
assert Doc.lst_field.s == 'lst'
assert Doc.lst_int_field.s == 'lst_int'
assert Doc.emb_field.int_field.s == 'emb.intf'
assert Doc.complex_emb_field.emb_field.s == 'cmplx_emb.emb'
assert Doc.lst_field.int_field.s == 'lst.intf'
assert Doc.complex_emb_field.emb_field.int_field.s == 'cmplx_emb.emb.intf'
mn = 'clef.lst_field.emb.intf'
assert (
Doc.complex_lst_emb_field.lst_field.emb_field.int_field.s == mn)
with pytest.raises(AttributeError):
Doc.int_field.wrong_field.s
with pytest.raises(AttributeError):
Doc.emb_field.int_field.wrong_field.s
with pytest.raises(AttributeError):
Doc.lst_int_field.wrong_field.s
with pytest.raises(AttributeError):
Doc.complex_emb_field.emb_field.wrong.s
with pytest.raises(AttributeError):
Doc.complex_lst_emb_field.lst_field.wrong.s
def test_compound_field_document_class():
class Doc(Document):
emb = EmbDocField('test_fields.EmbDoc')
ref = RefField('test_fields.RefDoc')
lst_emb = ListField(EmbDocField('test_fields.EmbDoc'))
lst_ref = ListField(RefField('test_fields.RefDoc'))
lst_int = ListField(IntField())
wrong_emb = EmbDocField('xxx')
wrong_ref = RefField('xxx')
wrong_lst_emb = ListField(EmbDocField('xxx'))
wrong_emb_doc = EmbDocField('test_fields.RefDoc')
wrong_ref_doc = RefField('test_fields.EmbDoc')
assert Doc.emb.document_class is EmbDoc
assert Doc.ref.document_class is RefDoc
assert Doc.lst_emb.document_class is EmbDoc
assert Doc.lst_ref.document_class is None
assert Doc.lst_int.document_class is None
with pytest.raises(ImportError):
Doc.wrong_emb.document_class
with pytest.raises(ImportError):
Doc.wrong_lst_emb.document_class
with pytest.raises(ImportError):
Doc.wrong_ref.document_class
with pytest.raises(TypeError):
class WrongEmbDoc(Document):
wrong_emb = EmbDocField(RefDoc)
with pytest.raises(TypeError):
class WrongRefDoc(Document):
wrong_ref = RefField(EmbDoc)
with pytest.raises(TypeError):
Doc.wrong_ref_doc.document_class
with pytest.raises(TypeError):
Doc.wrong_emb_doc.document_class
@pytest.mark.parametrize('field, value, expected', [
(AnyField(), '1', '1'),
(AnyField(), 1, 1),
(AnyField(), True, True),
(AnyField(), None, None),
(StrField(), 'xxx', 'xxx'),
(StrField(), None, None),
(IntField(), 1, 1),
(IntField(), None, None),
(FloatField(), 13.0, pytest.approx(13.0)),
(FloatField(), None, None),
(BoolField(), True, True),
(BoolField(), False, False),
(BoolField(), None, None),
(DateTimeField(), dt, dt),
(DateTimeField(), None, None),
(ObjectIdField(),
ObjectId('58ce6d537e592254b67a503d'),
ObjectId('58ce6d537e592254b67a503d')),
(ObjectIdField(), None, None),
(EmbDocField(EmbDoc), emb_doc, {'int_field': 1}),
(EmbDocField(EmbDoc), None, None),
(ListField(IntField()), [], []),
(ListField(IntField()), [1, 2, 3], [1, 2, 3]),
(ListField(IntField()), None, None),
(ListField(EmbDocField(EmbDoc)), [emb_doc], [{'int_field': 1}]),
(ListField(EmbDocField(EmbDoc)), None, None),
(RefField(RefDoc),
ObjectId('58ce6d537e592254b67a503d'),
ObjectId('58ce6d537e592254b67a503d')),
(RefField(RefDoc), ref_doc, ref_doc._id),
(RefField(RefDoc), None, None),
(EmailField(), '<EMAIL>', '<EMAIL>'),
(EmailField(), None, None),
(DecimalField(), Decimal('0.005'), Decimal128(Decimal('0.005'))),
(DecimalField(), None, None),
])
def test_field_to_mongo(field, value, expected):
class Doc(Document):
value = field
assert Doc.value.to_mongo(value) == expected
@pytest.mark.parametrize('field, value, expected', [
(AnyField(), '1', '1'),
(AnyField(), 1, 1),
(AnyField(), True, True),
(AnyField(), None, None),
(StrField(), 'xxx', 'xxx'),
(StrField(), None, None),
(IntField(), 1, 1),
(IntField(), None, None),
(FloatField(), 13.0, pytest.approx(13.0)),
(FloatField(), None, None),
(BoolField(), True, True),
(BoolField(), False, False),
(BoolField(), None, None),
(DateTimeField(), dt, dt),
(DateTimeField(), None, None),
(ObjectIdField(),
ObjectId('58ce6d537e592254b67a503d'),
ObjectId('58ce6d537e592254b67a503d')),
(ObjectIdField(), None, None),
(ListField(IntField()), [], []),
(ListField(IntField()), [1, 2, 3], [1, 2, 3]),
(ListField(IntField()), None, None),
(ListField(IntField()), [None], [None]),
(RefField(RefDoc),
ObjectId('58ce6d537e592254b67a503d'),
ObjectId('58ce6d537e592254b67a503d')),
(RefField(RefDoc), None, None),
(EmailField(), '<EMAIL>', '<EMAIL>'),
(EmailField(), None, None),
(DecimalField(), Decimal128(Decimal('0.005')), Decimal('0.005')),
(DecimalField(), float(0.005), Decimal('0.005')),
(DecimalField(), str(0.005), Decimal('0.005')),
(DecimalField(), None, None),
(EmbDocField(EmbDoc, allow_none=True), None, None)
])
def test_field_from_mongo(field, value, expected):
class Doc(Document):
value = field
assert Doc.value.from_mongo(value) == expected
FROM_DATA = [
(AnyField(), '1', '1'),
(AnyField(), 1, 1),
(AnyField(), True, True),
(StrField(), '', ''),
(StrField(), 'xxx', 'xxx'),
(StrField(choices=('xxx', 'yyy')), 'xxx', 'xxx'),
(StrField(), 1, '1'),
(StrField(), True, 'True'),
(StrField(allow_blank=False), '', ''),
(StrField(choices=('xxx', 'yyy')), 'zzz', 'zzz'),
(StrField(choices=('xxx', 'yyy')), 1, '1'),
(IntField(), 1, 1),
(IntField(), '1', 1),
(IntField(choices=[*range(10)]), 5, 5),
(IntField(choices=[*range(10)]), 'xxx', 'xxx'),
(IntField(choices=[*range(10)]), 100, 100),
(IntField(), 'xxx', 'xxx'),
(IntField(), 1.3, 1),
(IntField(gte=1, lte=13), 1, 1),
(IntField(gte=1, lte=13), 13, 13),
(IntField(gte=1, lte=13), 10, 10),
(IntField(gte=1, lte=13), 0, 0),
(IntField(gte=1, lte=13), 20, 20),
(IntField(gt=1, lt=13), 10, 10),
(IntField(gt=1, lt=13), 1, 1),
(IntField(gt=1, lt=13), 13, 13),
(IntField(gt=1, lt=13), 0, 0),
(IntField(gt=1, lt=13), 20, 20),
(FloatField(), 1, pytest.approx(1.0)),
(FloatField(), 1.0, pytest.approx(1.0)),
(FloatField(), '1.0', pytest.approx(1.0)),
(FloatField(), '1', pytest.approx(1.0)),
(FloatField(), 'x', 'x'),
(FloatField(gt=1.0, lt=13.0), 10.0, pytest.approx(10.0)),
(FloatField(gt=1.0, lt=13.0), 0.0, pytest.approx(0.0)),
(FloatField(gt=1.0, lt=13.0), 20.0, pytest.approx(20.0)),
(BoolField(), True, True),
(BoolField(), False, False),
(BoolField(), 13, True),
(DateTimeField(), dt, dt),
(DateTimeField(), True, True),
(ObjectIdField(),
ObjectId('58ce6d537e592254b67a503d'),
ObjectId('58ce6d537e592254b67a503d')),
(ObjectIdField(), '58ce6d537e592254b67a503d',
ObjectId('58ce6d537e592254b67a503d')),
(ListField(IntField()), [], []),
(ListField(IntField()), [1, 2, 3], [1, 2, 3]),
(ListField(IntField()), ['1', '2', '3'], [1, 2, 3]),
(ListField(IntField()), [0, 'xxx', 1], [0, 'xxx', 1]),
(ListField(IntField(), min_length=3, max_length=5), [0, 1], [0, 1]),
(ListField(IntField(), min_length=3, max_length=5), [0, 1, 2], [0, 1, 2]),
(ListField(IntField(), min_length=3, max_length=5),
[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]),
(ListField(RefField(RefDoc)), [ref_doc], [ref_doc]),
(ListField(RefField(RefDoc)), [1], [1]),
(ListField(EmbDocField(EmbDoc)), [emb_doc], [emb_doc]),
(ListField(EmbDocField(EmbDoc)), [1], [1]),
(RefField(RefDoc),
ObjectId('58ce6d537e592254b67a503d'),
ObjectId('58ce6d537e592254b67a503d')),
(RefField(RefDoc), ref_doc, ref_doc),
(RefField(RefDoc), wrong_ref_doc, wrong_ref_doc),
(RefField(RefDoc), 'xxx', 'xxx'),
(EmbDocField(EmbDoc), emb_doc, emb_doc),
(EmbDocField(EmbDoc), wrong_emb_doc, wrong_emb_doc),
(EmbDocField(EmbDoc), 1, 1),
(EmbDocField(EmbDoc), ref_doc, ref_doc),
(EmailField(), '<EMAIL>', '<EMAIL>'),
(EmailField(), 'example.com', 'example.com'),
(EmailField(), '@example.com', '@example.com'),
(EmailField(), '<EMAIL>', '<EMAIL>'),
(EmailField(), 1, '1'),
(DecimalField(), Decimal(1), Decimal(1)),
(DecimalField(), '0.005', Decimal('0.005')),
(DecimalField(gte=1, lte=13), '1.0', Decimal('1.0')),
(DecimalField(gte=1, lte=13), '13', Decimal('13')),
(DecimalField(gte=1, lte=13), '10.5', Decimal('10.5')),
(DecimalField(gte=Decimal(1), lte=13), 0, 0),
(DecimalField(gte=1, lte=13), Decimal('20.5'), Decimal('20.5')),
(DecimalField(gt=1, lt=13), 10, Decimal(10)),
(DecimalField(gt=1, lt=13), 1, 1),
(DecimalField(gt=1, lt=Decimal('13.0')), 13, 13),
(DecimalField(gt=1, lt=Decimal('13.0')), Decimal('0'), Decimal('0')),
(DecimalField(gt=1, lt=13), Decimal('20'), Decimal('20'))
]
@pytest.mark.parametrize('field, value, expected', FROM_DATA)
def test_field_from_data(field, value, expected):
class Doc(Document):
value = field
assert Doc.value.from_data(value) == expected
@pytest.mark.parametrize('field, value, expected', FROM_DATA)
def test_field_init(field, value, expected):
class Doc(Document):
value = field
assert Doc(value=value).value == expected
@pytest.mark.parametrize('field, value, expected', FROM_DATA)
def test_field_assign(field, value, expected):
class Doc(Document):
value = field
d = Doc(_empty=True)
d.value = value
assert d.value == expected
def test_emb_doc_field():
class Doc(Document):
emb_field = EmbDocField(EmbDoc)
assert isinstance(Doc(emb_field={'int_field': 1}).emb_field, EmbDoc)
d = Doc(_empty=True)
d.emb_field = {'int_field': 1}
assert isinstance(d.emb_field, EmbDoc)
assert isinstance(Doc.emb_field.from_data({'int_field': 1}), EmbDoc)
d = Doc.from_mongo({'emb_field': {'int_field': 1}})
assert isinstance(d.emb_field, EmbDoc)
assert d.emb_field.int_field == 1
def test_list_field():
with pytest.raises(TypeError):
class Doc(Document):
lst_field = ListField(int)
def test_field_choices():
class Doc(Document):
set_choices = StrField(choices={'xxx', 'yyy'})
dict_choices = StrField(choices={'xxx': 'AAA', 'yyy': 'BBB'})
d = Doc(set_choices='xxx', dict_choices='yyy')
d.validate()
d = Doc(set_choices='AAA', dict_choices='BBB')
with pytest.raises(ValidationError) as excinfo:
d.validate()
assert excinfo.value.as_dict() == {
'set_choices': 'value does not match any variant',
'dict_choices': 'value does not match any variant',
}
@pytest.mark.parametrize('field, value, expected', [
# AnyField
(AnyField(), '1', None),
(AnyField(), 1, None),
(AnyField(), True, None),
(AnyField(allow_none=True), None, None),
(AnyField(allow_none=False), None,
ValidationError('none value is not allowed')),
(AnyField(choices={'xxx', 'yyy'}), 'xxx', None),
(AnyField(choices={'xxx', 'yyy'}), 1,
ValidationError('value does not match any variant')),
# StrField
(StrField(), 'xxx', None),
(StrField(allow_none=True), None, None),
(StrField(allow_blank=True), '', None),
(StrField(choices=('xxx', 'yyy')), 'xxx', None),
(StrField(choices=('xxx', 'yyy'), max_length=2), 'xxx', None),
(StrField(choices=('xxx', 'yyy'), regex=r'zzz'), 'xxx', None),
(StrField(regex=r'[abc]+'), 'aa', None),
(StrField(regex=re.compile(r'[abc]+')), 'aa', None),
(StrField(min_length=2, max_length=3), 'aa', None),
(StrField(allow_none=False), None,
ValidationError('none value is not allowed')),
(StrField(), 1, ValidationError("invalid value type")),
(StrField(allow_none=True), True, ValidationError("invalid value type")),
(StrField(allow_blank=False), '',
ValidationError("blank value is not allowed")),
(StrField(choices=('xxx', 'yyy')), 'zzz',
ValidationError("value does not match any variant")),
(StrField(choices=('xxx', 'yyy')), 1,
ValidationError("invalid value type")),
(StrField(regex=r'[abc]+'), 'd',
ValidationError('value does not match pattern [abc]+')),
(StrField(regex=re.compile(r'[abc]+')), 'd',
ValidationError('value does not match pattern [abc]+')),
(StrField(min_length=2, max_length=3), 'a',
ValidationError('length is less than 2')),
(StrField(min_length=2, max_length=3), 'aaaa',
ValidationError('length is greater than 3')),
# IntField
(IntField(), 1, None),
(IntField(allow_none=True), None, None),
(IntField(choices=[*range(10)]), 5, None),
(IntField(choices=[*range(10)]), 'xxx',
ValidationError("invalid value type")),
(IntField(choices=[*range(10)]), 100,
ValidationError("value does not match any variant")),
(IntField(), 'xxx', ValidationError("invalid value type")),
(IntField(gte=1, lte=13), 1, None),
(IntField(gte=1, lte=13), 13, None),
(IntField(gte=1, lte=13), 10, None),
(IntField(gte=1, lte=13), 0, ValidationError('value is less than 1')),
(IntField(gte=1, lte=13), 20,
ValidationError('value is greater than 13')),
(IntField(gt=1, lt=13), 10, None),
(IntField(gt=1, lt=13), 1,
ValidationError('value should be greater than 1')),
(IntField(gt=1, lt=13), 13,
ValidationError('value should be less than 13')),
(IntField(gt=1, lt=13), 0,
ValidationError('value should be greater than 1')),
(IntField(gt=1, lt=13), 20,
ValidationError('value should be less than 13')),
# FloatField
(FloatField(), 1.0, None),
(FloatField(allow_none=True), None, None),
(FloatField(allow_none=False), None,
ValidationError('none value is not allowed')),
(FloatField(), 'x', ValidationError("invalid value type")),
(FloatField(), '1.0', ValidationError("invalid value type")),
(FloatField(gt=1.0, lt=13.0), 10.0, None),
(FloatField(gt=1.0, lt=13.0), 0.0,
ValidationError("value should be greater than 1.0")),
(FloatField(gt=1.0, lt=13.0), 20.0,
ValidationError("value should be less than 13.0")),
# BoolField
(BoolField(), True, None),
(BoolField(), False, None),
(BoolField(allow_none=True), None, None),
(BoolField(allow_none=False), None,
ValidationError('none value is not allowed')),
(BoolField(), 13, ValidationError('invalid value type')),
# DateTimeField
(DateTimeField(), dt, None),
(DateTimeField(allow_none=True), None, None),
(DateTimeField(allow_none=False), None,
ValidationError('none value is not allowed')),
(DateTimeField(), True, ValidationError('invalid value type')),
# ObjectIdField
(ObjectIdField(), ObjectId('58ce6d537e592254b67a503d'), None),
(ObjectIdField(allow_none=True), None, None),
(ObjectIdField(allow_none=False), None,
ValidationError('none value is not allowed')),
(ObjectIdField(), '58ce6d537e592254b67a503d',
ValidationError('invalid value type')),
# ListField
(ListField(IntField()), [], None),
(ListField(IntField()), [1, 2, 3], None),
(ListField(IntField(), allow_none=True), None, None),
(ListField(IntField(), allow_none=False), None,
ValidationError('none value is not allowed')),
(ListField(IntField()), [0, 'xxx', 1],
ValidationError({1: ValidationError('invalid value type')})),
(ListField(IntField(), min_length=3, max_length=5),
[0, 1], ValidationError('list length is less than 3')),
(ListField(IntField(), min_length=3, max_length=5), [0, 1, 2], None),
(ListField(IntField(), min_length=3, max_length=5),
[0, 1, 2, 3, 4, 5], ValidationError('list length is greater than 5')),
# (ListField(RefField(RefDoc)), [ref_doc], None),
(ListField(RefField(RefDoc)), [1],
ValidationError({0: ValidationError('invalid value type')})),
(ListField(EmbDocField(EmbDoc)), [emb_doc], None),
(ListField(EmbDocField(EmbDoc)), [1],
ValidationError({0: ValidationError('invalid value type')})),
# RefField
(RefField(RefDoc), ObjectId('58ce6d537e592254b67a503d'), None),
(RefField(RefDoc), ref_doc, None),
(RefField(RefDoc, allow_none=True), None, None),
(RefField(RefDoc, allow_none=False), None,
ValidationError('none value is not allowed')),
(RefField(RefDoc), 'xxx', ValidationError('invalid value type')),
(RefField(RefDoc), WrongRefDoc(),
ValidationError('invalid value type')),
# EmbDocField
(EmbDocField(EmbDoc), emb_doc, None),
(EmbDocField(EmbDoc, allow_none=True), None, None),
(EmbDocField(EmbDoc, allow_none=False), None,
ValidationError('none value is not allowed')),
(EmbDocField(EmbDoc), WrongEmbDoc(wrong='xxx'),
ValidationError("invalid value type")),
(EmbDocField(EmbDoc), 1,
ValidationError("invalid value type")),
(EmbDocField(EmbDoc), {'str_field': 1},
ValidationError("invalid value type")),
(EmbDocField(EmbDoc), EmbDoc(int_field='xxx'),
ValidationError({'int_field': ValidationError('invalid value type')})),
(EmbDocField(EmbDoc), RefDoc(),
ValidationError("invalid value type")),
# EmailField
(EmailField(), '<EMAIL>', None),
(EmailField(allow_none=True), None, None),
(EmailField(allow_none=False), None,
ValidationError('none value is not allowed')),
(EmailField(), 'example.com',
ValidationError("value is not a valid email address")),
(EmailField(), '@example.com',
ValidationError("value is not a valid email address")),
(EmailField(), '<EMAIL>',
ValidationError("value is not a valid email address")),
(EmailField(), 1,
ValidationError("invalid value type")),
(EmailField(max_length=10), '<EMAIL>',
ValidationError("length is greater than 10")),
# DecimalField
(DecimalField(), Decimal(1), None),
(DecimalField(allow_none=True), None, None),
(DecimalField(allow_none=False), None,
ValidationError('none value is not allowed')),
(DecimalField(gte=1, lte=13), Decimal('1.0'), None),
(DecimalField(gte=1, lte=13), Decimal('13'), None),
(DecimalField(gte=1, lte=13), Decimal('10.5'), None),
(DecimalField(gte=Decimal(1), lte=13), Decimal(0),
ValidationError('value is less than 1')),
(DecimalField(gte=1, lte=13), Decimal('20.5'),
ValidationError('value is greater than 13')),
(DecimalField(gt=1, lt=13), Decimal(10), None),
(DecimalField(gt=1, lt=13), Decimal(1),
ValidationError('value should be greater than 1')),
(DecimalField(gt=1, lt=Decimal('13.0')), Decimal(13),
ValidationError('value should be less than 13.0')),
(DecimalField(gt=1, lt=Decimal('13.0')), Decimal('0'),
ValidationError('value should be greater than 1')),
(DecimalField(gt=1, lt=13), Decimal('20'),
ValidationError('value should be less than 13')),
])
def test_fields_validation(field, value, expected):
if expected is not None:
with pytest.raises(ValidationError) as excinfo:
field.validate(value)
assert excinfo.value.as_dict() == expected.as_dict()
else:
# should be no errors
field.validate(value)
class DocWithSynonym(Document):
_id = StrField(required=True, allow_blank=False)
name = SynonymField(_id)
class DocWithSynonymStr(Document):
_id = StrField(required=True, allow_blank=False)
name = SynonymField('_id')
@pytest.mark.parametrize('Doc', [DocWithSynonym, DocWithSynonymStr])
def test_synonym_field(Doc):
assert Doc.name is Doc._id
assert Doc.name.name == '_id'
assert Doc.name.s == '_id'
assert Doc.meta.fields == {'_id': Doc._id}
d = Doc(_id='totti')
assert d.name == 'totti'
d.name = 'francesco'
assert d._id == 'francesco'
| StarcoderdataPython |
199839 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Documentation TBD"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
from .common import container_type
class baseimage(object):
"""Documentation TBD"""
def __init__(self, **kwargs):
"""Documentation TBD"""
#super(baseimage, self).__init__()
self.__as = kwargs.get('AS', '') # Docker specific
self.__as = kwargs.get('_as', self.__as) # Docker specific
self.image = kwargs.get('image', 'nvcr.io/nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04')
def toString(self, ctype):
"""Documentation TBD"""
if ctype == container_type.DOCKER:
image = 'FROM {}'.format(self.image)
if self.__as:
image = image + ' AS {}'.format(self.__as)
return image
elif ctype == container_type.SINGULARITY:
return 'BootStrap: docker\nFrom: {}'.format(self.image)
else:
logging.error('Unknown container type')
return ''
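# Usage sketch (illustrative):
#   img = baseimage(image='nvcr.io/nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04', _as='devel')
#   img.toString(container_type.DOCKER)       # -> "FROM nvcr.io/nvidia/... AS devel"
#   img.toString(container_type.SINGULARITY)  # -> "BootStrap: docker\nFrom: nvcr.io/nvidia/..."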
| StarcoderdataPython |
1738288 | import base64
import math
class DotDict(dict):
"""
a dictionary that supports dot notation access
as well as dictionary access notation
"""
def __init__(self, dictionary):
for key, val in dictionary.items():
self[key] = val
def __setitem__(self, key, val):
if isinstance(val, dict):
val = self.__class__(val)
return super().__setitem__(key, val)
__setattr__ = __setitem__
__getattr__ = dict.__getitem__
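# Usage sketch (illustrative): both access styles work, including on nested dicts, e.g.
#   d = DotDict({'user': {'name': 'ada'}})
#   d.user.name == d['user']['name'] == 'ada'   # True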
class Hasher:
@classmethod
def index_qid_to_sid(cls, index, qid):
# Remove '===...' padding - it's ugly and we can reconstruct it later
encoded_index = base64.b32encode(index.encode()).decode().rstrip("=")
sid = encoded_index + "_" + qid
return sid
@classmethod
def sid_to_index_qid(cls, sid):
index, _, qid = sid.partition("_")
index = base64.b32decode(cls.pad(index).encode()).decode()
return index, qid
@classmethod
def pad(cls, v, length=8):  # base32 encodes 5-byte groups as 8-character blocks, so pad back to a multiple of 8
return v.ljust(math.ceil(float(len(v))/length)*length, "=")
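# Usage sketch (illustrative): the sid encoding is reversible, e.g.
#   sid = Hasher.index_qid_to_sid('my-index', 'q42')   # base32 of 'my-index' (padding stripped) + '_q42'
#   Hasher.sid_to_index_qid(sid)                       # -> ('my-index', 'q42')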
| StarcoderdataPython |
173034 | """
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from byceps.services.authentication.session.models.current_user import (
CurrentUser,
)
from byceps.services.authentication.session import service as session_service
from byceps.services.shop.cart.models import Cart
from byceps.services.shop.order import service as order_service
from tests.integration.services.shop.helpers import create_orderer
def get_current_user_for_user(user) -> CurrentUser:
return session_service.get_authenticated_current_user(
user, locale=None, permissions=frozenset()
)
def place_order_with_items(
storefront_id, user, created_at=None, items_with_quantity=None
):
orderer = create_orderer(user)
cart = Cart()
if items_with_quantity is not None:
for article, quantity in items_with_quantity:
cart.add_item(article, quantity)
order, _ = order_service.place_order(
storefront_id, orderer, cart, created_at=created_at
)
return order
| StarcoderdataPython |
3388440 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 11:31:53 2020
@author: tobia
"""
import networkx as nx
#import network
filepath = ''
print("importing network from" + filepath)
comment_network = nx.read_gpickle(filepath)
#define function to be applied in different shapes
def surpress_problematic_nodes(problem = None, location = None, G = comment_network): #parameters require strings: what to look for in a node and where to look for it. example (surpress_problematic_nodes(problem = "$$", location = 'text'))
#detect problematic content
counter = 1
good_nodes = 0
bad_nodes = 0
problematic_nodes = []
#start debugging
print("Checking for problematic nodes")
for (node, attr) in G.nodes(data = True):
#check whether text attribute contains $ signs and surpress them from the network
if(problem in attr[location]):
print("problematic node found")
problematic_nodes.append(node)
bad_nodes = bad_nodes + 1
else:
print("node looks fine")
good_nodes = good_nodes + 1
counter = counter + 1
#report on nodes
print(str(good_nodes) + " unproblematic nodes in the network")
print(str(bad_nodes) + " problematic nodes collected from the network")
print(str(len(G.nodes())) + " nodes in the network at collection end")
print(str(len(G.edges())) + " edges in the network at collection end")
print("collection of faulty nodes finished")
#remove problematic nodes
G.remove_nodes_from(problematic_nodes)
print(str(bad_nodes) + " faulty nodes erased from the network")
print(str(len(G.nodes())) + " nodes in the network after removal of problematic nodes")
print(str(len(G.edges())) + " edges in the network after removal of problematic nodes")
#execute funtions according to the problems that have risen
surpress_problematic_nodes(problem = "$$", location = 'text')
#save pickle to specified filepath
filepath = "" #create comment_network.gpickle
print("saving file to " + str(filepath))
nx.write_gpickle(comment_network, filepath)
print("exe finnished")
| StarcoderdataPython |
1791573 | <filename>common/camera_info.py
import json
import cv2
import numpy as np
OAK_L_CALIBRATION_JSON = open('../resources/14442C10218CCCD200.json')
OAK_L_CALIBRATION_DATA = json.load(OAK_L_CALIBRATION_JSON)
OAK_L_CAMERA_RGB = OAK_L_CALIBRATION_DATA['cameraData'][2][1]
OAK_L_CAMERA_LEFT = OAK_L_CALIBRATION_DATA['cameraData'][0][1]
OAK_L_CAMERA_RIGHT = OAK_L_CALIBRATION_DATA['cameraData'][1][1]
LR_Translation = np.array(list(OAK_L_CAMERA_LEFT['extrinsics']['translation'].values())) / 100 # Convert cm to m
LR_Rotation = np.array(list(OAK_L_CAMERA_LEFT['extrinsics']['rotationMatrix']))
L_DISTORTION = np.array(OAK_L_CAMERA_LEFT['distortionCoeff'])
L_INTRINSIC = np.array(OAK_L_CAMERA_LEFT['intrinsicMatrix'])
R_DISTORTION = np.array(OAK_L_CAMERA_RIGHT['distortionCoeff'])
R_INTRINSIC = np.array(OAK_L_CAMERA_RIGHT['intrinsicMatrix'])
R1, R2, L_Projection, R_Projection, Q, L_ROI, R_ROI = cv2.stereoRectify(L_INTRINSIC, L_DISTORTION, R_INTRINSIC, R_DISTORTION, (1280, 720), LR_Rotation, LR_Translation)
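# stereoRectify returns the rectification rotations (R1, R2), the rectified projection matrices for
# the left/right cameras, the 4x4 disparity-to-depth mapping matrix Q, and the valid pixel ROIs.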
OAK_L_PARAMS = {
'l_intrinsic': L_INTRINSIC,
'r_intrinsic': R_INTRINSIC,
'l_distortion': L_DISTORTION,
'r_distortion': R_DISTORTION,
'l_projection': L_Projection,
'r_projection': R_Projection
}
# https://towardsdatascience.com/estimating-a-homography-matrix-522c70ec4b2c
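# For points on a world plane (Z = 0) the projection K @ [R | t] collapses to a 3x3 homography
# H = K @ [r1, r2, t]; the matmul calls below assemble it from the first two rotation columns
# and the translation vector.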
CR_Translation = np.array(list(OAK_L_CAMERA_LEFT['extrinsics']['translation'].values())).T / 100 # Convert cm to m
CR_Rotation = np.array(list(OAK_L_CAMERA_RIGHT['extrinsics']['rotationMatrix'])).T
H_CR = np.matmul(R_INTRINSIC, np.concatenate((CR_Rotation[:, 0:2], CR_Translation.reshape(3, 1)), axis=1))
RL_Rotation = LR_Rotation.T
RL_Translation = LR_Translation * -1
H_LR = np.matmul(L_INTRINSIC, np.concatenate((RL_Rotation[:, 0:2], RL_Translation.reshape(3, 1)), axis=1)) | StarcoderdataPython |
1697697 | """
Creating signor database
:argument: DB_TYPE: name of the source database
:argument: DB_DESTINATION: saving location of the created database files
:argument: CSV_LIST: list of .csv files of each signalink pathway
:argument: FILENAME_TO_PATHWAY_MAP: dictionary from file name to SLK pathway
:argument: IS_DIRECT_MAP: dictionary of directness to MI ids
:argument: EFFECT_MAP: dictionary of effect to MI ids
:argument: MOLECULAR_MAP: dictionary of molecular background to MI ids
Important!
Since the Signor db provides its data in multiple files, this script can be called multiple times to generate
a single SQL .db file. On the first run the script creates a signor.db file. If the script is called again on
another Signor .tsv file, it can extend the previously created signor.db file by adding 'signor.db' as a fourth argument.
"""
# -*- coding: utf-8 -*-
import csv, sys
from SLKlib.SQLiteDBApi.sqlite_db_api import PsimiSQL
#Defining constants
SQL_SEED = '../../../../../SLKlib/SQLiteDBApi/network-db-seed.sql'
DB_TYPE = 'Signor'
DB_DESTINATION = '../../../SLK_Core/output/signor'
CSV_LIST = ['files/SIGNOR_WNT.csv',
'files/SIGNOR_TOLLR.csv',
'files/SIGNOR_TGFb.csv',
'files/SIGNOR_SAPK_JNK.csv',
'files/SIGNOR_P38.csv',
'files/SIGNOR_NOTCH.csv',
'files/SIGNOR_NFKBNC.csv',
'files/SIGNOR_NFKBC.csv',
'files/SIGNOR_MTOR.csv',
'files/SIGNOR_MCAPO.csv',
'files/SIGNOR_INSR.csv',
'files/SIGNOR_IL1R.csv',
'files/SIGNOR_IAPO.csv',
'files/SIGNOR_AMPK.csv',
'files/SIGNOR_BMP.csv',
'files/SIGNOR_DR.csv',
'files/SIGNOR_EGF.csv',
'files/SIGNOR_HPP.csv',
'files/SIGNOR_Hedgehog.csv' ]
NUMBER_OF_FILES = len(CSV_LIST)
FILENAME_TO_PATHWAY_MAP = {
'SIGNOR_AMPK.csv': 'Receptor tyrosine kinase',
'SIGNOR_BMP.csv': 'TGF',
'SIGNOR_DR.csv': 'TNF pathway',
'SIGNOR_EGF.csv': 'Receptor tyrosine kinase',
'SIGNOR_HPP.csv': 'Hippo',
'SIGNOR_Hedgehog.csv': 'Hedgehog',
'SIGNOR_IAPO.csv': 'TNF pathway',
'SIGNOR_IL1R.csv': 'JAK/STAT',
'SIGNOR_INSR.csv': 'Receptor tyrosine kinase',
'SIGNOR_MCAPO.csv': 'TNF pathway',
'SIGNOR_MTOR.csv': 'Receptor tyrosine kinase',
'SIGNOR_NFKBC.csv': 'Innate immune pathways',
'SIGNOR_NFKBNC.csv': 'Innate immune pathways',
'SIGNOR_NOTCH.csv': 'Notch',
'SIGNOR_P38.csv': 'Receptor tyrosine kinase',
'SIGNOR_SAPK_JNK.csv': 'Receptor tyrosine kinase',
'SIGNOR_TGFb.csv': 'TGF',
'SIGNOR_TOLLR.csv': 'Toll-like receptor',
'SIGNOR_WNT.csv': 'WNT/Wingless'
}
IS_DIRECT_MAP = {
"YES": "MI:0407(direct interaction)",
"NO": "indirect",
"UNK": "unknown"
}
EFFECT_MAP = {
'Unknown': 'MI:0190(interaction type)',
'down-regulates': 'MI:2240(down-regulates)',
"down-regulates activity": 'MI:2241(down-regulates activity)',
"down-regulates quantity": 'MI:2242(down-regulates quantity)',
"down-regulates quantity by destabilization": 'MI:2244(down-regulates quantity by destabilization)',
"down-regulates quantity by repression": 'MI:2243(down-regulates quantity by repression)',
'unknown': 'MI:0190(interaction type)',
'up-regulates': 'MI:2235(up-regulates)',
"up-regulates activity": 'MI:2236(up-regulates activity)',
"up-regulates quantity by expression": 'MI:2238(up-regulates quantity by expression)',
"up-regulates quantity by stabilization": 'MI:2239(up-regulates quantity by stabilization)'
}
MOLECULAR_MAP = {
'binding' : 'MI:0462(bind)',
'transcriptional regulation' : 'MI:2247(transcriptional regulation)',
'phosphorylation' : 'MI:0217(phosphorylation reaction)',
'' : 'MI:0190(interaction type)',
'ubiquitination' : 'MI:0220(ubiquitination reaction)',
'relocalization' : 'MI:2256(relocalization reaction)',
'dephosphorylation' : 'MI:0203(dephosphorylation reaction)',
'cleavage' : 'MI:0194(cleavage reaction)',
'deubiquitination' : 'MI:0204(deubiquitination reaction)',
'guanine nucleotide exchange factor' : 'MI:2252(guanine nucleotide exchange factor)'
}
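# The maps above feed the interaction_types string assembled in main() as
#   "<effect MI term>|<mechanism MI term>|is_directed:true|is_direct:<true/false>"
# e.g. "MI:2235(up-regulates)|MI:0217(phosphorylation reaction)|is_directed:true|is_direct:true"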
def main(logger):
# Initiating a PsimiSQL class
db_api = PsimiSQL(SQL_SEED)
# Making the script user friendly
file_counter = 1
print("Started parsing .csv files")
# Parsing data files
for csv_file_location in CSV_LIST:
csv_file_name = csv_file_location.split('/')[-1]
sys.stdout.write("Parsing '%s' (%d/%d)\r" % (csv_file_name, file_counter, NUMBER_OF_FILES))
csv_file = csv.reader(open(csv_file_location, encoding="ISO-8859-1"), delimiter = ';', quotechar = '"')
pathway = FILENAME_TO_PATHWAY_MAP[csv_file_name]
# Skipping the header
for cells in csv_file:
type_a = cells[1].lower()
type_b = cells[5].lower()
taxids = cells[12].split(';')[0]
if type_a == 'protein' and type_b == 'protein' and taxids == '9606':
# Dealing with the first node
node_a_name = f'Uniprot:{cells[2]}'
node_a_taxid = 'taxid:' + taxids
node_a_taxid = node_a_taxid
node_a_dict = {}
# If the node already exists in the db, than only it's pathway will be modified, otherwise it will be added to the db
if db_api.get_node(node_a_name,node_a_taxid):
node_a_dict = db_api.get_node(node_a_name,node_a_taxid)
if not pathway in node_a_dict['pathways']:
node_a_dict['pathways'] += '|'+pathway
db_api.update_node(node_a_dict)
else:
node_a_dict = {
'name' : node_a_name,
'alt_accession' : 'entrez gene/locuslink:'+cells[0],
'tax_id' : node_a_taxid,
'pathways' : pathway,
'aliases' : None,
'topology' : ""
}
db_api.insert_node(node_a_dict)
# Doing the same with node b
node_b_name = f'Uniprot:{cells[6]}'
node_b_taxid = 'taxid:' + taxids
node_b_taxid = node_b_taxid
node_b_dict = {}
# If the node already exists in the db, than only it's pathway will be modified, otherwise it will be added to the db
if db_api.get_node(node_b_name,node_b_taxid):
node_b_dict = db_api.get_node(node_b_name,node_b_taxid)
if not pathway in node_b_dict['pathways']:
node_b_dict['pathways'] += '|'+pathway
db_api.update_node(node_b_dict)
else:
node_b_dict = {
'name' : node_b_name,
'alt_accession' : 'entrez gene/locuslink:'+cells[4],
'tax_id' : node_b_taxid,
'pathways' : pathway,
'aliases' : None,
'topology' : ""
}
db_api.insert_node(node_b_dict)
# Getting publication id
publication_id = ['pubmed:'+cells[21]]
publication_id.append("pubmed:26467481")
effect = EFFECT_MAP[cells[8]]
molecular_background = MOLECULAR_MAP[cells[9]]
inttype_final = effect + '|' + molecular_background
is_direct = IS_DIRECT_MAP[cells[22]].strip()
if "MI:0407(direct interaction)" in is_direct:
is_direct = "true"
else:
is_direct = "false"
# Setting up the interaction type
interaction_types = "%s|is_directed:%s|is_direct:%s" \
% (inttype_final, "true", is_direct)
edge_dict = {
'interaction_detection_method': None,
'first_author': None,
'publication_ids': "|".join(publication_id),
'interaction_types': interaction_types,
'source_db': 'Signor',
'interaction_identifiers': None,
'confidence_scores': None,
'layer': "8"
}
db_api.insert_edge(node_a_dict,node_b_dict,edge_dict)
print("Parsing files finished!")
print("Finished parsing Signor. Saving db to %s.db" % (DB_TYPE))
db_api.save_db_to_file(DB_DESTINATION)
if __name__ == '__main__':
main()
| StarcoderdataPython |
1755031 | <filename>demo/lib/faker_demo.py<gh_stars>1-10
from random import choice
import faker
f = faker.Faker('zh_CN')
# address
print('f.country(): {}'.format(f.country()))
print('f.country_code(): {}'.format(f.country_code()))
print('f.address(): {}'.format(f.address()))
print('f.city: {}'.format(f.city()))
print('f.city_name: {}'.format(f.city_name()))
print('f.city_suffix: {}'.format(f.city_suffix()))
print('f.street_name: {}'.format(f.street_name()))
print('f.postcode: {}'.format(f.postcode()))
# text
print('f.text: {}'.format(f.text(20)))
print('f.words: {}'.format(f.words(4)))
# person
print()
print('f.name: {}'.format(f.name()))
print('f.first_name: {}'.format(f.first_name()))
print('f.first_name_male: {}'.format(f.first_name_male()))
print('f.first_name_female: {}'.format(f.first_name_female()))
print('f.last_name: {}'.format(f.last_name()))
print('f.last_name_male: {}'.format(f.last_name_male()))
print('f.last_name_female: {}'.format(f.last_name_female()))
# phone
print()
print('f.phone_number: {}'.format(f.phone_number()))
# python
print()
print('f.pyfloat:{}'.format(f.pyfloat()))
print('f.pyint:{}'.format(f.pyint()))
print('f.pylist: {}'.format(f.pylist(3)))
print('f.py str: {}'.format(f.pystr()))
print('f.pydict: {}'.format(f.pydict(3)))
# job
print('f.job: {}'.format(f.job()))
# uuid
print('f.uuid4():{}'.format(f.uuid4()))
print('f.uuid4(cast_to=int):{}'.format(f.uuid4(cast_to=int)))
print('f.uuid4(cast_to=lambda x: x):{}'.format(f.uuid4(cast_to=lambda x: x)))
# internet
print('网络')
print('f.image_url(200, 300):{}'.format(f.image_url(200, 300)))
print('f.hostname():{}'.format(f.hostname()))
print('f.url():{}'.format(f.url()))
schemes_sets = [['usb'], ['ftp', 'file'], ['usb', 'telnet', 'http']]
print('f.url(schemes=): {}'.format(f.url(schemes=choice(schemes_sets))))
print('f.domain_name(): {}'.format(f.domain_name(10)))
print('f.tld(): {}'.format(f.tld()))
print('f.email(): {}'.format(f.email()))
print('f.domain_word(): {}'.format(f.domain_word()))
# geo
print('地理位置')
print('f.local_latlng(country_code=\'CN\'): {}'.format(f.local_latlng(country_code='CN')))
print('f.local_latlng(country_code=\'CN\', coords_only=True): {}'.format(
f.local_latlng(country_code='US', coords_only=True)))
print('factory.longitude(): {}'.format(f.longitude()))
print('factory.latitude(): {}'.format(f.latitude()))
print('factory.coordinate(): {}'.format(f.coordinate()))
print('factory.coordinate(center=23): {}'.format(f.coordinate(center=23)))
print('factory.location_on_land(): {}'.format(f.location_on_land()))
print('f.location_on_land(coords_only=True): {}'.format(f.location_on_land(coords_only=True)))
# file
print('文件')
print('f.file_path(): {}'.format(f.file_path()))
print('f.unix_device(\'sdas\'): {}'.format(f.unix_device('sdas')))
print('f.file_path(category=\'image\'): {}'.format(f.file_path(category='image')))
print('f.file_path(depth=3): {}'.format(f.file_path(depth=4)))
print('f.file_path(extension=\'pdf\')): {}'.format(f.file_path(extension='pdf')))
print('f.unix_device(): {}'.format(f.unix_device()))
print('f.unix_partition(): {}'.format(f.unix_partition()))
print('f.unix_partition(\'sff\'): {}'.format(f.unix_partition('sff')))
# datetime
print('日期')
print('f.date_of_birth(minimum_age=0): {}'.format(f.date_of_birth(minimum_age=0)))
print('f.date_of_birth(minimum_age=20, maximum_age=22): {}'.format(f.date_of_birth(minimum_age=20, maximum_age=22)))
# company
print('公司')
print('f.company(): {}'.format(f.company()))
print('f.company_prefix(): {}'.format(f.company_prefix()))
print('f.company_suffix(): {}'.format(f.company_suffix()))
# color
print('颜色')
print('f.color_name(): {}'.format(f.color_name()))
print('f.safe_color_name(): {}'.format(f.safe_color_name()))
print('f.rgb_css_color(): {}'.format(f.rgb_css_color()))
print('f.hex_color(): {}'.format(f.hex_color()))
print('f.safe_hex_color(): {}'.format(f.safe_hex_color()))
# bank
print('银行账户')
print('f.bban(): {}'.format(f.bban()))
print('f.iban(): {}'.format(f.iban()))
# automotive
print('汽车')
print('f.license_plate(): {}'.format(f.license_plate()))
| StarcoderdataPython |
1629895 | """
Stores the class for TimeSeriesDisplay.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
import warnings
from re import search as re_search
from matplotlib import colors as mplcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from .plot import Display
# Import Local Libs
from . import common
from ..utils import datetime_utils as dt_utils
from ..utils.datetime_utils import reduce_time_ranges, determine_time_delta
from ..qc.qcfilter import parse_bit
from ..utils import data_utils
from ..utils.geo_utils import get_sunrise_sunset_noon
from copy import deepcopy
from scipy.interpolate import NearestNDInterpolator
class TimeSeriesDisplay(Display):
"""
This subclass contains routines that are specific to plotting
time series plots from data. It is inherited from Display and therefore
contains all of Display's attributes and methods.
Examples
--------
To create a TimeSeriesDisplay with 3 rows, simply do:
.. code-block:: python
ds = act.read_netcdf(the_file)
disp = act.plotting.TimeSeriesDisplay(
ds, subplot_shape=(3,), figsize=(15,5))
The TimeSeriesDisplay constructor takes in the same keyword arguments as
plt.subplots. For more information on the plt.subplots keyword arguments,
see the `matplotlib documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html>`_.
If no subplot_shape is provided, then no figure or axis will be created
until add_subplots or plots is called.
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name, **kwargs)
def day_night_background(self, dsname=None, subplot_index=(0, )):
"""
Colorcodes the background according to sunrise/sunset.
Parameters
----------
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream then ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index to the subplot to place the day and night background in.
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream to derive the " +
"information needed for the day and night " +
"background when 2 or more datasets are in " +
"the display object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get File Dates
try:
file_dates = self._obj[dsname].attrs['_file_dates']
except KeyError:
file_dates = []
if len(file_dates) == 0:
sdate = dt_utils.numpy_to_arm_date(
self._obj[dsname].time.values[0])
edate = dt_utils.numpy_to_arm_date(
self._obj[dsname].time.values[-1])
file_dates = [sdate, edate]
all_dates = dt_utils.dates_between(file_dates[0], file_dates[-1])
if self.axes is None:
raise RuntimeError("day_night_background requires the plot to "
"be displayed.")
ax = self.axes[subplot_index]
# Find variable names for latitude and longitude
variables = list(self._obj[dsname].data_vars)
lat_name = [var for var in ['lat', 'latitude'] if var in variables]
lon_name = [var for var in ['lon', 'longitude'] if var in variables]
if len(lat_name) == 0:
lat_name = None
else:
lat_name = lat_name[0]
if len(lon_name) == 0:
lon_name = None
else:
lon_name = lon_name[0]
# Variable name does not match, look for standard_name declaration
if lat_name is None or lon_name is None:
for var in variables:
try:
if self._obj[dsname][var].attrs['standard_name'] == 'latitude':
lat_name = var
except KeyError:
pass
try:
if self._obj[dsname][var].attrs['standard_name'] == 'longitude':
lon_name = var
except KeyError:
pass
if lat_name is not None and lon_name is not None:
break
if lat_name is None or lon_name is None:
return
try:
if self._obj[dsname][lat_name].data.size > 1:
# Look for non-NaN values to use for location. If not found, use the first value.
lat = self._obj[dsname][lat_name].values
index = np.where(np.isfinite(lat))[0]
if index.size == 0:
index = [0]
lat = float(lat[index[0]])
# Look for non-NaN values to use for location. If not found, use the first value.
lon = self._obj[dsname][lon_name].values
index = np.where(np.isfinite(lon))[0]
if index.size == 0:
index = [0]
lon = float(lon[index[0]])
else:
lat = float(self._obj[dsname][lat_name].values)
lon = float(self._obj[dsname][lon_name].values)
except AttributeError:
return
if not np.isfinite(lat):
warnings.warn(f"Latitude value in dataset of '{lat}' is not finite. ",
RuntimeWarning)
return
if not np.isfinite(lon):
warnings.warn(f"Longitude value in dataset of '{lon}' is not finite. ",
RuntimeWarning)
return
lat_range = [-90, 90]
if not (lat_range[0] <= lat <= lat_range[1]):
warnings.warn(f"Latitude value in dataset of '{lat}' not within acceptable "
f"range of {lat_range[0]} <= latitude <= {lat_range[1]}. ",
RuntimeWarning)
return
lon_range = [-180, 180]
if not (lon_range[0] <= lon <= lon_range[1]):
warnings.warn(f"Longitude value in dataset of '{lon}' not within acceptable "
f"range of {lon_range[0]} <= longitude <= {lon_range[1]}. ",
RuntimeWarning)
return
# initialize the plot to a gray background for total darkness
rect = ax.patch
rect.set_facecolor('0.85')
# Get date ranges to plot
plot_dates = []
for f in all_dates:
for ii in [-1, 0, 1]:
plot_dates.append(f + dt.timedelta(days=ii))
# Get sunrise, sunset and noon times
sunrise, sunset, noon = get_sunrise_sunset_noon(lat, lon, plot_dates)
# Plot daylight
for ii in range(0, len(sunrise)):
ax.axvspan(sunrise[ii], sunset[ii], facecolor='#FFFFCC', zorder=0)
# Plot noon line
for ii in noon:
ax.axvline(x=ii, linestyle='--', color='y', zorder=1)
def set_xrng(self, xrng, subplot_index=(0, )):
"""
Sets the x range of the plot.
Parameters
----------
xrng : 2 number array
The x limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError("set_xrng requires the plot to be displayed.")
if not hasattr(self, 'xrng') and len(self.axes.shape) == 2:
self.xrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2),
dtype='datetime64[D]')
elif not hasattr(self, 'xrng') and len(self.axes.shape) == 1:
self.xrng = np.zeros((self.axes.shape[0], 2),
dtype='datetime64[D]')
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
self.axes[subplot_index].set_xlim(xrng)
self.xrng[subplot_index, :] = np.array(xrng, dtype='datetime64[D]')
def set_yrng(self, yrng, subplot_index=(0, )):
"""
Sets the y range of the plot.
Parameters
----------
yrng : 2 number array
The y limits of the plot.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
"""
if self.axes is None:
raise RuntimeError("set_yrng requires the plot to be displayed.")
if not hasattr(self, 'yrng') and len(self.axes.shape) == 2:
self.yrng = np.zeros((self.axes.shape[0], self.axes.shape[1], 2))
elif not hasattr(self, 'yrng') and len(self.axes.shape) == 1:
self.yrng = np.zeros((self.axes.shape[0], 2))
if yrng[0] == yrng[1]:
yrng[1] = yrng[1] + 1
self.axes[subplot_index].set_ylim(yrng)
self.yrng[subplot_index, :] = yrng
def plot(self, field, dsname=None, subplot_index=(0, ),
cmap=None, set_title=None,
add_nan=False, day_night_background=False,
invert_y_axis=False, abs_limits=(None, None), time_rng=None,
y_rng=None, use_var_for_y=None, set_shading='auto',
assessment_overplot=False,
overplot_marker='.',
overplot_behind=False,
overplot_markersize=6,
assessment_overplot_category={'Incorrect': ['Bad', 'Incorrect'],
'Suspect': ['Indeterminate', 'Suspect']},
assessment_overplot_category_color={'Incorrect': 'red', 'Suspect': 'orange'},
force_line_plot=False, labels=False, cbar_label=None, secondary_y=False,
**kwargs):
"""
Makes a timeseries plot. If subplots have not been added yet, an axis
will be created assuming that there is only going to be one plot.
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
cmap : matplotlib colormap
The colormap to use.
set_title : str
The title for the plot.
add_nan : bool
Set to True to fill in data gaps with NaNs.
day_night_background : bool
Set to True to fill in a color coded background.
according to the time of day.
abs_limits : tuple or list
Sets the bounds on plot limits even if data values exceed
those limits. Set to (ymin,ymax). Use None if only setting
minimum or maximum limit, i.e. (22., None).
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range
limits.
y_rng : tuple or list
List or tuple with (min, max) values to set the y-axis range
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as
the y-axis variable instead of the default dimension. Useful for
instances where data has an index-based dimension instead of a
height-based dimension. If shapes of arrays do not match it will
automatically revert back to the original ydata.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
assessment_overplot : boolean
Option to overplot quality control colored symbols over plotted
data using flag_assessment categories.
overplot_marker : str
Marker to use for overplot symbol.
overplot_behind : bool
Place the overplot marker behind the data point.
overplot_markersize : float or int
Size of overplot marker. If overplot_behind or force_line_plot
are set the marker size will be double overplot_markersize so
the color is visible.
assessment_overplot_category : dict
Lookup to categorize assessments into groups. This allows using
multiple terms for the same quality control level of failure.
Also allows adding more to the defaults.
assessment_overplot_category_color : dict
Lookup to match overplot category color to assessment grouping.
force_line_plot : boolean
Option to plot 2D data as 1D line plots.
labels : boolean or list
Option to overwrite the legend labels. Must have same dimensions as
number of lines plotted.
cbar_label : str
Option to overwrite default colorbar label.
secondary_y : boolean
Option to plot on secondary y axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
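Examples
--------
A minimal sketch, assuming the dataset holds a 1-D variable named
'temp_mean' (the name is only illustrative):
.. code-block:: python
    ds = act.read_netcdf(the_file)
    display = act.plotting.TimeSeriesDisplay(ds, figsize=(10, 5))
    display.plot('temp_mean', day_night_background=True)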
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
if cbar_label is None:
cbar_default = ytitle
if len(dim) > 1:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
units = ytitle
if 'units' in ydata.attrs.keys():
units = ydata.attrs['units']
ytitle = ''.join(['(', units, ')'])
else:
units = ''
ytitle = dim[1]
# Create labels if 2d as 1d
if force_line_plot is True:
if labels is True:
labels = [' '.join([str(d), units]) for d in ydata.values]
ytitle = f"({data.attrs['units']})"
ydata = None
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set up secondary y axis if requested
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
if ydata is None:
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# If limiting data being plotted use masked arrays
# Need to do it this way because of autoscale() method
if abs_limits[0] is not None and abs_limits[1] is not None:
data = np.ma.masked_outside(
data, abs_limits[0], abs_limits[1])
elif abs_limits[0] is not None and abs_limits[1] is None:
data = np.ma.masked_less_equal(
data, abs_limits[0])
elif abs_limits[0] is None and abs_limits[1] is not None:
data = np.ma.masked_greater_equal(
data, abs_limits[1])
# Plot the data
lines = ax.plot(xdata, data, '.', **kwargs)
# Check if we need to call legend method after plotting. This is only
# called when no assessment overplot is called.
add_legend = False
if 'label' in kwargs.keys():
add_legend = True
# Overplot failing data if requested
if assessment_overplot:
# If we are doing a forced line plot from 2D data we need to manage
# the legend labels. Build arrays to hold the labels of failing QC
# because they are not set when the labels keyword is not set.
if not isinstance(labels, list) and add_legend is False:
labels = []
lines = []
# For forced line plot need to plot QC behind point instead of
# on top of point.
zorder = None
if force_line_plot or overplot_behind:
zorder = 0
overplot_markersize *= 2.
for assessment, categories in assessment_overplot_category.items():
flag_data = self._obj[dsname].qcfilter.get_masked_data(
field, rm_assessments=categories, return_inverse=True)
if np.invert(flag_data.mask).any() and np.isfinite(flag_data).any():
try:
flag_data.mask = np.logical_or(data.mask, flag_data.mask)
except AttributeError:
pass
qc_ax = ax.plot(
xdata, flag_data, marker=overplot_marker, linestyle='',
markersize=overplot_markersize,
color=assessment_overplot_category_color[assessment],
label=assessment, zorder=zorder)
# If labels keyword is set need to add labels for calling legend
if isinstance(labels, list):
# If plotting forced_line_plot need to subset the Line2D object
# so we don't have more than one added to legend.
if len(qc_ax) > 1:
lines.extend(qc_ax[:1])
else:
lines.extend(qc_ax)
labels.append(assessment)
add_legend = True
# Add legend if labels are available
if isinstance(labels, list):
ax.legend(lines, labels)
elif add_legend:
ax.legend()
else:
# Add in nans to ensure the data are not streaking
if add_nan is True:
xdata, data = data_utils.add_in_nan(xdata, data)
# Sets shading parameter to auto. Matplotlib will check dimensions.
# If X, Y and C have the same dimensions, shading is set to nearest.
# If X and Y dimensions are 1 greater than C, shading is set to flat.
mesh = ax.pcolormesh(np.asarray(xdata), ydata, data.transpose(),
shading=set_shading, cmap=cmap, edgecolors='face',
**kwargs)
# Set Title
if set_title is None:
set_title = ' '.join([dsname, field, 'on',
dt_utils.numpy_to_arm_date(
self._obj[dsname].time.values[0])])
if secondary_y is False:
ax.set_title(set_title)
# Set YTitle
ax.set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Set Y Limit
if y_rng is not None:
self.set_yrng(y_rng)
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if ydata is None:
if abs_limits[0] is not None or abs_limits[1] is not None:
our_data = data
else:
our_data = data.values
else:
our_data = ydata
finite = np.isfinite(our_data)
if finite.any():
our_data = our_data[finite]
if invert_y_axis is False:
yrng = [np.min(our_data), np.max(our_data)]
else:
yrng = [np.max(our_data), np.min(our_data)]
else:
yrng = [0, 1]
# Check if the current range is outside of the new range and only set
# values that work for all data plotted.
current_yrng = ax.get_ylim()
if yrng[0] > current_yrng[0]:
yrng[0] = current_yrng[0]
if yrng[1] < current_yrng[1]:
yrng[1] = current_yrng[1]
# Set y range the normal way if not secondary y
# If secondary, just use set_ylim
if secondary_y is False:
self.set_yrng(yrng, subplot_index)
else:
ax.set_ylim(yrng)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
ax.set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=cbar_default, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=''.join(['(', cbar_label, ')']),
subplot_index=subplot_index)
return ax
def plot_barbs_from_spd_dir(self, dir_field, spd_field, pres_field=None,
dsname=None, **kwargs):
"""
This procedure will make a wind barb plot timeseries.
If a pressure field is given and the wind fields are 1D, which, for
example, would occur if one wants to plot a timeseries of
rawinsonde data, then a time-height cross section of
winds will be made.
Note: This procedure calls plot_barbs_from_u_v and will take in the
same keyword arguments as that procedure.
Parameters
----------
dir_field : str
The name of the field specifying the wind direction in degrees.
0 degrees is defined to be north and increases clockwise like
what is used in standard meteorological notation.
spd_field : str
The name of the field specifying the wind speed in m/s.
pres_field : str
The name of the field specifying pressure or height. If using
height coordinates, then we recommend setting invert_y_axis
to False.
dsname : str
The name of the datastream to plot. Setting to None will make
ACT attempt to autodetect this.
kwargs : dict
Any additional keyword arguments will be passed into
:func:`act.plotting.TimeSeriesDisplay.plot_barbs_from_u_v`.
Returns
-------
the_ax : matplotlib axis handle
The handle to the axis where the plot was made on.
Examples
--------
.. code-block:: python
sonde_ds = act.io.armfiles.read_netcdf(
act.tests.sample_files.EXAMPLE_TWP_SONDE_WILDCARD)
BarbDisplay = act.plotting.TimeSeriesDisplay(
{'sonde_darwin': sonde_ds}, figsize=(10,5))
BarbDisplay.plot_barbs_from_spd_dir('deg', 'wspd', 'pres',
num_barbs_x=20)
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Make temporary fields called temp_u, temp_v
spd = self._obj[dsname][spd_field]
dir = self._obj[dsname][dir_field]
tempu = -np.sin(np.deg2rad(dir)) * spd
tempv = -np.cos(np.deg2rad(dir)) * spd
self._obj[dsname]["temp_u"] = deepcopy(self._obj[dsname][spd_field])
self._obj[dsname]["temp_v"] = deepcopy(self._obj[dsname][spd_field])
self._obj[dsname]["temp_u"].values = tempu
self._obj[dsname]["temp_v"].values = tempv
the_ax = self.plot_barbs_from_u_v("temp_u", "temp_v", pres_field,
dsname, **kwargs)
del self._obj[dsname]["temp_u"], self._obj[dsname]["temp_v"]
return the_ax
def plot_barbs_from_u_v(self, u_field, v_field, pres_field=None,
dsname=None, subplot_index=(0, ),
set_title=None,
day_night_background=False,
invert_y_axis=True,
num_barbs_x=20, num_barbs_y=20,
use_var_for_y=None, **kwargs):
"""
This function will plot a wind barb timeseries from u and v wind
data. If pres_field is given, a time-height series will be plotted
from 1-D wind data.
Parameters
----------
u_field : str
The name of the field containing the U component of the wind.
v_field : str
The name of the field containing the V component of the wind.
pres_field : str or None
The name of the field containing the pressure or height. Set
to None to not use this.
dsname : str or None
The name of the datastream to plot. Setting to None will make
ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to make the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to True to plot a day/night background.
invert_y_axis : bool
Set to True to invert the y axis (i.e. for plotting pressure as
the height coordinate).
num_barbs_x : int
The number of wind barbs to plot in the x axis.
num_barbs_y : int
The number of wind barbs to plot in the y axis.
cmap : matplotlib.colors.LinearSegmentedColormap
A color map to use with wind barbs. If this is set the plt.barbs
routine will be passed the C parameter scaled as sqrt of sum of the
squares and used with the passed in color map. A colorbar will also
be added. Setting the limits of the colorbar can be done with 'clim'.
Setting this changes the wind barbs from black to colors.
use_var_for_y : str
Set this to the name of a data variable in the Dataset to use as the
y-axis variable instead of the default dimension. Useful for instances
where data has an index-based dimension instead of a height-based
dimension. If shapes of arrays do not match it will automatically
revert back to the original ydata.
**kwargs : keyword arguments
Additional keyword arguments will be passed into plt.barbs.
Returns
-------
ax : matplotlib axis handle
The axis handle that contains the reference to the
constructed plot.
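Examples
--------
A minimal sketch, assuming the dataset holds u and v wind components
named 'u_wind' and 'v_wind' (the names are only illustrative):
.. code-block:: python
    display = act.plotting.TimeSeriesDisplay({'met': met_ds}, figsize=(10, 4))
    display.plot_barbs_from_u_v('u_wind', 'v_wind', num_barbs_x=30)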
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
u = self._obj[dsname][u_field].values
v = self._obj[dsname][v_field].values
dim = list(self._obj[dsname][u_field].dims)
xdata = self._obj[dsname][dim[0]].values
num_x = xdata.shape[-1]
barb_step_x = round(num_x / num_barbs_x)
if barb_step_x == 0:
barb_step_x = 1
if len(dim) > 1 and pres_field is None:
if use_var_for_y is None:
ydata = self._obj[dsname][dim[1]]
else:
ydata = self._obj[dsname][use_var_for_y]
ydata_dim1 = self._obj[dsname][dim[1]]
if np.shape(ydata) != np.shape(ydata_dim1):
ydata = ydata_dim1
if 'units' in ydata.attrs:
units = ydata.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
num_y = ydata.shape[0]
barb_step_y = round(num_y / num_barbs_y)
if barb_step_y == 0:
barb_step_y = 1
xdata, ydata = np.meshgrid(xdata, ydata, indexing='ij')
elif pres_field is not None:
# What we will do here is do a nearest-neighbor interpolation
# for each member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator(
(xdata, pres.values), u, rescale=True)
v_interp = NearestNDInterpolator(
(xdata, pres.values), v, rescale=True)
barb_step_x = 1
barb_step_y = 1
x_times = pd.date_range(xdata.min(), xdata.max(),
periods=num_barbs_x)
if num_barbs_y == 1:
y_levels = pres.mean()
else:
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres),
num_barbs_y)
xdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
u = u_interp(xdata, ydata)
v = v_interp(xdata, ydata)
if 'units' in pres.attrs:
units = pres.attrs['units']
else:
units = ''
ytitle = ''.join(['(', units, ')'])
else:
ydata = None
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
if ydata is None:
ydata = np.ones(xdata.shape)
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x], 2) +
np.power(v[::barb_step_x], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x], map_color,
**kwargs)
plt.colorbar(ax, ax=[self.axes[subplot_index]],
label='Wind Speed (' +
self._obj[dsname][u_field].attrs['units'] + ')')
else:
self.axes[subplot_index].barbs(xdata[::barb_step_x],
ydata[::barb_step_x],
u[::barb_step_x],
v[::barb_step_x],
**kwargs)
self.axes[subplot_index].set_yticks([])
else:
if 'cmap' in kwargs.keys():
map_color = np.sqrt(np.power(u[::barb_step_x, ::barb_step_y], 2) +
np.power(v[::barb_step_x, ::barb_step_y], 2))
map_color[np.isnan(map_color)] = 0
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y], map_color,
**kwargs)
plt.colorbar(ax, ax=[self.axes[subplot_index]],
label='Wind Speed (' +
self._obj[dsname][u_field].attrs['units'] + ')')
else:
ax = self.axes[subplot_index].barbs(
xdata[::barb_step_x, ::barb_step_y],
ydata[::barb_step_x, ::barb_step_y],
u[::barb_step_x, ::barb_step_y],
v[::barb_step_x, ::barb_step_y],
**kwargs)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join([dsname, 'on',
dt_utils.numpy_to_arm_date(
self._obj[dsname].time.values[0])])
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [xdata.min(), xdata.max()]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = xdata
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def plot_time_height_xsection_from_1d_data(
self, data_field, pres_field, dsname=None, subplot_index=(0, ),
set_title=None, day_night_background=False, num_time_periods=20,
num_y_levels=20, invert_y_axis=True, cbar_label=None,
set_shading='auto', **kwargs):
"""
This will plot a time-height cross section from 1D datasets using
nearest neighbor interpolation on a regular time by height grid.
All that is needed are a data variable and a height variable.
Parameters
----------
data_field : str
The name of the field to plot.
pres_field : str
The name of the height or pressure field to plot.
dsname : str or None
The name of the datastream to plot
subplot_index : 2-tuple
The index of the subplot to create the plot on.
set_title : str or None
The title of the plot.
day_night_background : bool
Set to true to plot the day/night background.
num_time_periods : int
Set to determine how many time periods. Setting to None
will do one time period per day.
num_y_levels : int
The number of levels in the y axis to use.
invert_y_axis : bool
Set to true to invert the y-axis (recommended for
pressure coordinates).
cbar_label : str
Option to overwrite default colorbar label.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
Additional keyword arguments will be passed
into :func:`plt.pcolormesh`
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle pointing to the plot.
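Examples
--------
A minimal sketch, assuming 1-D sounding-like data with a relative
humidity field 'rh' and a pressure field 'pres' (the names are only
illustrative):
.. code-block:: python
    display = act.plotting.TimeSeriesDisplay({'sonde': sonde_ds}, figsize=(8, 6))
    display.plot_time_height_xsection_from_1d_data(
        'rh', 'pres', num_time_periods=25)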
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2"
"or more datasets in the TimeSeriesDisplay"
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
dim = list(self._obj[dsname][data_field].dims)
if len(dim) > 1:
raise ValueError(("plot_time_height_xsection_from_1d_data only "
"supports 1-D datasets. For datasets with 2 or "
"more dimensions use plot()."))
# Get data and dimensions
data = self._obj[dsname][data_field].values
xdata = self._obj[dsname][dim[0]].values
# What we will do here is do a nearest-neighbor interpolation for each
# member of the series. Coordinates are time, pressure
pres = self._obj[dsname][pres_field]
u_interp = NearestNDInterpolator(
(xdata, pres.values), data, rescale=True)
# Mask points where we have no data
# Count number of unique days
x_times = pd.date_range(xdata.min(), xdata.max(),
periods=num_time_periods)
y_levels = np.linspace(np.nanmin(pres), np.nanmax(pres), num_y_levels)
tdata, ydata = np.meshgrid(x_times, y_levels, indexing='ij')
data = u_interp(tdata, ydata)
ytitle = ''.join(['(', pres.attrs['units'], ')'])
units = (data_field + ' (' +
self._obj[dsname][data_field].attrs['units'] + ')')
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
mesh = self.axes[subplot_index].pcolormesh(
x_times, y_levels, np.transpose(data), shading=set_shading,
**kwargs)
if day_night_background is True:
self.day_night_background(subplot_index=subplot_index, dsname=dsname)
# Set Title
if set_title is None:
set_title = ' '.join(
[dsname, 'on',
dt_utils.numpy_to_arm_date(self._obj[dsname].time.values[0])])
self.axes[subplot_index].set_title(set_title)
# Set YTitle
if 'ytitle' in locals():
self.axes[subplot_index].set_ylabel(ytitle)
# Set X Limit - We want the same time axes for all subplots
time_rng = [x_times[-1], x_times[0]]
self.set_xrng(time_rng, subplot_index)
# Set Y Limit
if hasattr(self, 'yrng'):
# Make sure that the yrng is not just the default
if not np.all(self.yrng[subplot_index] == 0):
self.set_yrng(self.yrng[subplot_index], subplot_index)
else:
if ydata is None:
our_data = data.values
else:
our_data = ydata
if np.isfinite(our_data).any():
if invert_y_axis is False:
yrng = [np.nanmin(our_data), np.nanmax(our_data)]
else:
yrng = [np.nanmax(our_data), np.nanmin(our_data)]
else:
yrng = [0, 1]
self.set_yrng(yrng, subplot_index)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
if ydata is not None:
if cbar_label is None:
self.add_colorbar(mesh, title=units, subplot_index=subplot_index)
else:
self.add_colorbar(mesh, title=cbar_label, subplot_index=subplot_index)
myFmt = common.get_date_format(days)
self.axes[subplot_index].xaxis.set_major_formatter(myFmt)
return self.axes[subplot_index]
def time_height_scatter(
self, data_field=None, dsname=None, cmap='rainbow',
alt_label=None, alt_field='alt', cb_label=None, **kwargs):
"""
Create a time series plot of altitude and data variable with
color also indicating value with a color bar. The Color bar is
positioned to serve both as the indicator of the color intensity
and the second y-axis.
Parameters
----------
data_field : str
Name of data field in the object to plot on second y-axis.
dsname : str or None
The name of the datastream to plot.
cmap : str
Colorbar color map to use.
alt_label : str
Altitude first y-axis label to use. If None, will try to use
long_name and units.
alt_field : str
Name of the altitude field in the object to plot on the first y-axis.
cb_label : str
Colorbar label to use. If not set will try to use
long_name and units.
**kwargs : keyword arguments
Any other keyword arguments that will be passed
into TimeSeriesDisplay.plot module when the figure
is made.
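Examples
--------
A minimal sketch, assuming the dataset contains an 'alt' field and a
1-D variable named 'tdry' (the names are only illustrative):
.. code-block:: python
    display = act.plotting.TimeSeriesDisplay(sonde_ds, figsize=(8, 4))
    display.time_height_scatter('tdry')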
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][data_field]
altitude = self._obj[dsname][alt_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
if alt_label is None:
try:
alt_label = (altitude.attrs['long_name'] +
''.join([' (', altitude.attrs['units'], ')']))
except KeyError:
alt_label = alt_field
if cb_label is None:
try:
cb_label = (data.attrs['long_name'] +
''.join([' (', data.attrs['units'], ')']))
except KeyError:
cb_label = data_field
colorbar_map = plt.cm.get_cmap(cmap)
self.fig.subplots_adjust(left=0.1, right=0.86,
bottom=0.16, top=0.91)
ax1 = self.plot(alt_field, color='black', **kwargs)
ax1.set_ylabel(alt_label)
ax2 = ax1.twinx()
sc = ax2.scatter(xdata.values, data.values, c=data.values,
marker='.', cmap=colorbar_map)
cbaxes = self.fig.add_axes(
[self.fig.subplotpars.right + 0.02, self.fig.subplotpars.bottom,
0.02, self.fig.subplotpars.top - self.fig.subplotpars.bottom])
cbar = plt.colorbar(sc, cax=cbaxes)
ax2.set_ylim(cbar.mappable.get_clim())
cbar.ax.set_ylabel(cb_label)
ax2.set_yticklabels([])
return self.axes[0]
def qc_flag_block_plot(
self, data_field=None, dsname=None,
subplot_index=(0, ), time_rng=None, assessment_color=None,
edgecolor='face', set_shading='auto', **kwargs):
"""
Create a time series plot of embedded quality control values
using broken barh plotting.
Parameters
----------
data_field : str
Name of data field in the object to plot corresponding quality
control.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
time_rng : tuple or list
List or tuple with (min, max) values to set the x-axis range limits.
assessment_color : dict
Dictionary lookup to override the default assessment-to-color mapping.
Make sure the assessment words are spelled with the correct case.
set_shading : string
Option to set the matplotlib.pcolormesh shading parameter.
Defaults to 'auto'.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.broken_barh`.
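Examples
--------
A minimal sketch, assuming the dataset carries an embedded quality
control variable for 'temp_mean' (the name is only illustrative):
.. code-block:: python
    display = act.plotting.TimeSeriesDisplay(ds, subplot_shape=(2,))
    display.plot('temp_mean', subplot_index=(0,))
    display.qc_flag_block_plot('temp_mean', subplot_index=(1,))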
"""
# Color to plot associated with assessment.
color_lookup = {'Bad': 'red',
'Incorrect': 'red',
'Indeterminate': 'orange',
'Suspect': 'orange',
'Missing': 'darkgray',
'Not Failing': 'green',
'Acceptable': 'green'}
if assessment_color is not None:
for asses, color in assessment_color.items():
color_lookup[asses] = color
if asses == 'Incorrect':
color_lookup['Bad'] = color
if asses == 'Suspect':
color_lookup['Indeterminate'] = color
# Set up list of test names to use for missing values
missing_val_long_names = ['Value equal to missing_value*',
'Value set to missing_value*',
'Value is equal to missing_value*',
'Value is set to missing_value*']
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Set up or get current plot figure
if self.fig is None:
self.fig = plt.figure()
# Set up or get current axes
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
ax = self.axes[subplot_index]
# Set X Limit - We want the same time axes for all subplots
data = self._obj[dsname][data_field]
dim = list(self._obj[dsname][data_field].dims)
xdata = self._obj[dsname][dim[0]]
# Get data and attributes
qc_data_field = self._obj[dsname].qcfilter.check_for_ancillary_qc(data_field,
add_if_missing=False,
cleanup=False)
if qc_data_field is None:
raise ValueError(f"No quality control ancillary variable in Dataset for {data_field}")
flag_masks = self._obj[dsname][qc_data_field].attrs['flag_masks']
flag_meanings = self._obj[dsname][qc_data_field].attrs['flag_meanings']
flag_assessments = self._obj[dsname][qc_data_field].attrs['flag_assessments']
# Get time ranges for green blocks
time_delta = determine_time_delta(xdata.values)
barh_list_green = reduce_time_ranges(xdata.values, time_delta=time_delta,
broken_barh=True)
# Set background to gray to indicate data not available
ax.set_facecolor('dimgray')
# Check if plotting 2D data vs 1D data. 2D data will be summarized by
# assessment category instead of showing each test.
data_shape = self._obj[dsname][qc_data_field].shape
if len(data_shape) > 1:
cur_assessments = list(set(flag_assessments))
cur_assessments.sort()
cur_assessments.reverse()
qc_data = np.full(data_shape, -1, dtype=np.int16)
plot_colors = []
tick_names = []
index = self._obj[dsname][qc_data_field].values == 0
if index.any():
qc_data[index] = 0
plot_colors.append(color_lookup['Not Failing'])
tick_names.append('Not Failing')
for ii, assess in enumerate(cur_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
ii += 1
assess_data = self._obj[dsname].qcfilter.get_masked_data(data_field,
rm_assessments=assess)
if assess_data.mask.any():
qc_data[assess_data.mask] = ii
plot_colors.append(color_lookup[assess])
tick_names.append(assess)
# Overwrite missing data. Not sure if we want to do this because VAPs set
# the value to missing but the test is set to Bad. This tries to overcome that
# by looking for correct test description that would only indicate the values
# are missing not that they are set to missing by a test... most likely.
missing_test_nums = []
for ii, flag_meaning in enumerate(flag_meanings):
# Check if the bit set is indicating missing data.
for val in missing_val_long_names:
if re_search(val, flag_meaning):
test_num = parse_bit(flag_masks[ii])[0]
missing_test_nums.append(test_num)
assess_data = self._obj[dsname].qcfilter.get_masked_data(data_field,
rm_tests=missing_test_nums)
if assess_data.mask.any():
qc_data[assess_data.mask] = -1
plot_colors.append(color_lookup['Missing'])
tick_names.append('Missing')
# Create a masked array to allow not plotting where values are missing
qc_data = np.ma.masked_equal(qc_data, -1)
dims = self._obj[dsname][qc_data_field].dims
xvalues = self._obj[dsname][dims[0]].values
yvalues = self._obj[dsname][dims[1]].values
cMap = mplcolors.ListedColormap(plot_colors)
mesh = ax.pcolormesh(xvalues, yvalues, np.transpose(qc_data),
cmap=cMap, vmin=0, shading=set_shading)
divider = make_axes_locatable(ax)
# Determine correct placement of words on colorbar
tick_nums = ((np.arange(0, len(tick_names) * 2 + 1) /
(len(tick_names) * 2) * np.nanmax(qc_data))[1::2])
cax = divider.append_axes('bottom', size='5%', pad=0.3)
cbar = self.fig.colorbar(mesh, cax=cax, orientation='horizontal', spacing='uniform',
ticks=tick_nums, shrink=0.5)
cbar.ax.set_xticklabels(tick_names)
# Set YTitle
dim_name = list(set(self._obj[dsname][qc_data_field].dims) - set(['time']))
try:
ytitle = f"{dim_name[0]} ({self._obj[dsname][dim_name[0]].attrs['units']})"
ax.set_ylabel(ytitle)
except KeyError:
pass
# Add which tests were set as text to the plot
unique_values = []
for ii in np.unique(self._obj[dsname][qc_data_field].values):
unique_values.extend(parse_bit(ii))
if len(unique_values) > 0:
unique_values = list(set(unique_values))
unique_values.sort()
unique_values = [str(ii) for ii in unique_values]
self.fig.text(0.5, -0.35, f"QC Tests Tripped: {', '.join(unique_values)}",
transform=ax.transAxes, horizontalalignment='center',
verticalalignment='center', fontweight='bold')
else:
test_nums = []
for ii, assess in enumerate(flag_assessments):
if assess not in color_lookup:
color_lookup[assess] = list(mplcolors.CSS4_COLORS.keys())[ii]
# Plot green data first.
ax.broken_barh(barh_list_green, (ii, ii + 1), facecolors=color_lookup['Not Failing'],
edgecolor=edgecolor, **kwargs)
# Get test number from flag_mask bitpacked number
test_nums.append(parse_bit(flag_masks[ii]))
# Get masked array data to use mask for finding if/where test is set
data = self._obj[dsname].qcfilter.get_masked_data(
data_field, rm_tests=test_nums[-1])
if np.any(data.mask):
# Get time ranges from time and masked data
barh_list = reduce_time_ranges(xdata.values[data.mask],
time_delta=time_delta,
broken_barh=True)
# Check if the bit set is indicating missing data. If so change
# to different plotting color than what is in flag_assessments.
for val in missing_val_long_names:
if re_search(val, flag_meanings[ii]):
assess = "Missing"
break
# Lay down blocks of tripped tests using correct color
ax.broken_barh(barh_list, (ii, ii + 1),
facecolors=color_lookup[assess],
edgecolor=edgecolor, **kwargs)
# Add test description to plot.
ax.text(xdata.values[0], ii + 0.5, ' ' + flag_meanings[ii], va='center')
# Change y ticks to test number
plt.yticks([ii + 0.5 for ii in range(0, len(test_nums))],
labels=['Test ' + str(ii[0]) for ii in test_nums])
# Set ylimit to number of tests plotted
ax.set_ylim(0, len(flag_assessments))
# Set X Limit - We want the same time axes for all subplots
if not hasattr(self, 'time_rng'):
if time_rng is not None:
self.time_rng = list(time_rng)
else:
self.time_rng = [xdata.min().values, xdata.max().values]
self.set_xrng(self.time_rng, subplot_index)
# Get X format - We want the same time axes for all subplots
if hasattr(self, 'time_fmt'):
ax.xaxis.set_major_formatter(self.time_fmt)
else:
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
self.time_fmt = myFmt
return self.axes[subplot_index]
def fill_between(self, field, dsname=None, subplot_index=(0, ),
set_title=None, secondary_y=False, **kwargs):
"""
Makes a fill_between plot, based on matplotlib
Parameters
----------
field : str
The name of the field to plot.
dsname : None or str
If there is more than one datastream in the display object the
name of the datastream needs to be specified. If set to None and
there is only one datastream ACT will use the sole datastream
in the object.
subplot_index : 1 or 2D tuple, list, or array
The index of the subplot to set the x range of.
set_title : str
The title for the plot.
secondary_y : boolean
Option to indicate if the data should be plotted on second y-axis.
**kwargs : keyword arguments
The keyword arguments for :func:`plt.plot` (1D timeseries) or
:func:`plt.pcolormesh` (2D timeseries).
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle of the plot.
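Examples
--------
A minimal sketch, assuming a 1-D accumulated precipitation variable
named 'precip_accum' (the name is only illustrative); plot() is called
first so the shared time axis limits exist:
.. code-block:: python
    display = act.plotting.TimeSeriesDisplay(ds)
    display.plot('precip_accum')
    display.fill_between('precip_accum', alpha=0.4)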
"""
if dsname is None and len(self._obj.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._obj.keys())[0]
# Get data and dimensions
data = self._obj[dsname][field]
dim = list(self._obj[dsname][field].dims)
xdata = self._obj[dsname][dim[0]]
if 'units' in data.attrs:
ytitle = ''.join(['(', data.attrs['units'], ')'])
else:
ytitle = field
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes()])
self.fig.add_axes(self.axes[0])
# Set ax to appropriate axis
if secondary_y is False:
ax = self.axes[subplot_index]
else:
ax = self.axes[subplot_index].twinx()
ax.fill_between(xdata.values, data, **kwargs)
# Set X Format
if len(subplot_index) == 1:
days = (self.xrng[subplot_index, 1] - self.xrng[subplot_index, 0])
else:
days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
self.xrng[subplot_index[0], subplot_index[1], 0])
myFmt = common.get_date_format(days)
ax.xaxis.set_major_formatter(myFmt)
# Set X format - We want the same time axes for all subplots
if not hasattr(self, 'time_fmt'):
self.time_fmt = myFmt
# Put on an xlabel, but only if we are making the bottom-most plot
if subplot_index[0] == self.axes.shape[0] - 1:
self.axes[subplot_index].set_xlabel('Time [UTC]')
# Set YTitle
ax.set_ylabel(ytitle)
# Set Title
if set_title is None:
set_title = ' '.join([dsname, field, 'on',
dt_utils.numpy_to_arm_date(
self._obj[dsname].time.values[0])])
if secondary_y is False:
ax.set_title(set_title)
return self.axes[subplot_index]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""main function to convert user scripts"""
import os
import pandas as pd
import util_global
from conver_by_ast import conver_ast
from file_op import mkdir
from file_op import mkdir_and_copyfile
from file_op import write_report_terminator
from file_op import abs_join
from file_op import get_api_statistic
from file_op import adjust_index
from util import check_path_length
from util import log_warning
def conver():
"""The entry point to convert Tensorflow script"""
print("Begin conver, input file: " + util_global.get_value('input') + '\n')
out_path = util_global.get_value('output')
dst_path = os.path.split(util_global.get_value('input').rstrip('\\/'))[-1]
dst_path_new = dst_path + util_global.get_value('timestap')
conver_path = os.walk(util_global.get_value('input'))
report_dir = util_global.get_value('report')
mkdir(report_dir)
report_xlsx = os.path.join(report_dir, 'api_analysis_report.xlsx')
util_global.set_value('generate_dir_report', pd.DataFrame())
for path, _, file_list in conver_path:
for file_name in file_list:
out_path_dst = abs_join(dst_path_new, path.split(util_global.get_value('input'))[1])
file_path = os.path.join(path, file_name).replace('\\', '/')
if not check_path_length(file_path):
content = "".join(["The file:", file_path, " length is invalid, skip convert."])
log_warning(content)
continue
content = "".join(["Begin conver file: ", file_path])
print(content)
threshold_file_size = 10 * 1024 * 1024
if file_name.endswith(".py"):
if os.path.getsize(file_path) > threshold_file_size:
content = "".join(["The file:", file_path, " size is over 10M, skip convert."])
log_warning(content)
continue
util_global.set_value('path', file_path)
mkdir(os.path.join(out_path, out_path_dst))
conver_ast(path, out_path_dst, file_name)
if util_global.get_value('need_conver', False):
content = "".join(["Finish conver file: ", file_path, '\n'])
print(content)
write_report_terminator(content)
else:
mkdir_and_copyfile(path, abs_join(out_path, out_path_dst), file_name)
else:
mkdir_and_copyfile(path, abs_join(out_path, out_path_dst), file_name)
adjust_index()
analysis_report = util_global.get_value('generate_dir_report')
if analysis_report.empty:
print('No api data in the report')
else:
analysis_report.to_excel(report_xlsx, index=True)
get_api_statistic(analysis_report)
print("Finish conver, output file: " + out_path + "; report file: " + util_global.get_value('report'))
from pygolang.io_callback import IO
from pygolang.errors import StopPyGoLangInterpreterError
class FakeIO(IO):
def __init__(self, stdin_as_str_list):
"""
:param list[str] stdin_as_str_list: list(or iterable) of strings, to
simulate the lines from stdin
"""
self.stdin = stdin_as_str_list
self.stdout = []
self.stderr = []
self.input_generator = None
# super().__init__()
def from_stdin(self):
def iterate_over_stdin():
for line in self.stdin:
yield line
raise StopPyGoLangInterpreterError
if not self.input_generator:
self.input_generator = iterate_over_stdin()
return next(self.input_generator)
def to_stdout(self, stuff):
self.stdout.append(stuff)
def to_stderr(self, stuff):
self.stderr.append(stuff)
def interpreter_prompt(self):
pass
def newline(self):
pass
def format_stderr_for_debugging(self):
return '\n'.join(str(e) for e in self.stderr)
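# A minimal usage sketch based only on the class above (the interpreter
# lines fed to stdin are illustrative):
# fake_io = FakeIO(["a = 1", "a"])
# first_line = fake_io.from_stdin() # -> "a = 1"
# fake_io.to_stdout("result")
# assert fake_io.stdout == ["result"]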
from flask import Flask, render_template, request, redirect, url_for
from joblib import load
from auth import get_related_tweets
pipeline = load("twitter_classification.joblib")
def requestResults(name):
tweets = get_related_tweets(name)
tweets['prediction'] = pipeline.predict(tweets['tweet_text'])
data = str(tweets.prediction.value_counts()) + '\n\n'
return data + str(tweets)
app = Flask(__name__)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/', methods=['POST', 'GET'])
def get_data():
if request.method == 'POST':
user = request.form['search']
return redirect(url_for('success', name=user))
@app.route('/success/<name>')
def success(name):
return "<xmp>" + str(requestResults(name)) + " </xmp> "
if __name__ == '__main__':
    app.run(debug=True)
# memote/suite/tests/test_annotation.py
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests performed on the annotations of an instance of ``cobra.Model``."""
from __future__ import absolute_import, division
from builtins import dict
import pytest
import memote.support.annotation as annotation
from memote.utils import annotate, truncate, get_ids, wrapper
@annotate(title="Presence of Metabolite Annotation", format_type="count")
def test_metabolite_annotation_presence(model):
"""
Expect all metabolites to have a non-empty annotation attribute.
This test checks if any annotations at all are present in the SBML
annotations field for each metabolite, irrespective of the type of
annotation i.e. specific database cross-references, ontology terms,
additional information. For this test to pass the model is expected to
have metabolites and each of them should have some form of annotation.
Implementation:
Check if the annotation attribute of each cobra.Metabolite object of the
model is unset or empty.
"""
ann = test_metabolite_annotation_presence.annotation
ann["data"] = get_ids(annotation.find_components_without_annotation(
model, "metabolites"))
ann["metric"] = len(ann["data"]) / len(model.metabolites)
ann["message"] = wrapper.fill(
"""A total of {} metabolites ({:.2%}) lack any form of annotation:
{}""".format(len(ann["data"]), ann["metric"], truncate(ann["data"])))
assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Presence of Reaction Annotation", format_type="count")
def test_reaction_annotation_presence(model):
"""
Expect all reactions to have a non-empty annotation attribute.
This test checks if any annotations at all are present in the SBML
annotations field for each reaction, irrespective of the type of
annotation i.e. specific database cross-references, ontology terms,
additional information. For this test to pass the model is expected to
have reactions and each of them should have some form of annotation.
Implementation:
Check if the annotation attribute of each cobra.Reaction object of the
model is unset or empty.
"""
ann = test_reaction_annotation_presence.annotation
ann["data"] = get_ids(annotation.find_components_without_annotation(
model, "reactions"))
ann["metric"] = len(ann["data"]) / len(model.reactions)
ann["message"] = wrapper.fill(
"""A total of {} reactions ({:.2%}) lack any form of annotation:
{}""".format(len(ann["data"]), ann["metric"], truncate(ann["data"])))
assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Presence of Gene Annotation", format_type="count")
def test_gene_product_annotation_presence(model):
"""
Expect all genes to have a non-empty annotation attribute.
This test checks if any annotations at all are present in the SBML
annotations field (extended by FBC package) for each gene product,
irrespective of the type of annotation i.e. specific database,
cross-references, ontology terms, additional information. For this test to
pass the model is expected to have genes and each of them should have some
form of annotation.
Implementation:
Check if the annotation attribute of each cobra.Gene object of the
model is unset or empty.
"""
ann = test_gene_product_annotation_presence.annotation
ann["data"] = get_ids(annotation.find_components_without_annotation(
model, "genes"))
ann["metric"] = len(ann["data"]) / len(model.genes)
ann["message"] = wrapper.fill(
"""A total of {} genes ({:.2%}) lack any form of
annotation: {}""".format(
len(ann["data"]), ann["metric"], truncate(ann["data"])))
assert len(ann["data"]) == 0, ann["message"]
@pytest.mark.parametrize("db", list(annotation.METABOLITE_ANNOTATIONS))
@annotate(title="Metabolite Annotations Per Database",
format_type="percent", message=dict(), data=dict(), metric=dict())
def test_metabolite_annotation_overview(model, db):
"""
Expect all metabolites to have annotations from common databases.
Specific database cross-references are paramount to mapping information.
To provide references to as many databases as possible helps to make the
metabolic model more accessible to other researchers. This does not only
facilitate the use of a model in a broad array of computational pipelines,
it also promotes the metabolic model itself to become an organism-specific
knowledge base.
For this test to pass, each metabolite annotation should contain
cross-references to a number of databases. The current selection is
listed in `annotation.py`, but an ongoing discussion can be found at
https://github.com/opencobra/memote/issues/332. For each database this
test checks for the presence of its corresponding namespace ID to comply
with the MIRIAM guidelines i.e. they have to match those defined on
https://identifiers.org/.
Since each database is quite different and some potentially incomplete, it
may not be feasible to achieve 100% coverage for each of them. Generally
it should be possible, however, to obtain cross-references to at least
one of the databases for all metabolites consistently.
Implementation:
Check if the keys of the annotation attribute of each cobra.Metabolite of
the model match with a selection of common biochemical databases. The
annotation attribute of cobrapy components is a dictionary of
key:value pairs.
"""
ann = test_metabolite_annotation_overview.annotation
ann["data"][db] = get_ids(
annotation.generate_component_annotation_overview(
model.metabolites, db))
ann["metric"][db] = len(ann["data"][db]) / len(model.metabolites)
ann["message"][db] = wrapper.fill(
"""The following {} metabolites ({:.2%}) lack annotation for {}:
{}""".format(len(ann["data"][db]), ann["metric"][db], db,
truncate(ann["data"][db])))
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.REACTION_ANNOTATIONS))
@annotate(title="Reaction Annotations Per Database",
format_type="percent", message=dict(), data=dict(), metric=dict())
def test_reaction_annotation_overview(model, db):
"""
Expect all reactions to have annotations from common databases.
Specific database cross-references are paramount to mapping information.
To provide references to as many databases as possible helps to make the
metabolic model more accessible to other researchers. This does not only
facilitate the use of a model in a broad array of computational pipelines,
it also promotes the metabolic model itself to become an organism-specific
knowledge base.
For this test to pass, each reaction annotation should contain
cross-references to a number of databases. The current selection is
listed in `annotation.py`, but an ongoing discussion can be found at
https://github.com/opencobra/memote/issues/332. For each database this
test checks for the presence of its corresponding namespace ID to comply
with the MIRIAM guidelines i.e. they have to match those defined on
https://identifiers.org/.
Since each database is quite different and some potentially incomplete, it
may not be feasible to achieve 100% coverage for each of them. Generally
it should be possible, however, to obtain cross-references to at least
one of the databases for all reactions consistently.
Implementation:
Check if the keys of the annotation attribute of each cobra.Reaction of
the model match with a selection of common biochemical databases. The
annotation attribute of cobrapy components is a dictionary of
key:value pairs.
"""
ann = test_reaction_annotation_overview.annotation
ann["data"][db] = get_ids(
annotation.generate_component_annotation_overview(
model.reactions, db))
ann["metric"][db] = len(ann["data"][db]) / len(model.reactions)
ann["message"][db] = wrapper.fill(
"""The following {} reactions ({:.2%}) lack annotation for {}:
{}""".format(len(ann["data"][db]), ann["metric"][db], db,
truncate(ann["data"][db])))
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.GENE_PRODUCT_ANNOTATIONS))
@annotate(title="Gene Annotations Per Database",
format_type="percent", message=dict(), data=dict(), metric=dict())
def test_gene_product_annotation_overview(model, db):
"""
Expect all genes to have annotations from common databases.
Specific database cross-references are paramount to mapping information.
To provide references to as many databases as possible helps to make the
metabolic model more accessible to other researchers. This does not only
facilitate the use of a model in a broad array of computational pipelines,
it also promotes the metabolic model itself to become an organism-specific
knowledge base.
For this test to pass, each gene annotation should contain
cross-references to a number of databases. The current selection is
listed in `annotation.py`, but an ongoing discussion can be found at
https://github.com/opencobra/memote/issues/332. For each database this
test checks for the presence of its corresponding namespace ID to comply
with the MIRIAM guidelines i.e. they have to match those defined on
https://identifiers.org/.
Since each database is quite different and some potentially incomplete, it
may not be feasible to achieve 100% coverage for each of them. Generally
it should be possible, however, to obtain cross-references to at least
one of the databases for all gene products consistently.
Implementation:
Check if the keys of the annotation attribute of each cobra.Gene of
the model match with a selection of common genome databases. The
annotation attribute of cobrapy components is a dictionary of
key:value pairs.
"""
ann = test_gene_product_annotation_overview.annotation
ann["data"][db] = get_ids(
annotation.generate_component_annotation_overview(
model.genes, db))
ann["metric"][db] = len(ann["data"][db]) / len(model.genes)
ann["message"][db] = wrapper.fill(
"""The following {} genes ({:.2%}) lack annotation for {}:
{}""".format(len(ann["data"][db]), ann["metric"][db], db,
truncate(ann["data"][db])))
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", list(annotation.METABOLITE_ANNOTATIONS))
@annotate(title="Metabolite Annotation Conformity Per Database",
format_type="percent", message=dict(), data=dict(), metric=dict())
def test_metabolite_annotation_wrong_ids(model, db):
"""
Expect all annotations of metabolites to be in the correct format.
To identify databases and the identifiers belonging to them, computational
tools rely on the presence of specific patterns. Only when these patterns
can be identified consistently is an ID truly machine-readable. This test
checks if the database cross-references in metabolite annotations conform
to patterns defined according to the MIRIAM guidelines, i.e. matching
those that are defined at https://identifiers.org/.
The required formats, i.e., regex patterns are further outlined in
`annotation.py`. This test does not carry out a web query for the composed
URI, it merely controls that the regex patterns match the identifiers.
Implementation:
For those metabolites whose annotation keys match any of the tested
databases, check if the corresponding values match the identifier pattern
of each database.
"""
ann = test_metabolite_annotation_wrong_ids.annotation
ann["data"][db] = total = get_ids(
set(model.metabolites).difference(
annotation.generate_component_annotation_overview(
model.metabolites, db)))
ann["metric"][db] = 1.0
ann["message"][db] = wrapper.fill(
"""There are no metabolite annotations for the {} database.
""".format(db))
assert len(total) > 0, ann["message"][db]
ann["data"][db] = get_ids(
annotation.generate_component_annotation_miriam_match(
model.metabolites, "metabolites", db))
ann["metric"][db] = len(ann["data"][db]) / len(total)
ann["message"][db] = wrapper.fill(
"""A total of {} metabolite annotations ({:.2%}) do not match the
regular expression patterns defined on identifiers.org for the {}
database: {}""".format(
len(ann["data"][db]), ann["metric"][db], db,
truncate(ann["data"][db])))
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", annotation.REACTION_ANNOTATIONS)
@annotate(title="Reaction Annotation Conformity Per Database",
format_type="percent", message=dict(), data=dict(), metric=dict())
def test_reaction_annotation_wrong_ids(model, db):
"""
Expect all annotations of reactions to be in the correct format.
To identify databases and the identifiers belonging to them, computational
tools rely on the presence of specific patterns. Only when these patterns
can be identified consistently is an ID truly machine-readable. This test
checks if the database cross-references in reaction annotations conform
to patterns defined according to the MIRIAM guidelines, i.e. matching
those that are defined at https://identifiers.org/.
The required formats, i.e., regex patterns are further outlined in
`annotation.py`. This test does not carry out a web query for the composed
URI, it merely controls that the regex patterns match the identifiers.
Implementation:
    For those reactions whose annotation keys match any of the tested
databases, check if the corresponding values match the identifier pattern
of each database.
"""
ann = test_reaction_annotation_wrong_ids.annotation
ann["data"][db] = total = get_ids(
set(model.reactions).difference(
annotation.generate_component_annotation_overview(
model.reactions, db)))
ann["metric"][db] = 1.0
ann["message"][db] = wrapper.fill(
"""There are no reaction annotations for the {} database.
""".format(db))
assert len(total) > 0, ann["message"][db]
ann["data"][db] = get_ids(
annotation.generate_component_annotation_miriam_match(
model.reactions, "reactions", db))
ann["metric"][db] = len(ann["data"][db]) / len(model.reactions)
ann["message"][db] = wrapper.fill(
"""A total of {} reaction annotations ({:.2%}) do not match the
regular expression patterns defined on identifiers.org for the {}
database: {}""".format(
len(ann["data"][db]), ann["metric"][db], db,
truncate(ann["data"][db])))
assert len(ann["data"][db]) == 0, ann["message"][db]
@pytest.mark.parametrize("db", annotation.GENE_PRODUCT_ANNOTATIONS)
@annotate(title="Gene Annotation Conformity Per Database",
format_type="percent", message=dict(), data=dict(), metric=dict())
def test_gene_product_annotation_wrong_ids(model, db):
"""
Expect all annotations of genes/gene-products to be in the correct format.
To identify databases and the identifiers belonging to them, computational
tools rely on the presence of specific patterns. Only when these patterns
can be identified consistently is an ID truly machine-readable. This test
    checks if the database cross-references in gene annotations conform
to patterns defined according to the MIRIAM guidelines, i.e. matching
those that are defined at https://identifiers.org/.
The required formats, i.e., regex patterns are further outlined in
`annotation.py`. This test does not carry out a web query for the composed
URI, it merely controls that the regex patterns match the identifiers.
Implementation:
For those genes whose annotation keys match any of the tested
databases, check if the corresponding values match the identifier pattern
of each database.
"""
ann = test_gene_product_annotation_wrong_ids.annotation
ann["data"][db] = total = get_ids(
set(model.genes).difference(
annotation.generate_component_annotation_overview(
model.genes, db)))
ann["metric"][db] = 1.0
ann["message"][db] = wrapper.fill(
"""There are no gene annotations for the {} database.
""".format(db))
assert len(total) > 0, ann["message"][db]
ann["data"][db] = get_ids(
annotation.generate_component_annotation_miriam_match(
model.genes, "genes", db))
ann["metric"][db] = len(ann["data"][db]) / len(model.genes)
ann["message"][db] = wrapper.fill(
"""A total of {} gene annotations ({:.2%}) do not match the
regular expression patterns defined on identifiers.org for the {}
database: {}""".format(
len(ann["data"][db]), ann["metric"][db], db,
truncate(ann["data"][db])))
assert len(ann["data"][db]) == 0, ann["message"][db]
@annotate(title="Uniform Metabolite Identifier Namespace", format_type="count")
def test_metabolite_id_namespace_consistency(model):
"""
Expect metabolite identifiers to be from the same namespace.
In well-annotated models it is no problem if the pool of main identifiers
for metabolites consists of identifiers from several databases. However,
in models that lack appropriate annotations, it may hamper the ability of
other researchers to use it. Running the model through a computational
pipeline may be difficult without first consolidating the namespace.
Hence, this test checks if the main metabolite identifiers can be
attributed to one single namespace based on the regex patterns defined at
https://identifiers.org/
Implementation:
Generate a table with each column corresponding to one
database from the selection and each row to a metabolite identifier. A
Boolean entry indicates whether the identifier matches the regular
expression of the corresponding database. Since the Biocyc pattern matches
broadly, we assume that any instance of an identifier matching to Biocyc
AND any other database pattern is a false positive match for Biocyc and
thus set it to ``false``. Sum the positive matches for each database and
assume that the largest set is the 'main' identifier namespace.
"""
ann = test_metabolite_id_namespace_consistency.annotation
overview = annotation.generate_component_id_namespace_overview(
model, "metabolites")
distribution = overview.sum()
cols = list(distribution.index)
largest = distribution[cols].idxmax()
# Assume that all identifiers match the largest namespace.
ann["data"] = list(set(get_ids(model.metabolites)).difference(
overview[overview[largest]].index.tolist()))
ann["metric"] = len(ann["data"]) / len(model.metabolites)
ann["message"] = wrapper.fill(
"""{} metabolite identifiers ({:.2%}) deviate from the largest found
namespace ({}): {}""".format(
len(ann["data"]), ann["metric"], largest, truncate(ann["data"])))
assert len(ann["data"]) == 0, ann["message"]
@annotate(title="Uniform Reaction Identifier Namespace", format_type="count")
def test_reaction_id_namespace_consistency(model):
"""
Expect reaction identifiers to be from the same namespace.
In well-annotated models it is no problem if the pool of main identifiers
for reactions consists of identifiers from several databases. However,
in models that lack appropriate annotations, it may hamper the ability of
other researchers to use it. Running the model through a computational
pipeline may be difficult without first consolidating the namespace.
Hence, this test checks if the main reaction identifiers can be
attributed to one single namespace based on the regex patterns defined at
https://identifiers.org/
Implementation:
Generate a pandas.DataFrame with each column corresponding to one
database from the selection and each row to the reaction ID. A boolean
    entry indicates whether the reaction ID matches the regex pattern
    of the corresponding database. Since the Biocyc pattern matches quite broadly,
assume that any instance of an identifier matching to Biocyc
AND any other DB pattern is a false positive match for Biocyc and then set
the boolean to ``false``. Sum the positive matches for each database and
assume that the largest set is the 'main' identifier namespace.
"""
ann = test_reaction_id_namespace_consistency.annotation
overview = annotation.generate_component_id_namespace_overview(
model, "reactions")
distribution = overview.sum()
cols = list(distribution.index)
largest = distribution[cols].idxmax()
# Assume that all identifiers match the largest namespace.
ann["data"] = list(set(get_ids(model.reactions)).difference(
overview[overview[largest]].index.tolist()))
ann["metric"] = len(ann["data"]) / len(model.reactions)
ann["message"] = wrapper.fill(
"""{} reaction identifiers ({:.2%}) deviate from the largest found
namespace ({}): {}""".format(
len(ann["data"]), ann["metric"], largest, truncate(ann["data"])))
assert len(ann["data"]) == 0, ann["message"]
| StarcoderdataPython |
3383384 | <filename>test/with_server/test_server.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
test/test_server.py
~~~~~~~~~~~~~~~~~~~
Tests the pyrc server by actually spinning up an actual server, and actually
sending actual socket messages to it. This is integration testing, not unit
testing, but it's suitably useful that it's the default testing mode.
"""
class TestServer(object):
"""
Test the pyrc server.
"""
def test_echo_ws_to_tcp(self, client):
data = "Hi there sir.\r\n"
# Perform the handshake.
ws_conn, tcp_conn = client.establish_connections()
# Send a message through the websocket.
ws_conn.write_message(data)
# Read it on the TCP socket.
tcp_conn.read_until(b"\r\n", client.stop)
received = client.wait()
assert received.decode("utf-8") == data
def test_echo_tcp_to_ws(self, client):
data = "Hi there sir\r\n"
# Perform the handshake.
ws_conn, tcp_conn = client.establish_connections()
# Send a message through the TCP connection.
tcp_conn.write(data.encode("utf-8"))
# Read it on the websocket.
ws_conn.read_message(client.stop)
received = client.wait().result()
assert received == data
| StarcoderdataPython |
3278577 | '''Forge URLs and utility functions'''
from . import AFWExceptions
AUTODESK_BASE_URL = "https://developer.api.autodesk.com"
TOKENFLEX_API = AUTODESK_BASE_URL+"/tokenflex/v1"
RECAP_API = AUTODESK_BASE_URL+"/photo-to-3d/v1"
AUTH_API = AUTODESK_BASE_URL+"/authentication/v1"
INFO_AUTH = AUTODESK_BASE_URL+"/userprofile/v1"
DA_API = AUTODESK_BASE_URL+"/da/us-east/v3"
# BIM360 and data management APIs are not consistent with their API urls
def checkScopes(token, endpoint_scope: str):
'''Checks scopes before making the request.'''
token_scope = token.scope.split()
endpoint_scope = endpoint_scope.split()
result = all(elem in token_scope for elem in endpoint_scope)
if result:
return True
else:
raise AFWExceptions.AFWError("Missing required scopes:", endpoint_scope)
def checkResponse(r):
'''If the response raised an error, this will detect it'''
if "code" in r and "message" in r:
raise AFWExceptions.APIError("CODE {e1} - {e2}".format(e1=r["code"], e2=r["message"]))
elif "developerMessage" in r and "errorCode" in r:
raise AFWExceptions.APIError("CODE {e1} - {e2}".format(e1=r["errorCode"], e2=r["developerMessage"]))
elif "code" in r and "msg" in r:
raise AFWExceptions.APIError("CODE {e1} - {e2}".format(e1=r["code"], e2=r["msg"]))
elif "jsonapi" in r and "errors" in r: # Check for dm errors, response returns a list of errors so raise that list
raise AFWExceptions.APIError(r["errors"])
elif "Error" in r: # This is ReCap format... too many error formats
raise AFWExceptions.APIError("CODE {e1} - {e2}".format(e1=r["Error"]["code"], e2=r["Error"]["msg"]))
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def allowed_kwargs_check(allowedKwgs, kwgs):
'''Check kwargs'''
for kwg in kwgs:
if kwg not in allowedKwgs:
raise AFWExceptions.AFWError("Invalid kwarg. See allowed kwargs in the docstring")
| StarcoderdataPython |
196178 | import re
from collections import defaultdict
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For each user, if the group with the exact scope of permissions exists,
add the user to it, else create a new group with this scope of permissions
and add the user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
counter = get_counter_value(Group)
mapping = create_permissions_mapping(User)
for perms, users in mapping.items():
group = get_group_with_given_permissions(perms, groups)
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, counter, Group)
group.user_set.add(*users)
counter += 1
def get_counter_value(Group):
"""Get the number of next potential group."""
pattern = r"^Group (\d+)$"
group = Group.objects.filter(name__iregex=pattern).order_by("name").last()
if not group:
return 1
return int(re.match(pattern, group.name).group(1)) + 1
def create_permissions_mapping(User):
"""Create mapping permissions to users and potential new group name."""
mapping = defaultdict(set)
users = User.objects.filter(user_permissions__isnull=False).distinct().iterator()
for user in users:
permissions = user.user_permissions.all().order_by("pk")
perm_pks = tuple([perm.pk for perm in permissions])
mapping[perm_pks].add(user.pk)
user.user_permissions.clear()
return mapping
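# Illustrative sketch with invented primary keys: the mapping built above groups user
# pks by the exact tuple of permission pks they held.
def _mapping_shape_sketch():
    mapping = defaultdict(set)
    mapping[(3, 7)].update({11, 42})  # users 11 and 42 both held permissions 3 and 7
    mapping[(3,)].add(13)             # user 13 only held permission 3
    return dict(mapping)              # {(3, 7): {11, 42}, (3,): {13}}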
def get_group_with_given_permissions(permissions, groups):
"""Get group with given set of permissions."""
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, counter, Group):
"""Create new group with given set of permissions."""
group_name = f"Group {counter:03d}"
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0040_auto_20200415_0443"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
| StarcoderdataPython |
4802523 | <gh_stars>1-10
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('challenges.views',
url(r'$', 'show', name='challenge_show'),
url(r'entries/$', 'entries_all', name='entries_all'),
url(r'entries/add/$', 'create_entry', name='entry_create'),
url(r'entries/(?P<entry_id>\d+)/$', 'entry_show',
name='entry_show'),
)
| StarcoderdataPython |
61642 | <reponame>mjachowdhury/PracticalMachineLearning<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 15:24:36 2020
@author: Ted.Scully
"""
import numpy as np
from sklearn.ensemble import IsolationForest
from sklearn.linear_model import Lasso
def main():
trainAll = np.genfromtxt("trainingData.csv", delimiter=",")
testAll = np.genfromtxt("testData.csv", delimiter=",")
# Extract feature data
train_features = trainAll[:, :-1]
train_labels = trainAll[:, -1]
test_features = testAll[:, :-1]
test_labels = testAll[:, -1]
reg_model = Lasso()
reg_model.fit(train_features, train_labels)
print("R2 result for Lasso without removing outliers: ", reg_model.score(test_features, test_labels))
# Create an isolation forest to remove mutlivariate outliers
clf = IsolationForest(contamination = 0.01)
clf.fit(train_features)
# Predict returns an array contains 1 (not outlier) and -1 (outlier) values
results = clf.predict(train_features)
# Exact only non-outlier instances
normal_features = train_features[results == 1]
normal_labels = train_labels[results == 1]
# Rerun the Lasso regression model
reg_model = Lasso()
reg_model.fit(normal_features, normal_labels)
print("R2 result for Lasso after removal of outliers: ",reg_model.score(test_features, test_labels))
main()
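# Minimal sketch of the outlier-filtering step above on synthetic data; the
# contamination value and array sizes are arbitrary choices for illustration.
def _isolation_forest_sketch():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 2))
    X[:5] += 8.0                                              # plant a few obvious outliers
    flags = IsolationForest(contamination=0.05).fit_predict(X)
    inliers = X[flags == 1]                                   # keep only rows predicted as normal (+1)
    return inliers.shape                                      # roughly (95, 2)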
| StarcoderdataPython |
3244979 | <reponame>StillScripts/react-generator
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 23 16:28:26 2022
@author: danie
"""
def create_file(path, content):
index_file = open(path, 'w')
index_file.write(content)
    index_file.close()
print(f"MAKING FILE - {path}") | StarcoderdataPython |
3285679 | # Generated by Django 3.1.6 on 2021-02-26 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("workstation_configs", "0001_squashed_0008_auto_20201001_0758"),
]
operations = [
migrations.AddField(
model_name="workstationconfig",
name="image_context",
field=models.CharField(
blank=True,
choices=[
("PATH", "Pathology"),
("OPHTH", "Ophthalmology"),
("MPMRI", "Multiparametric MRI"),
],
max_length=6,
),
),
]
| StarcoderdataPython |
1694605 | import discord
import os
import yaml
import random
from datetime import datetime
from discord.ext import commands
from discord.utils import get
from discord.ext.commands.errors import CommandNotFound, CommandInvokeError
from dotenv import load_dotenv
from os import system
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
BOT_PREFIX = "!"
bot = commands.Bot(command_prefix=BOT_PREFIX)
def commands_dict():
with open("commands.yaml", encoding="utf8") as infile:
commands_dict = yaml.safe_load(infile)
return commands_dict
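# Hedged sketch of the structure this loader is expected to return from commands.yaml;
# the command names, texts and file path below are invented for illustration.
_EXAMPLE_COMMANDS = {
    "hello": {"type": "send", "help": "Reply with a fixed text", "text": "Hi!"},
    "quote": {"type": "random_choice", "help": "Reply with a random quote",
              "choices": ["first quote", "second quote"]},
    "horn": {"type": "audio", "help": "Play a sound in the voice channel",
             "file": "sounds/horn.mp3"},
}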
# delete from current chat the last <amount> + 1 messages(command included)
async def clr(ctx, amount=0):
if ctx.author == bot.user:
return
await ctx.channel.purge(limit=amount + 1)
# send dick pic
async def send_callback(ctx):
if ctx.author == bot.user:
return
await ctx.channel.send(commands_dict[ctx.command.qualified_name]["text"])
# send random butthole
async def random_send_callback(ctx):
if ctx.author == bot.user:
return
await ctx.channel.send(
random.choice(commands_dict[ctx.command.qualified_name]["choices"])
)
# play shit
async def audio_callback(ctx):
if ctx.message.author.voice == None:
return
global voice
channel = ctx.message.author.voice.channel
voice = get(bot.voice_clients, guild=ctx.guild)
if voice and voice.is_connected():
await voice.move_to(channel)
else:
voice = await channel.connect()
print(
f"{datetime.now().strftime('%H:%M:%S')} The bot has connected to {channel} "
f"[requested by {ctx.author.name} ({ctx.author})]"
)
voice = get(bot.voice_clients, guild=ctx.guild)
voice.play(
discord.FFmpegPCMAudio(commands_dict[ctx.command.qualified_name]["file"]),
after=lambda e: print(
f"{datetime.now().strftime('%H:%M:%S')} Finished playing !{ctx.command.qualified_name}"
),
)
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.65
while voice.is_playing() == True:
continue
if voice and voice.is_connected():
await voice.disconnect()
print(f"{datetime.now().strftime('%H:%M:%S')} The bot has left {channel}\n")
if ctx.author == bot.user:
return
# build all commands from dict
def command_builder(commands_dict):
command_list = []
for command_name in commands_dict:
if commands_dict[command_name]["type"] == "send":
func = send_callback
elif commands_dict[command_name]["type"] == "audio":
func = audio_callback
elif commands_dict[command_name]["type"] == "random_choice":
func = random_send_callback
else:
continue
c = commands.Command(
func, name=command_name, help=commands_dict[command_name]["help"]
)
command_list.append(c)
return command_list
# display when the bot is connected to discord
@bot.event
async def on_ready():
print(
f"{datetime.now().strftime('%H:%M:%S')} {bot.user.name} has connected to Discord!"
)
# prevent CLI spam from non-existent commands
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, (CommandInvokeError, CommandNotFound)):
return
raise error
if __name__ == "__main__":
commands_dict = commands_dict()
commands = command_builder(commands_dict)
for c in commands:
bot.add_command(c)
bot.run(TOKEN)
| StarcoderdataPython |
161054 | <reponame>krizex/fund
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
from fund.log.logger import log
__author__ = '<NAME>'
"""
Created on 07/03/2017
@author: <NAME>
"""
# http://www.shareditor.com/blogshow/?blogId=94
class SoftmaxTrainer(object):
def __init__(self, feature_count, label_count, learning_rate, iterate_count):
self.x = tf.placeholder(tf.float32, [None, feature_count])
self.W = tf.Variable(tf.zeros([feature_count, label_count]), name='W')
self.b = tf.Variable(tf.zeros([label_count]), name='b')
self.y = tf.nn.softmax(tf.matmul(self.x, self.W) + self.b)
self.y_ = tf.placeholder(tf.float32, [None, label_count])
# We should use `tf.reduce_mean` so learning rate could be independent with batch size
self.cross_entropy = -tf.reduce_mean(self.y_ * tf.log(tf.clip_by_value(self.y, 1e-10, 1.0)))
self.train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.cross_entropy)
self.iterate_count = iterate_count
self.session = tf.InteractiveSession()
def train(self, training_pairs, verify_pairs):
self.session.run(tf.global_variables_initializer())
xs = [p[0] for p in training_pairs]
ys = [p[1] for p in training_pairs]
recognize_accuracy = 0.0
for i in range(self.iterate_count):
self.session.run(self.train_step, feed_dict={self.x: xs, self.y_: ys})
# cost = self.session.run(self.cross_entropy, feed_dict={self.x: xs, self.y_: ys})
# print 'Cost: %f' % cost
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
recognize_accuracy = accuracy.eval(feed_dict={
self.x: [p[0] for p in verify_pairs],
self.y_: [p[1] for p in verify_pairs],
})
log.debug('(%d)Recognize accuracy is %f' % (i, recognize_accuracy))
log.debug('Recognize accuracy is %f' % recognize_accuracy)
return recognize_accuracy
def save(self, filename):
saver = tf.train.Saver()
saver.save(self.session, filename)
print self.b.eval()
print self.W.eval()
def restore(self, filename):
ckpt = tf.train.get_checkpoint_state(os.path.dirname(filename))
saver = tf.train.Saver()
saver.restore(self.session, ckpt.model_checkpoint_path)
print self.b.eval()
print self.W.eval()
def recognize(self, feature):
if not isinstance(feature[0], (list, tuple)):
feature = [feature]
return self.session.run(tf.argmax(self.y, 1), {self.x: feature})
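# Hedged usage sketch: the shapes and toy data below are invented; pairs are
# (feature_vector, one_hot_label) tuples as expected by train().
def _softmax_trainer_sketch():
    trainer = SoftmaxTrainer(feature_count=4, label_count=3,
                             learning_rate=0.1, iterate_count=50)
    pairs = [([1.0, 0.0, 0.0, 0.0], [1, 0, 0]),
             ([0.0, 1.0, 0.0, 0.0], [0, 1, 0]),
             ([0.0, 0.0, 1.0, 1.0], [0, 0, 1])]
    accuracy = trainer.train(pairs, pairs)     # tiny toy set doubles as the verify set
    prediction = trainer.recognize([0.0, 1.0, 0.0, 0.0])
    return accuracy, prediction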
# def init_recognizer(recognizer):
# log.info('Init recognizer...')
# from scv.datamanager.dataset import DataSet
# dataset = DataSet()
# training_set = dataset.get_training_set()
# verify_set = dataset.get_verify_set()
# recognizer.train(training_set, verify_set)
if __name__ == '__main__':
filename = data_file() | StarcoderdataPython |
3341277 | from model.utils import *
from dataloader.util_func import *
class LanguageModelsAgent(RetrievalBaseAgent):
'''no train mode, only test'''
def __init__(self, vocab, model, args):
super(LanguageModelsAgent, self).__init__()
self.args = args
self.vocab, self.model = vocab, model
self.cuda_models = ['gpt2lm']
if self.args['model'] in self.cuda_models:
if torch.cuda.is_available():
self.model.cuda()
@torch.no_grad()
def rerank(self, batches):
'''rerank scores'''
if self.args['model'] in self.cuda_models:
self.model.eval()
scores = []
for batch in batches:
# compatible for the predict function
score = self.model.predict(batch)
scores.append(score)
return scores
def load_model(self, path):
pass
@torch.no_grad()
def inference(self, inf_iter, size=500000):
self.model.eval()
pbar = tqdm(inf_iter)
ppls, texts = [], []
for batch in pbar:
ppl = self.model.module.predict(batch)
ppls.extend(ppl)
texts.extend(batch['candidates'])
torch.save(
(ppls, texts),
f'{self.args["root_dir"]}/data/{self.args["dataset"]}/inference_ppl_{self.args["local_rank"]}.pt'
)
| StarcoderdataPython |
4807052 | from pytest import mark
from mysign_app.management.commands.seed import Command
from mysign_app.models import Company, DoorDevice, User
from mysign_app.tests.factories import CompanyFactory
@mark.django_db
def test_objects_are_seeded():
# Run seeds
Command().handle()
assert Company.objects.count() == 20
assert DoorDevice.objects.count() == 20
assert User.objects.filter(email='<EMAIL>').count() == 1
assert User.objects.filter(email='<EMAIL>').first().check_password('<PASSWORD>')
assert User.objects.filter(email='<EMAIL>').count() == 1
assert User.objects.filter(email='<EMAIL>').first().check_password('<PASSWORD>')
assert User.objects.filter(email='<EMAIL>').first().company_id
assert User.objects.filter(email='<EMAIL>').count() == 1
assert User.objects.filter(email='<EMAIL>').first().check_password('<PASSWORD>')
assert User.objects.filter(email='<EMAIL>').first().is_staff
assert User.objects.filter(email='<EMAIL>').first().is_superuser
@mark.django_db
def test_database_is_cleared():
CompanyFactory.create(name="really awesome company")
# Run seeds
Command().handle()
assert Company.objects.filter(name="really awesome company").count() == 0
@mark.django_db
def test_production_seed():
Command().handle(production=True)
assert Company.objects.count() == 0
assert DoorDevice.objects.count() == 0
assert User.objects.count() == 1
assert User.objects.first().check_password('<PASSWORD>')
| StarcoderdataPython |
3212185 | <reponame>mcgreevy/chromium-infra
#!/usr/bin/env python
# This file mocks typical recipes.py that normally runs a recipe.
import argparse
import json
import sys
import shutil
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output-result-json')
parser.add_argument('--properties-file')
args, _ = parser.parse_known_args()
assert args.output_result_json
assert args.properties_file
with open(args.properties_file) as f:
properties = json.load(f)
cfg = properties.pop('recipe_mock_cfg')
with open(cfg['input_path'], 'w') as f:
json.dump({
'args': sys.argv,
'properties': properties,
}, f)
mocked_result_path = cfg.get('mocked_result_path')
if mocked_result_path:
shutil.copyfile(mocked_result_path, args.output_result_json)
return cfg['exitCode']
if __name__ == '__main__':
sys.exit(main()) | StarcoderdataPython |
3229116 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
import random
import sys
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
class crypto:
def __init__(self):
"""the fuction creates a photo and splits it then to two with visual cryptography"""
# Text_Pic
img = Image.new('1', (150, 150), color=255) # creates a new image sized 150x150, black&white (mode 1)
self.txt = ''
for i in range(6):
self.txt += chr(random.randint(97, 122))
ImageDraw.Draw(img).text(xy=(0, 50), text=self.txt,
font=ImageFont.truetype('C:\WINDOWS\Fonts\ARLRDBD.TTF'
, 37))
img.save('source_image.jpg')
# Generate
# my visual cryptography works with this concept (minimum):
# when black pixel with a black pixel merged, the output will be a black pixel
# when white pixel and black pixels merged or white pixel and white pixel - output will be white
image = Image.open('source_image.jpg')
image = image.convert('1') # mode 1 turns picture to black and white only!
# now we will create two images in mode 1- black and white
# size will be duplicated
out1 = Image.new('1', [dimension * 2 for dimension in
image.size]) # PIL.Image.new(mode, size, color=0), size is doubled
out2 = Image.new('1', [dimension * 2 for dimension in
image.size])
lists=[[255,0,255,0], [0,255,0,255]]
for x in range(0, image.size[0]): # a loop from 0 to the x of the image
for y in range(0, image.size[1]): # a loop from 0 to the y of the image
pixel=image.getpixel((x,y)) # loops - for each x all the ys
pattern=random.choice(lists) #Return a random list from the list of pattern lists
if pixel==0: # if the pixel is black the pixel splits by the random pattern with an anti pattern
out1.putpixel((x * 2, y * 2), pattern[0])
out1.putpixel((x * 2 + 1, y * 2), pattern[1])
out1.putpixel((x * 2, y * 2 + 1), pattern[2])
out1.putpixel((x * 2 + 1, y * 2 + 1), pattern[3])
out2.putpixel((x * 2, y * 2), 255-pattern[0])
out2.putpixel((x * 2 + 1, y * 2), 255-pattern[1])
out2.putpixel((x * 2, y * 2 + 1), 255-pattern[2])
out2.putpixel((x * 2 + 1, y * 2 + 1), 255-pattern[3])
else: # if the pixel is white the pixel splits by the random pattern with the same pattern
out1.putpixel((x * 2, y * 2), pattern[0])
out1.putpixel((x * 2 + 1, y * 2), pattern[1])
out1.putpixel((x * 2, y * 2 + 1), pattern[2])
out1.putpixel((x * 2 + 1, y * 2 + 1), pattern[3])
out2.putpixel((x * 2, y * 2), pattern[0])
out2.putpixel((x * 2 + 1, y * 2), pattern[1])
out2.putpixel((x * 2, y * 2 + 1), pattern[2])
out2.putpixel((x * 2 + 1, y * 2 + 1), pattern[3])
# pictures saved
out1.save(r'out1.jpg')
out2.save('out2.jpg')
def GetPassword(self):
return self.txt
def GetPicture(self):
with open(r'out1.jpg', 'rb') as infile1:
infile_read = infile1.read()
infile1.close()
return infile_read
def Send_Out2_By_Email(self, email_addr):
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
content = MIMEMultipart()
content['From'] = '<EMAIL>'
content['To'] = email_addr
content['Subject'] = 'Password First Picture'
content.attach(MIMEText('Here is the first picture of the password:'
, 'plain'))
filename = 'out2.jpg'
attachment = open(filename, 'rb')
part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename= '
+ filename)
content.attach(part)
content = content.as_string()
mail = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
mail.starttls()
mail.login('<EMAIL>', 'YOUR CREATED EMAIL"S PASSWORD')
# To run the project you have to create an email (or use an existing one)
# The email will send the out2.jpg file to the client
try:
mail.sendmail('<EMAIL>', [email_addr], content)
except:
print ("Unexpected Client Error 1.")
| StarcoderdataPython |
3315580 | <gh_stars>10-100
from __future__ import print_function
from lm import lm
import sys
import codecs
stdin = codecs.getreader('utf-8')(sys.stdin)
stdout = codecs.getwriter('utf-8')(sys.stdout)
lm = lm.LM()
lm.load('../data/')
collationLM_sum = 0
ngramLM_sum = 0
count = 0
for line in stdin:
line = line.rstrip('\n')
sentence = line.split(' ')
col_score = lm.collocationLM.score(sentence, debug=False)
slm_score = lm.slm_score(sentence)
print(line + "," + str(col_score) + "," + str(slm_score), file=stdout)
collationLM_sum += col_score
ngramLM_sum += slm_score
count += 1
print("cross_entropy, " + str(collationLM_sum / count) + "," + str(ngramLM_sum / count)) | StarcoderdataPython |
1781294 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0004_auto_20150415_2210'),
]
operations = [
migrations.CreateModel(
name='PaymentPrice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('price', models.IntegerField(verbose_name='pris')),
('description', models.CharField(max_length=128, null=True, blank=True)),
('payment', models.ForeignKey(to='payment.Payment')),
],
options={
'verbose_name': 'pris',
'verbose_name_plural': 'priser',
},
bases=(models.Model,),
),
migrations.AlterModelOptions(
name='paymentdelay',
options={'verbose_name': 'betalingsutsettelse', 'verbose_name_plural': 'betalingsutsettelser'},
),
migrations.AddField(
model_name='payment',
name='stripe_key_index',
field=models.SmallIntegerField(default=0, verbose_name='stripe key', choices=[(0, b'Arrkom'), (1, b'Prokom')]),
preserve_default=False,
),
migrations.AddField(
model_name='paymentrelation',
name='payment_price',
field=models.ForeignKey(default=0, to='payment.PaymentPrice'),
preserve_default=False,
),
migrations.AddField(
model_name='paymentrelation',
name='refunded',
field=models.BooleanField(default=False),
preserve_default=True,
),
migrations.AddField(
model_name='paymentrelation',
name='stripe_id',
field=models.CharField(default=0, max_length=128),
preserve_default=False,
),
migrations.AlterField(
model_name='payment',
name='delay',
field=models.SmallIntegerField(default=2, null=True, verbose_name='utsettelse', blank=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='payment',
unique_together=set([('content_type', 'object_id')]),
),
migrations.RemoveField(
model_name='payment',
name='price',
),
migrations.AlterUniqueTogether(
name='paymentdelay',
unique_together=set([('payment', 'user')]),
),
]
| StarcoderdataPython |
4819125 | <reponame>Hadisalman/AirSim
import copy
import json
import threading
import numpy as np
import torch
from robustness import airsim
from .sim_object import SimObject
class AdversarialObjects(SimObject):
def __init__(self, name='3DAdversary', car=None, **kwargs):
super().__init__(name)
assert 'resolution_coord_descent' in kwargs and 'num_iter' in kwargs and 'adv_config_path' in kwargs
self.ped_detection_callback = car.detection.ped_detection_callback
# TODO: un-hardcode this.
self.ped_object_name = 'Adv_Ped2'
self.thread = threading.Thread(target=self.coordinate_ascent_object_attack, args=(kwargs['resolution_coord_descent'], kwargs['num_iter']))
self.is_thread_active = False
self.scene_objs = self.client.simListSceneObjects()
self.adv_objects = [
'Adv_House',
'Adv_Fence',
'Adv_Hedge',
'Adv_Car',
'Adv_Tree'
]
self.adv_config_path = kwargs['adv_config_path']
for obj in self.adv_objects:
print('{} exists? {}'.format(obj, obj in self.scene_objs))
for obj in ['BoundLowerLeft', 'BoundUpperRight']:
print('{} exists? {}'.format(obj, obj in self.scene_objs))
self.BoundLowerLeft = self.client.simGetObjectPose('BoundLowerLeft')
self.BoundUpperRight = self.client.simGetObjectPose('BoundUpperRight')
self.x_range_adv_objects_bounds = (self.BoundLowerLeft.position.x_val, self.BoundUpperRight.position.x_val)
self.y_range_adv_objects_bounds = (self.BoundLowerLeft.position.y_val, self.BoundUpperRight.position.y_val)
def dump_env_config_to_json(self, path):
def _populate_pose_dic(pose_dic, pose):
pose_dic['X'] = pose.position.x_val
pose_dic['Y'] = pose.position.y_val
pose_dic['Z'] = pose.position.z_val
euler_angles = airsim.to_eularian_angles(pose.orientation)
pose_dic['Pitch'] = euler_angles[0]
pose_dic['Roll'] = euler_angles[1]
pose_dic['Yaw'] = euler_angles[2]
with open(path, 'w') as f:
output = {}
output['Vehicle'] = {}
pose = self.client.simGetVehiclePose()
_populate_pose_dic(output['Vehicle'], pose)
output[self.ped_object_name] = {}
pose = self.client.simGetObjectPose(self.ped_object_name)
_populate_pose_dic(output[self.ped_object_name], pose)
for obj in self.adv_objects:
output[obj] = {}
pose = self.client.simGetObjectPose(obj)
_populate_pose_dic(output[obj], pose)
# print(output)
json.dump(output, f, indent=2, sort_keys=False)
def update_env_from_config(self, path):
with open(path, 'r') as f:
dic = json.load(f)
for obj_name, obj_pose in dic.items():
pose = airsim.Pose(airsim.Vector3r(obj_pose['X'], obj_pose['Y'], obj_pose['Z']),
airsim.to_quaternion(obj_pose['Pitch'], obj_pose['Roll'], obj_pose['Yaw']))
if obj_name == 'Vehicle':
self.client.simSetVehiclePose(pose, ignore_collison=True)
else:
assert obj_name in self.scene_objs, 'Object {} is not found in the scene'.format(obj_name)
self.client.simSetObjectPose(obj_name, pose)
print('-->[Updated the position of the {}]'.format(obj_name))
def coordinate_ascent_object_attack(self, resolution=10, num_iter=1):
x_range = np.linspace(self.x_range_adv_objects_bounds[0], self.x_range_adv_objects_bounds[1], resolution)
y_range = np.linspace(self.y_range_adv_objects_bounds[0], self.y_range_adv_objects_bounds[1], resolution)
xv, yv = np.meshgrid(x_range, y_range)
self.adv_poses = []
best_loss = -1
for _ in range(num_iter):
for obj in np.random.permutation(self.adv_objects).tolist():
pose = self.client.simGetObjectPose(obj)
best_pose = copy.deepcopy(pose)
grid2d_poses_list = zip(xv.flatten(), yv.flatten())
for grid2d_pose in grid2d_poses_list:
pose.position.x_val = grid2d_pose[0]
pose.position.y_val = grid2d_pose[1]
self.client.simSetObjectPose(obj, pose)
if not self.is_thread_active:
                        print('-->[Saving whatever configuration is reached]')
self.dump_env_config_to_json(path=self.adv_config_path)
return
_, correct, loss = self.ped_detection_callback()
if loss > best_loss:
best_loss = loss
best_pose = copy.deepcopy(pose)
print('Best loss so far {}'.format(best_loss.item()))
self.client.simSetObjectPose(obj, best_pose)
# dump results into a json file after each iteration
self.dump_env_config_to_json(path=self.adv_config_path)
def spsa_object_attack(self, resolution=10, num_iter=1):
def calc_est_grad(func, x, y, rad, num_samples):
B, *_ = x.shape
Q = num_samples//2
N = len(x.shape) - 1
with torch.no_grad():
# Q * B * C * H * W
extender = [1]*N
queries = x.repeat(Q, *extender)
noise = torch.randn_like(queries)
norm = noise.view(B*Q, -1).norm(dim=-1).view(B*Q, *extender)
noise = noise / norm
noise = torch.cat([-noise, noise])
queries = torch.cat([queries, queries])
y_shape = [1] * (len(y.shape) - 1)
l = func(queries + rad * noise, y.repeat(2*Q, *y_shape)).view(-1, *extender)
grad = (l.view(2*Q, B, *extender) * noise.view(2*Q, B, *noise.shape[1:])).mean(dim=0)
return grad
x_range = np.linspace(self.x_range_adv_objects_bounds[0], self.x_range_adv_objects_bounds[1], resolution)
y_range = np.linspace(self.y_range_adv_objects_bounds[0], self.y_range_adv_objects_bounds[1], resolution)
xv, yv = np.meshgrid(x_range, y_range)
self.adv_poses = []
best_loss = -1
for _ in range(num_iter):
for obj in np.random.permutation(self.adv_objects).tolist():
pose = self.client.simGetObjectPose(obj)
best_pose = copy.deepcopy(pose)
grid2d_poses_list = zip(xv.flatten(), yv.flatten())
for grid2d_pose in grid2d_poses_list:
pose.position.x_val = grid2d_pose[0]
pose.position.y_val = grid2d_pose[1]
self.client.simSetObjectPose(obj, pose)
if not self.is_thread_active:
                        print('-->[Saving whatever configuration is reached]')
self.dump_env_config_to_json(path=self.adv_config_path)
return
_, correct, loss = self.ped_detection_callback()
if loss > best_loss:
best_loss = loss
best_pose = copy.deepcopy(pose)
print('Best loss so far {}'.format(best_loss.item()))
self.client.simSetObjectPose(obj, best_pose)
# dump results into a json file after each iteration
self.dump_env_config_to_json(path=self.adv_config_path)
def attack(self):
if not self.is_thread_active:
self.is_thread_active = True
self.thread.start()
print("-->[Started adv thread]")
| StarcoderdataPython |
4822399 | <gh_stars>0
'''
@description 86. [Python OOP] Overriding methods of the parent class 2019/10/04 10:44
'''
class Person(object):
    def __init__(self, name, age):
        self.name = name
        self.age = age
    def eat(self):
        print('A person is eating!....')
class Student(Person):
    # 1. If the parent class's method cannot satisfy the subclass's needs, the method can be
    # overridden; afterwards, calling the same-named method on an object runs the subclass's version.
    # 2. If the parent class's method does not fully satisfy the subclass's needs but its code
    # still has to run, the parent class's method can be called through the super function.
    # 3. Usage of the super function: super(ClassName, self).method_name([optional arguments])
    # e.g.: super(Student, self).__init__(name, age)
    # e.g.: super(Student, self).eat()
    def __init__(self, name, age):
        super(Student, self).__init__(name, age)
    # TODO: override the eat method of the parent class Person
    def eat(self):
        # super(Student, self).eat()
        print('A student is eating!....')
    def greet(self):
        print('hello, my name is %s, my age is %s'%(self.name, self.age))
student = Student('zhiliao', 18)
student.eat()
student.greet() | StarcoderdataPython |
1693217 | """Locations class module for Squaredown.
"""
from aracnid_logger import Logger
from squaredown.connector import Connector
# initialize logging
logger = Logger(__name__).get_logger()
class Locations(Connector):
"""Contains the code to connect and pull locations from Square to MongoDB.
Environment Variables:
None.
Attributes:
collection: Square Orders collection in MongoDB.
collection_name: Name of the Square Orders collection in MongoDB.
"""
def __init__(self):
"""Initializes the Locations Connector.
Establishes connections to Square and MongoDB.
Sets up access to configuration properties.
"""
self.collection_name = 'square_locations'
logger.debug(f'collection_name: {self.collection_name}')
super().__init__(config_name=self.collection_name)
# initialize MongoDB collection
self.collection = self.read_collection(self.collection_name)
def pull(self):
"""Retrieves a set of Square Locations and saves them in MongoDB.
Args:
None
Returns:
None
"""
logger.debug('pulling')
result = self.api_locations.list_locations()
locations = None
if result.is_success():
locations = result.body.get('locations')
elif result.is_error():
logger.error(result.errors)
update_count = 0
if locations:
for location in locations:
self.update_location(location)
update_count += 1
logger.debug(f'locations processed: {update_count}')
def update_location(self, location):
"""Save the provided Square Location into MongoDB.
Args:
location: Square Location object
Returns:
The MongoDB representation of the Square Location object.
"""
self.decode_location(location)
# get the location properties
location_id = location['id']
# update the database
self.mdb.square_locations.find_one_and_replace(
filter={'_id': location_id},
replacement=location,
upsert=True
)
return location
| StarcoderdataPython |
1695818 | import math
def nth_fact(nth):
# Enter your code here
return(math.factorial(nth)) | StarcoderdataPython |
4820974 | <reponame>faraixyz/farais-code-graveyard
from base64 import urlsafe_b64encode
from hashlib import sha1
import hmac
import json
import pprint
import secrets
from urllib.parse import quote_plus, quote
from time import time
import requests
with open('config.json', 'rb') as config_file:
CONFIG = json.load(config_file)
#Defaults
oauth_consumer_key = CONFIG['consumer']['key']
oauth_signature_method = "HMAC-SHA1"
oauth_token = CONFIG['user']['key']
oauth_version = '1.0'
SIGN_KEY = bytes(CONFIG['consumer']['secret']+'&'+CONFIG['user']['secret'], 'utf-8')
def signrequest(request):
signer = hmac.new(SIGN_KEY, bytes(request,'utf-8'), sha1)
digest = urlsafe_b64encode(signer.digest())
return digest.decode('utf-8')
def make_oauth_obj():
oauth_nonce = secrets.token_urlsafe(16)
oauth_timestamp = time()
return {
"oauth_consumer_key":oauth_consumer_key,
"oauth_nonce":oauth_nonce,
"oauth_signature_method":oauth_signature_method,
"oauth_timestamp":oauth_timestamp,
"oauth_token":oauth_token,
"oauth_version":oauth_version,
}
def make_sign_str(method, url, oauth_obj, params):
sign_str = f"{method}&{quote_plus(url)}&"
for key, val in sorted([*oauth_obj.items(), *params.items()]):
sign_str += f"{quote_plus(key)}%3D{quote_plus(str(val))}%26"
return sign_str[:-3]
def make_auth_header(oauth_obj):
header = "OAUTH "
oauth_prop = [f'{quote_plus(key)}="{quote_plus(str(value))}"' for key, value in sorted(oauth_obj.items())]
header += ", ".join(oauth_prop)
return header
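# Hedged sketch of what the helpers above produce for a toy request; the key, nonce and
# timestamp values are invented, and no real signing is attempted here.
def _signing_sketch():
    oauth = {"oauth_consumer_key": "key", "oauth_nonce": "abc",
             "oauth_signature_method": "HMAC-SHA1", "oauth_timestamp": 1500000000,
             "oauth_token": "token", "oauth_version": "1.0"}
    base = make_sign_str("GET", "https://api.twitter.com/1.1/friends/ids.json", oauth, {})
    # base -> 'GET&https%3A%2F%2Fapi.twitter.com%2F1.1%2Ffriends%2Fids.json&oauth_consumer_key%3Dkey%26...'
    header = make_auth_header(oauth)
    # header -> 'OAUTH oauth_consumer_key="key", oauth_nonce="abc", ...'
    return base, header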
def get_friends():
url = 'https://api.twitter.com/1.1/friends/ids.json'
method = "GET"
oauth = make_oauth_obj()
sign_string = make_sign_str(method, url, oauth, {})
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.get(url, headers=headers)
ids = r.json()['ids']
return ids
def get_friends_with_hidden_retweets():
url = 'https://api.twitter.com/1.1/friendships/no_retweets/ids.json'
method = "GET"
oauth = make_oauth_obj()
sign_string = make_sign_str(method, url, oauth, {})
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.get(url, headers=headers)
ids = r.json()
return ids
def hide_retweets_from_user(id):
url = "https://api.twitter.com/1.1/friendships/update.json"
method = "POST"
oauth = make_oauth_obj()
args = {"user_id":id, "retweets":False}
sign_string = make_sign_str(method, url, oauth, args)
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.post(url, data=args, headers=headers)
return r
def get_tweets():
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
method = 'GET'
oauth = make_oauth_obj()
args = {"trim_user": 1}
sign_string = make_sign_str(method, url, oauth, {})
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.get(url, headers=headers)
data = r.json()
ids = list(map(lambda x: x['id_str'], data))
return ids
def get_favorites():
url = 'https://api.twitter.com/1.1/favorites/list.json'
method = 'GET'
oauth = make_oauth_obj()
sign_string = make_sign_str(method, url, oauth, {})
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.get(url, headers=headers)
data = r.json()
ids = list(map(lambda x: x['id_str'], data))
return ids
def delete_tweet(id):
url = f'https://api.twitter.com/1.1/statuses/destroy/{id}.json'
method = 'POST'
oauth = make_oauth_obj()
sign_string = make_sign_str(method, url, oauth, {})
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.post(url, data={}, headers=headers)
def make_fav(id):
url = 'https://api.twitter.com/1.1/favorites/create.json'
method = 'POST'
oauth = make_oauth_obj()
args = {'id':id}
sign_string = make_sign_str(method, url, oauth, args)
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.post(url, data=args, headers=headers)
def delete_fav(id):
url = 'https://api.twitter.com/1.1/favorites/destroy.json'
method = 'POST'
oauth = make_oauth_obj()
args = {'id':id}
sign_string = make_sign_str(method, url, oauth, args)
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.post(url, data=args, headers=headers)
def rate_limit():
url = 'https://api.twitter.com/1.1/application/rate_limit_status.json'
method= 'GET'
oauth = make_oauth_obj()
sign_string = make_sign_str(method, url, oauth, {})
oauth["oauth_signature"] = signrequest(sign_string)
headers = {"Authorization":make_auth_header(oauth)}
r = requests.get(url, headers=headers)
return r.json()
def delete_tweets():
deleted = 0
while True:
a = get_tweets()
print(a)
for i in a:
delete_tweet(i)
print('Deleted ' + i)
deleted +=1
print(f'{deleted} tweets deleted so far')
def hide_retweets_from_friends():
friends = get_friends()
unretweeted = get_friends_with_hidden_retweets()
retweeded = list(filter(lambda x:x not in unretweeted, friends))
log = []
print(retweeded)
try:
for user in retweeded:
out = hide_retweets_from_user(user)
log.append(out)
except Exception as e:
print("Something went Wrong")
raise(e)
print("All done!")
def delete_favs():
for i in range(200):
a = get_favorites()
print(a)
for i in a:
make_fav(i)
print(f'liked {i}')
delete_fav(i)
print(f'deleted {i}')
print('eh')
'''
# The following code was a more complex means of deleting favs
# Takes forever and Twitter doesn't like it.
with open('like','r') as likes:
likes = likes.read().splitlines()
try:
deleted = 0
while True:
tweet = likes.pop()
print(tweet)
make_favs(tweet)
delete_favs(tweet)
deleted += 1
except Exception as e:
print(f"{deleted} tweets deleted.")
with open('like','r') as new_likes:
for like in likes:
new_likes.write(like + '\n')
print(e)
'''
| StarcoderdataPython |
4838030 | <gh_stars>0
class Dessin():
"""docstring for Dessin."""
def __init__(self, nom):
self.nom = nom
self.liste = []
def add(self, obj):
self.liste.append(obj)
def affiche(self):
print("===",self.nom,"===")
for f in self.liste:
f.affiche()
| StarcoderdataPython |
1611177 | <reponame>RCTom168/Intro-to-Python-1<gh_stars>0
# Write a function is_even that will return true if the passed-in number is even.
# YOUR CODE HERE
def is_even(num): # Define the function
if num % 2 == 0:
return True
# Read a number from the keyboard
num = input("Enter a number: ")
num = int(num)
# Print out "Even!" if the number is even. Otherwise print "Odd"
answer = is_even(num)
# YOUR CODE HERE
if answer is True:
print("Even!")
else:
print("Odd!") | StarcoderdataPython |
1644072 | """
Defines upper bounds of YPD media for FBA
"""
from yeast.core.media.constants import reagents
from yeast.core.media.yp.base import yp
d = {
reagents["D-glucose"]: 22.6,
}
ypd = {**yp, **d}
| StarcoderdataPython |
1757653 | <reponame>Ljqiii/google_translate_api_python
from .GetTKK import getTKK
import time
import ctypes
import requests
class GoogleTranslate():
def __init__(self, sl='', tl='', domainnames=""):
"""
A python wrapped free and unlimited API for Google Translate.
:param sl:from Language
:param tl:to Language
:param domainnames: google domainnames, for example if domainnames="com" ,the url is "translate.google.com". In China the com domainnames is blocked by GFW,you can use "cn".
"""
self.sl = sl
self.tl = tl
self.hl = tl
if(domainnames==""):
self.domainnames ="com"
else:
self.domainnames = domainnames
self.TKK = getTKK(domainnames=self.domainnames)
def _returnintorzero(self,d):
try:
temp = int(d)
except:
temp = 0
return temp
def _xr(self, a, b):
size_b = len(b)
c = 0
while c < size_b - 2:
d = b[c + 2]
d = ord(d[0]) - 87 if 'a' <= d else int(d)
d = (a % 0x100000000) >> d if '+' == b[c + 1] else a << d
a = a + d & 4294967295 if '+' == b[c] else a ^ d
c += 3
return a
def trans(self,text):
"""
translate text
:param text: The text to be translate
:return:
"""
tk=self._gettk(text)
timeh = int(time.time() / 3600)
if (self.TKK.split(".")[0]!=timeh):
self.TKK = getTKK(domainnames=self.domainnames)
data = {
"client": 't',
"sl": self.sl,
"tl": self.tl,
"hl": self.hl,
"dt": ['at', 'bd', 'ex', 'ld', 'md', 'qca', 'rw', 'rm', 'ss', 't'],
"ie": 'UTF-8',
"oe": 'UTF-8',
"otf": 1,
"ssel": 0,
"tsel": 0,
"kc": 7,
"q": text,
"tk": tk
};
url='https://translate.google.'+self.domainnames+'/translate_a/single';
jsonres=requests.get(url=url,params=data)
return jsonres.json()[0][0][0]
def _gettk(self,a):
d = self.TKK.split(".")
b = int(d[0])
e = []
for g in range(len(a)):
l = ord(a[g])
if (128 > l):
e.append(l)
else:
if (2048 > l):
e.append(l >> 6 | 192)
else:
if (55296 == (l & 64512) and g + 1 < len(a) and 56320 == (ord(a[g + 1]) & 64512)):
                        l = 65536 + ((l & 1023) << 10) + (ord(a[g + 1]) & 1023)  # combine the surrogate pair into one code point
e.append(l >> 18 | 240)
e.append(l >> 12 & 63 | 128)
else:
e.append(l >> 12 | 224)
e.append(l >> 6 & 63 | 128)
e.append(l & 63 | 128)
a = b
for f in range(len(e)):
a = a + int(e[f])
a = self._xr(a, "+-a^+6")
a = self._xr(a, "+-3^+b+-f");
a ^=self._returnintorzero(d[1])
if(0>a):
a = (a & 2147483647) + 2147483648
a %= 1E6;
return str(int(a))+ "." + str(int(a) ^ b)
# a = GoogleTranslate(domainnames="cn",sl="en",tl="zh-CN")
# print(a.trans("I am a boy and she is a girl."))
# print(a.trans("She is a girl."))
| StarcoderdataPython |
4839862 | <gh_stars>100-1000
__all__ = ["filters", "generators"] | StarcoderdataPython |
3279784 | <gh_stars>1-10
""" This script is an example of benchmarking the continuous mlp baseline."""
import datetime
import os
import os.path as osp
import random
from baselines.bench import benchmarks
import dowel
from dowel import logger as dowel_logger
import gym
import pytest
import tensorflow as tf
from metarl.envs import normalize
from metarl.experiment import deterministic
from metarl.tf.algos import PPO
from metarl.tf.baselines import ContinuousMLPBaseline
from metarl.tf.envs import TfEnv
from metarl.tf.experiment import LocalTFRunner
from metarl.tf.policies import GaussianLSTMPolicy
from tests.fixtures import snapshot_config
policy_params = {
'policy_lr': 1e-3,
'policy_hidden_sizes': 32,
'hidden_nonlinearity': tf.nn.tanh
}
baseline_params = {'regressor_args': dict(hidden_sizes=(64, 64))}
algo_params = {
'n_envs':
8,
'n_epochs':
20,
'n_rollout_steps':
2048,
'discount':
0.99,
'max_path_length':
100,
'gae_lambda':
0.95,
'lr_clip_range':
0.2,
'policy_ent_coeff':
0.02,
'entropy_method':
'max',
'optimizer_args':
dict(
batch_size=32,
max_epochs=10,
tf_optimizer_args=dict(learning_rate=policy_params['policy_lr']),
),
'center_adv':
False
}
# number of processing elements to use for tensorflow
num_proc = 4 * 2
# number of trials to run per environment
num_trials = 3
@pytest.mark.huge
def test_benchmark_ppo_continuous_mlp_baseline():
""" Compare benchmarks between CMB and potentially other baselines."""
mujoco1m = benchmarks.get_benchmark('Mujoco1M')
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')
benchmark_dir = osp.join(os.getcwd(), 'data', 'local', 'benchmarks',
'ppo_cmb', timestamp)
for task in mujoco1m['tasks']:
env_id = task['env_id']
env = gym.make(env_id)
seeds = random.sample(range(100), num_trials)
task_dir = osp.join(benchmark_dir, env_id)
cmb_csvs = []
for trial in range(num_trials):
seed = seeds[trial]
trial_dir = task_dir + '/trial_%d_seed_%d' % (trial + 1, seed)
cmb_dir = trial_dir + '/continuous_mlp_baseline'
with tf.Graph().as_default():
env.reset()
cmb_csv = ppo_cmb(env, seed, cmb_dir)
cmb_csvs.append(cmb_csv)
env.close()
def ppo_cmb(env, seed, log_dir):
"""Create test continuous mlp baseline on ppo.
Args:
env (gym_env): Environment of the task.
seed (int): Random seed for the trial.
log_dir (str): Log dir path.
Returns:
str: training results in csv format.
"""
deterministic.set_seed(seed)
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=num_proc,
inter_op_parallelism_threads=num_proc)
sess = tf.Session(config=config)
with LocalTFRunner(snapshot_config, sess=sess,
max_cpus=num_proc) as runner:
env = TfEnv(normalize(env))
policy = GaussianLSTMPolicy(
env_spec=env.spec,
hidden_dim=policy_params['policy_hidden_sizes'],
hidden_nonlinearity=policy_params['hidden_nonlinearity'],
)
baseline = ContinuousMLPBaseline(
env_spec=env.spec,
regressor_args=baseline_params['regressor_args'],
)
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=algo_params['max_path_length'],
discount=algo_params['discount'],
gae_lambda=algo_params['gae_lambda'],
lr_clip_range=algo_params['lr_clip_range'],
entropy_method=algo_params['entropy_method'],
policy_ent_coeff=algo_params['policy_ent_coeff'],
optimizer_args=algo_params['optimizer_args'],
center_adv=algo_params['center_adv'],
stop_entropy_gradient=True)
# Set up logger since we are not using run_experiment
tabular_log_file = osp.join(log_dir, 'progress.csv')
dowel_logger.add_output(dowel.StdOutput())
dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))
runner.setup(algo,
env,
sampler_args=dict(n_envs=algo_params['n_envs']))
runner.train(n_epochs=algo_params['n_epochs'],
batch_size=algo_params['n_rollout_steps'])
dowel_logger.remove_all()
return tabular_log_file
| StarcoderdataPython |
66073 | <filename>hazelcast/transaction.py
import logging
import threading
import time
import uuid
from hazelcast.errors import TransactionError, IllegalStateError
from hazelcast.invocation import Invocation
from hazelcast.protocol.codec import (
transaction_create_codec,
transaction_commit_codec,
transaction_rollback_codec,
)
from hazelcast.proxy.transactional_list import TransactionalList
from hazelcast.proxy.transactional_map import TransactionalMap
from hazelcast.proxy.transactional_multi_map import TransactionalMultiMap
from hazelcast.proxy.transactional_queue import TransactionalQueue
from hazelcast.proxy.transactional_set import TransactionalSet
from hazelcast.util import thread_id
_logger = logging.getLogger(__name__)
_STATE_ACTIVE = "active"
_STATE_NOT_STARTED = "not_started"
_STATE_COMMITTED = "committed"
_STATE_ROLLED_BACK = "rolled_back"
_STATE_PARTIAL_COMMIT = "rolling_back"
TWO_PHASE = 1
"""
The two phase commit is separated in 2 parts. First it tries to execute the prepare; if there are any conflicts,
the prepare will fail. Once the prepare has succeeded, the commit (writing the changes) can be executed.
Hazelcast also provides three phase transaction by automatically copying the backlog to another member so that in case
of failure during a commit, another member can continue the commit from backup.
"""
ONE_PHASE = 2
"""
The one phase transaction executes a transaction using a single step at the end; committing the changes. There
is no prepare of the transactions, so conflicts are not detected. If there is a conflict, then when the transaction
commits the changes, some of the changes are written and others are not; leaving the system in a potentially permanent
inconsistent state.
"""
RETRY_COUNT = 20
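# Hedged usage sketch: assumes a connected HazelcastClient (defined elsewhere in this
# library) whose new_transaction() delegates to the TransactionManager below; the map
# name and values are invented.
def _transaction_usage_sketch(client):
    transaction = client.new_transaction(10, 1, TWO_PHASE)  # timeout, durability, type
    transaction.begin()
    try:
        accounts = transaction.get_map("accounts")
        accounts.put("alice", 100)
        transaction.commit()
    except Exception:
        transaction.rollback()
        raise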
class TransactionManager:
"""Manages the execution of client transactions and provides Transaction objects."""
def __init__(self, context):
self._context = context
def _connect(self):
connection_manager = self._context.connection_manager
for count in range(0, RETRY_COUNT):
connection = connection_manager.get_random_connection()
if connection:
return connection
_logger.debug(
"Could not get a connection for the transaction. Attempt %d of %d",
count,
RETRY_COUNT,
exc_info=True,
)
if count + 1 == RETRY_COUNT:
raise IllegalStateError("No active connection is found")
def new_transaction(
self, timeout: float, durability: int, transaction_type: int
) -> "Transaction":
"""Creates a Transaction object with given timeout, durability and
transaction type.
Args:
timeout: The timeout in seconds determines the maximum lifespan
of a transaction.
durability: The durability is the number of machines that can take
over if a member fails during a transaction commit or rollback.
transaction_type: the transaction type which can be
``hazelcast.transaction.TWO_PHASE`` or
``hazelcast.transaction.ONE_PHASE``.
Returns:
New created Transaction.
"""
connection = self._connect()
return Transaction(self._context, connection, timeout, durability, transaction_type)
class Transaction:
"""Provides transactional operations: beginning/committing transactions,
but also retrieving transactional data-structures like the
TransactionalMap.
"""
state = _STATE_NOT_STARTED
id: uuid.UUID = None
start_time: float = None
_locals = threading.local()
thread_id: int = None
def __init__(self, context, connection, timeout, durability, transaction_type):
self._context = context
self.connection = connection
self.timeout = timeout
self.durability = durability
self.transaction_type = transaction_type
self._objects = {}
def begin(self) -> None:
"""Begins this transaction."""
if hasattr(self._locals, "transaction_exists") and self._locals.transaction_exists:
raise TransactionError("Nested transactions are not allowed.")
if self.state != _STATE_NOT_STARTED:
raise TransactionError("Transaction has already been started.")
self._locals.transaction_exists = True
self.start_time = time.time()
self.thread_id = thread_id()
try:
request = transaction_create_codec.encode_request(
timeout=int(self.timeout * 1000),
durability=self.durability,
transaction_type=self.transaction_type,
thread_id=self.thread_id,
)
invocation = Invocation(
request, connection=self.connection, response_handler=lambda m: m
)
invocation_service = self._context.invocation_service
invocation_service.invoke(invocation)
response = invocation.future.result()
self.id = transaction_create_codec.decode_response(response)
self.state = _STATE_ACTIVE
except:
self._locals.transaction_exists = False
raise
def commit(self) -> None:
"""Commits this transaction."""
self._check_thread()
if self.state != _STATE_ACTIVE:
raise TransactionError("Transaction is not active.")
try:
self._check_timeout()
request = transaction_commit_codec.encode_request(self.id, self.thread_id)
invocation = Invocation(request, connection=self.connection)
invocation_service = self._context.invocation_service
invocation_service.invoke(invocation)
invocation.future.result()
self.state = _STATE_COMMITTED
except:
self.state = _STATE_PARTIAL_COMMIT
raise
finally:
self._locals.transaction_exists = False
def rollback(self) -> None:
"""Rollback of this current transaction."""
self._check_thread()
if self.state not in (_STATE_ACTIVE, _STATE_PARTIAL_COMMIT):
raise TransactionError("Transaction is not active.")
try:
if self.state != _STATE_PARTIAL_COMMIT:
request = transaction_rollback_codec.encode_request(self.id, self.thread_id)
invocation = Invocation(request, connection=self.connection)
invocation_service = self._context.invocation_service
invocation_service.invoke(invocation)
invocation.future.result()
self.state = _STATE_ROLLED_BACK
finally:
self._locals.transaction_exists = False
def get_list(self, name: str) -> TransactionalList:
"""Returns the transactional list instance with the specified name.
Args:
name: The specified name.
Returns:
The instance of Transactional List with the specified name.
"""
return self._get_or_create_object(name, TransactionalList)
def get_map(self, name: str) -> TransactionalMap:
"""Returns the transactional map instance with the specified name.
Args:
name: The specified name.
Returns:
The instance of Transactional Map with the specified name.
"""
return self._get_or_create_object(name, TransactionalMap)
def get_multi_map(self, name: str) -> TransactionalMultiMap:
"""Returns the transactional multimap instance with the specified name.
Args:
name: The specified name.
Returns:
The instance of Transactional MultiMap with the specified name.
"""
return self._get_or_create_object(name, TransactionalMultiMap)
def get_queue(self, name: str) -> TransactionalQueue:
"""Returns the transactional queue instance with the specified name.
Args:
name: The specified name.
Returns:
The instance of Transactional Queue with the specified name.
"""
return self._get_or_create_object(name, TransactionalQueue)
def get_set(self, name: str) -> TransactionalSet:
"""Returns the transactional set instance with the specified name.
Args:
name: The specified name.
Returns:
The instance of Transactional Set with the specified name.
"""
return self._get_or_create_object(name, TransactionalSet)
def _get_or_create_object(self, name, proxy_type):
if self.state != _STATE_ACTIVE:
raise TransactionError("Transaction is not in active state.")
self._check_thread()
key = (proxy_type, name)
try:
return self._objects[key]
except KeyError:
proxy = proxy_type(name, self, self._context)
self._objects[key] = proxy
return proxy
def _check_thread(self):
if not thread_id() == self.thread_id:
raise TransactionError("Transaction cannot span multiple threads.")
def _check_timeout(self):
if time.time() > self.timeout + self.start_time:
raise TransactionError("Transaction has timed out.")
def __enter__(self):
self.begin()
return self
def __exit__(self, type, value, traceback):
if not type and not value and self.state == _STATE_ACTIVE:
self.commit()
elif self.state in (_STATE_PARTIAL_COMMIT, _STATE_ACTIVE):
self.rollback()
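# A minimal usage sketch (not part of the original module, shown for context):
# TransactionManager.new_transaction() hands back a Transaction that is usually
# driven through the context-manager protocol above, which maps onto begin(),
# commit() and rollback(). The `context` object and the TransactionalMap `put`
# call are assumed from the wider Hazelcast client, not defined in this file.
#
#   manager = TransactionManager(context)
#   tx = manager.new_transaction(timeout=120, durability=1, transaction_type=TWO_PHASE)
#   with tx:                              # __enter__ -> begin()
#       orders = tx.get_map("orders")     # TransactionalMap proxy
#       orders.put("order-1", "pending")  # staged until commit()
#   # leaving the block without an exception commits; an exception rolls back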
| StarcoderdataPython |
122108 | <filename>examples/distributed/simple_sync_distributed.py
#!/usr/bin/env python
""" Simple example of using leap_ec.distrib.synchronous
"""
import os
import multiprocessing.popen_spawn_posix # Python 3.9 workaround for Dask. See https://github.com/dask/distributed/issues/4168
from distributed import Client
import toolz
from leap_ec import context, test_env_var
from leap_ec import ops
from leap_ec.decoder import IdentityDecoder
from leap_ec.binary_rep.initializers import create_binary_sequence
from leap_ec.binary_rep.ops import mutate_bitflip
from leap_ec.binary_rep.problems import MaxOnes
from leap_ec.distrib import DistributedIndividual
from leap_ec.distrib import synchronous
from leap_ec.probe import AttributesCSVProbe
##############################
# Entry point
##############################
if __name__ == '__main__':
# We've added some additional state to the probe for DistributedIndividual,
# so we want to capture that.
probe = AttributesCSVProbe(attributes=['hostname',
'pid',
'uuid',
'birth_id',
'start_eval_time',
'stop_eval_time'],
do_fitness=True,
do_genome=True,
stream=open('simple_sync_distributed.csv', 'w'))
# Just to demonstrate multiple outputs, we'll have a separate probe that
# will take snapshots of the offspring before culling. That way we can
# compare the before and after to see what specific individuals were culled.
offspring_probe = AttributesCSVProbe(attributes=['hostname',
'pid',
'uuid',
'birth_id',
'start_eval_time',
'stop_eval_time'],
do_fitness=True,
stream=open('simple_sync_distributed_offspring.csv', 'w'))
with Client() as client:
# create an initial population of 5 parents of 4 bits each for the
# MAX ONES problem
parents = DistributedIndividual.create_population(5, # make five individuals
initialize=create_binary_sequence(
4), # with four bits
decoder=IdentityDecoder(),
problem=MaxOnes())
# Scatter the initial parents to dask workers for evaluation
parents = synchronous.eval_population(parents, client=client)
# probes rely on this information for printing CSV 'step' column
context['leap']['generation'] = 0
probe(parents) # generation 0 is initial population
offspring_probe(parents) # generation 0 is initial population
# When running the test harness, just run for two generations
# (we use this to quickly ensure our examples don't get bitrot)
if os.environ.get(test_env_var, False) == 'True':
generations = 2
else:
generations = 5
for current_generation in range(generations):
context['leap']['generation'] += 1
offspring = toolz.pipe(parents,
ops.tournament_selection,
ops.clone,
mutate_bitflip(expected_num_mutations=1),
ops.uniform_crossover,
# Scatter offspring to be evaluated
synchronous.eval_pool(client=client,
size=len(parents)),
offspring_probe, # snapshot before culling
ops.elitist_survival(parents=parents),
# snapshot of population after culling
# in separate CSV file
probe)
print('generation:', current_generation)
[print(x.genome, x.fitness) for x in offspring]
parents = offspring
print('Final population:')
[print(x.genome, x.fitness) for x in parents]
| StarcoderdataPython |
3244860 | from __future__ import division
import fire
from pathlib import Path
import torch
from torchvision import transforms
from tea.config.app_cfg import parse_cfg, print_cfg, get_epochs, get_data_in_dir, get_model_out_dir, get_device
import tea.data.data_loader_factory as DLFactory
import tea.models.factory as MFactory
from tea.trainer.basic_learner import find_max_lr, build_trainer, create_optimizer
from tea.plot.commons import plot_lr_losses
from tea.data.tiny_imageset import TinyImageSet
import matplotlib.pyplot as plt
from fastai.basic_data import DataBunch
from fastai.train import lr_find, fit_one_cycle, Learner
from fastai.vision import accuracy
def build_train_val_datasets(cfg, in_memory=False):
data_in_dir = get_data_in_dir(cfg)
normalize = transforms.Normalize((.5, .5, .5), (.5, .5, .5))
train_aug = transforms.Compose([
transforms.RandomResizedCrop(56),
transforms.RandomHorizontalFlip(),
# transforms.RandomRotation(10)
])
val_aug = transforms.Compose([
transforms.Resize(64),
transforms.CenterCrop(56)
])
training_transform = transforms.Compose([
transforms.Lambda(lambda x: x.convert("RGB")),
train_aug,
transforms.ToTensor(),
normalize
])
valid_transform = transforms.Compose([
transforms.Lambda(lambda x: x.convert("RGB")),
val_aug,
transforms.ToTensor(),
normalize
])
train_ds = TinyImageSet(data_in_dir, 'train', transform=training_transform, in_memory=in_memory)
valid_ds = TinyImageSet(data_in_dir, 'val', transform=valid_transform, in_memory=in_memory)
return train_ds, valid_ds
"""
Like anything in life, it is good to follow a pattern.
In this case, any application starts with a cfg file,
with optional override arguments like the following:
data_dir/path
model_cfg
model_out_dir
epochs, lr, batch etc
"""
def run(ini_file='tinyimg.ini',
data_in_dir='./../../dataset',
model_cfg='../cfg/vgg-tiny.cfg',
model_out_dir='./models',
epochs=30,
lr=3.0e-5,
batch_sz=256,
num_worker=4,
log_freq=20,
use_gpu=True):
# Step 1: parse config
cfg = parse_cfg(ini_file,
data_in_dir=data_in_dir,
model_cfg=model_cfg,
model_out_dir=model_out_dir,
epochs=epochs, lr=lr, batch_sz=batch_sz, log_freq=log_freq,
num_worker=num_worker, use_gpu=use_gpu)
print_cfg(cfg)
# Step 2: create data sets and loaders
train_ds, val_ds = build_train_val_datasets(cfg, in_memory=True)
train_loader, val_loader = DLFactory.create_train_val_dataloader(cfg, train_ds, val_ds)
# Step 3: create model
model = MFactory.create_model(cfg)
# Step 4: train/valid
    # This demos how our approach can be easily integrated with our app framework
device = get_device(cfg)
data = DataBunch(train_loader, val_loader, device=device)
learn = Learner(data, model, loss_func=torch.nn.CrossEntropyLoss(),
metrics=accuracy)
# callback_fns=[partial(EarlyStoppingCallback, monitor='accuracy', min_delta=0.01, patience=2)])
# lr_find(learn, start_lr=1e-7, end_lr=10)
# learn.recorder.plot()
# lrs_losses = [(lr, loss) for lr, loss in zip(learn.recorder.lrs, learn.recorder.losses)]
# min_lr = min(lrs_losses[10:-5], key=lambda x: x[1])[0]
# lr = min_lr/10.0
# plt.show()
# print(f'Minimal lr rate is {min_lr} propose init lr {lr}')
# fit_one_cycle(learn, epochs, lr)
learn.fit(epochs, lr)
if __name__ == '__main__':
fire.Fire(run)
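# Invocation sketch: python-fire maps run()'s keyword arguments onto CLI flags,
# so (assuming this file is saved as train_tinyimg.py) a run could be launched
# with, e.g.:
#
#   python train_tinyimg.py --epochs=30 --lr=3e-5 --batch_sz=256 --use_gpu=True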
| StarcoderdataPython |
119420 | <filename>core/migrations/0025_course_welcome_email.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0024_coursestudent_is_active'),
]
operations = [
migrations.AddField(
model_name='course',
name='welcome_email',
field=models.TextField(verbose_name='Welcome Email', blank=True),
),
]
| StarcoderdataPython |
3371955 | from bank_bot.bankbot.core import bot, client_factory, safe_send_message
from bank_bot import settings
from bank_bot.banking_system import UserError, TransactionError, Database, HackerError, MessageError, AddressRecordError
@bot.message_handler(regexp=r"\/message [a-zA-Z0-9]{10} [\w\W]+")
def send_message(message):
    # Generic messaging command; allows sending any message to another user registered in the bot.
    # Only the recipient's unique hash is required to send a message; the message is signed with the sender's hash.
client = client_factory.create_client(message)
try:
reciever_chat_id, sent_message = client.prepare_message(message.text)
except (UserError, MessageError) as err:
bot.send_message(client.chat_id, err)
return
safe_send_message(bot, client.chat_id, f"{settings.MESSAGE_SEND_RESULT} {sent_message}")
safe_send_message(bot, reciever_chat_id, f"{settings.INCOMING_MESSAGE} {sent_message}.\n{settings.MESSAGE_SENDER} {client.user.character_hash}")
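# Message format sketch: the regexp above expects a 10-character recipient hash
# followed by free text; a (hypothetical) example message would be
#
#   /message a1B2c3D4e5 meet at the vault at noon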
@bot.message_handler(commands=['history_messages_sent',])
def list_sent_messages(message):
client = client_factory.create_client(message)
try:
message = client.inspect_messages(is_sender=True)
except (UserError, MessageError) as err:
message = err.message
safe_send_message(bot, client.chat_id, message)
@bot.message_handler(commands=['history_messages_recieved',])
def list_recieved_messages(message):
client = client_factory.create_client(message)
try:
message = client.inspect_messages(is_sender=False)
except (UserError, MessageError) as err:
message = err.message
safe_send_message(bot, client.chat_id, message)
@bot.message_handler(commands=['history_messages',])
def list_all_messages(message):
client = client_factory.create_client(message)
try:
message = client.inspect_all_messages()
except (UserError, MessageError) as err:
message = err.message
safe_send_message(bot, client.chat_id, message)
@bot.message_handler(regexp=r"^\/history_messages_pair [a-zA-Z0-9]{10}")
def list_pair_messages(message):
client = client_factory.create_client(message)
try:
message = client.inspect_pair_history_messages(message=message.text)
except (UserError, MessageError) as err:
message = err.message
safe_send_message(bot, client.chat_id, message)
| StarcoderdataPython |
3300711 | <filename>lieutenant/lieutenant/urls.py
from django.conf.urls import patterns, include, url
from django.contrib import admin
from lieutenant.views import Home
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'lieutenant.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', Home.as_view(), name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/', include('api.urls', namespace="api")),
url(r'^entries/', include('entries.urls', namespace="entries")),
url(r'^tags/', include('tags.urls', namespace="tags")),
)
| StarcoderdataPython |
45518 | <reponame>automation-liberation/deployment-helper
from enum import Enum
class ChangelogEntryEnum(Enum):
ADDED = 'Added'
CHANGED = 'Changed'
FIXED = 'Fixed'
REMOVED = 'Removed'
| StarcoderdataPython |
3399838 | <gh_stars>0
def aoc(data):
x, y, d = 0, 0, 0
moves = {
"E": (1, 0, 0),
"S": (0, 1, 0),
"W": (-1, 0, 0),
"N": (0, -1, 0),
"R": (0, 0, 1),
"L": (0, 0, -1),
}
for move, step in [(i[0], int(i[1:])) for i in data.split()]:
if move == "F":
x += list(moves.values())[d // 90 % 4][0] * step
y += list(moves.values())[d // 90 % 4][1] * step
else:
x += moves[move][0] * step
y += moves[move][1] * step
d += moves[move][2] * step
return abs(x) + abs(y)
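# Worked example (the Advent of Code 2020 day 12 part 1 sample, whitespace-
# separated as this parser expects): F10 moves east 10, N3 north 3, F7 east 7,
# R90 turns the heading south, F11 moves south 11 -> |17| + |8| = 25.
#
#   >>> aoc("F10 N3 F7 R90 F11")
#   25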
| StarcoderdataPython |
4805177 | <reponame>laffra/pava
def add_native_methods(clazz):
def selectAlternative__boolean__java_lang_invoke_MethodHandle__java_lang_invoke_MethodHandle__(a0, a1, a2):
raise NotImplementedError()
clazz.selectAlternative__boolean__java_lang_invoke_MethodHandle__java_lang_invoke_MethodHandle__ = staticmethod(selectAlternative__boolean__java_lang_invoke_MethodHandle__java_lang_invoke_MethodHandle__)
| StarcoderdataPython |
3369197 | import pytest
from pathlib import Path
from pandas.testing import assert_frame_equal
import pandas as pd
from sqlalchemy.exc import DatabaseError
from prestest.fixtures import container, start_container, db_manager, create_temporary_table
from prestest.container import CONTAINER_NAMES
resource_folder = Path(".").resolve() / "resources"
@pytest.mark.prestest(container_folder=resource_folder)
def test_container_set_docker_folder_correctly(container):
assert container.docker_folder == resource_folder
@pytest.mark.prestest(container_folder=resource_folder)
def test_db_manager_set_docker_folder_correctly(db_manager):
assert db_manager.container.docker_folder == resource_folder
@pytest.mark.prestest(reset=True)
def test_start_container_disable_table_modification_do_not_change_hive_properties(start_container, container, tmpdir):
temp_download = Path(tmpdir.join("test_start_container_enable_table_modification_hive_properties"))
container.download_from_container(from_container="/opt/presto-server-0.181/etc/catalog/hive.properties",
to_local=temp_download,
container_name=CONTAINER_NAMES["presto_coordinator"])
with open(temp_download, 'r') as f:
result = set(l.strip() for l in f.readlines() if l.strip() != '')
expected = {"hive.allow-drop-table=true", "hive.allow-rename-table=true", "hive.allow-add-column=true"}
assert not result.intersection(expected)
@pytest.mark.prestest(allow_table_modification=True, reset=True)
def test_start_container_enable_table_modification_correctly(start_container, container, tmpdir):
temp_download = Path(tmpdir.join("test_start_container_enable_table_modification_hive_properties"))
container.download_from_container(from_container="/opt/presto-server-0.181/etc/catalog/hive.properties",
to_local=temp_download,
container_name=CONTAINER_NAMES["presto_coordinator"])
with open(temp_download, 'r') as f:
result = set(l.strip() for l in f.readlines() if l.strip() != '')
expected = {"hive.allow-drop-table=true", "hive.allow-rename-table=true", "hive.allow-add-column=true"}
assert result.issuperset(expected)
@pytest.mark.prestest(reset=True)
def test_start_container_reset_correctly(start_container, container, tmpdir):
temp_download = Path(tmpdir.join("test_start_container_enable_table_modification_hive_properties"))
container.download_from_container(from_container="/opt/presto-server-0.181/etc/catalog/hive.properties",
to_local=temp_download,
container_name=CONTAINER_NAMES["presto_coordinator"])
with open(temp_download, 'r') as f:
result = set(l.strip() for l in f.readlines() if l.strip() != '')
expected = {"hive.allow-drop-table=true", "hive.allow-rename-table=true", "hive.allow-add-column=true"}
assert not result.intersection(expected)
@pytest.fixture()
def clean_up_table(db_manager):
table_name = "sandbox.test_table"
db_manager.drop_table(table=table_name)
db_manager.run_hive_query(f"CREATE DATABASE IF NOT EXISTS sandbox")
yield table_name
db_manager.drop_table(table=table_name)
db_manager.run_hive_query(f"DROP DATABASE IF EXISTS sandbox")
@pytest.mark.prestest(allow_table_modification=True, reset=True)
def test_start_container_enable_table_modification_allow_presto_table_creation_and_drop(
start_container, db_manager, clean_up_table):
table_name = clean_up_table
create_table = f"""
CREATE TABLE IF NOT EXISTS {table_name} AS
SELECT
1 AS col1,
'dummy' AS col2
"""
db_manager.read_sql(create_table)
select_table = f"SELECT * FROM {table_name}"
result = db_manager.read_sql(select_table)
expected = pd.DataFrame({"col1": [1], "col2": ["dummy"]})
assert_frame_equal(result, expected)
db_manager.read_sql(f"DROP TABLE {table_name}")
with pytest.raises(DatabaseError):
db_manager.read_sql(select_table)
create_temporary_table_query = """CREATE TABLE {table_name} (
col1 INTEGER,
col2 STRING
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TEXTFILE
"""
@pytest.mark.prestest(table_name="sandbox.test_temp_table", query=create_temporary_table_query,
file=resource_folder / "sample_table.csv")
def test_create_temporary_table_create_table_correctly(create_temporary_table, db_manager):
result = db_manager.read_sql("SELECT * FROM sandbox.test_temp_table")
expected = pd.DataFrame({"col1": [123, 456], "col2": ["abc", "cba"]})
assert_frame_equal(result, expected)
| StarcoderdataPython |
1776819 | <filename>libs/pose_sphere.py<gh_stars>1-10
from __future__ import print_function
import math
import threading
import time
class PoseSphere:
def __init__(self, name, priority=1):
self.position = (0.0, 0.0, 0.0)
self.p2 = (0.0, 0.0, 0.0)
self.type = 'sphere'
self.angle = 0.0
self.radius = 1.0
self.tolerance = 1.0
self.name = name
self.time_check = None
self.timer = 0
self.action = ''
self.timeout_raised = False
self.priority = priority
    def set_sphere(self, position, angle, radius=5, tolerance=10):
        x, y, z = position
self.type = 'sphere'
self.position = (x, y, z)
self.angle = angle
self.radius = radius
self.tolerance = tolerance
    def set_block(self, corner1, corner2, angle, tolerance=10):
        x1, y1, z1 = corner1
        x2, y2, z2 = corner2
self.type = 'block'
self.position = (max([x1, x2]), max([y1, y2]), max([z1, z2]))
self.p2 = (min([x1, x2]), min([y1, y2]), min([z1, z2]))
self.angle = angle
self.tolerance = tolerance
def set_action(self, action, time):
self.action = action
self.timer = time
    def check(self, position, angle):
        x, y, z = position
if self.type == 'sphere':
distance = math.sqrt(math.pow(x - self.position[0], 2) + math.pow(
y - self.position[1], 2) + math.pow(z - self.position[2], 2))
delta_angle = abs(angle - self.angle)
if (distance <= self.radius) and (delta_angle < self.tolerance):
if self.time_check == None:
self.time_check = time.time()
return True
elif self.type == 'block':
inside = (self.p2[0] <= x <= self.position[0]) and (
self.p2[1] <= y <= self.position[1]) and (self.p2[2] <= z <= self.position[2])
delta_angle = abs(angle - self.angle)
if inside and (delta_angle < self.tolerance):
if self.time_check == None:
self.time_check = time.time()
return True
self.time_check = None
self.timeout_raised = False
return False
def get_time(self):
if self.time_check != None:
return time.time() - self.time_check
else:
return 0.0
def timeout(self):
if self.timer != 0:
if self.get_time() >= self.timer and not self.timeout_raised:
self.timeout_raised = True
return True
return False
def skip(self):
self.time_check = None
self.timeout_raised = False
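# Usage sketch (hypothetical values): a sphere of radius 5 around the origin at
# a 90-degree reference angle accepts a pose ~1.73 units away at 92 degrees,
# since the distance is within the radius and the angle error (2) is inside the
# default tolerance (10).
#
#   zone = PoseSphere("left_hand")
#   zone.set_sphere((0.0, 0.0, 0.0), 90.0, radius=5)
#   zone.check((1.0, 1.0, 1.0), 92.0)   # -> True
#   zone.get_time()                     # seconds spent inside the zone so far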
| StarcoderdataPython |
112915 | <reponame>Farbfetzen/Advent_of_Code
from unittest import TestCase
from src.util.load_data import load_data
from src.year2020.day13 import part_1, part_2, part_2_without_crt, prepare_data
from test.decorators import sample
data = load_data(2020, 13)
@sample
class Test2020Day13Samples(TestCase):
prepared_data: list[str]
@classmethod
def setUpClass(cls) -> None:
cls.prepared_data = prepare_data(data.samples[0])
def test_part_1(self) -> None:
self.assertEqual(295, part_1(self.prepared_data))
def test_part_2(self) -> None:
self.assertEqual(1068781, part_2(self.prepared_data))
def test_part_2_without_crt(self):
self.assertEqual(1068781, part_2_without_crt(self.prepared_data))
class Test2020Day13(TestCase):
prepared_data: list[str]
@classmethod
def setUpClass(cls) -> None:
cls.prepared_data = prepare_data(data.input)
def test_part_1(self) -> None:
self.assertEqual(3882, part_1(self.prepared_data))
def test_part_2(self) -> None:
self.assertEqual(867295486378319, part_2(self.prepared_data))
def test_part_2_without_crt(self) -> None:
self.assertEqual(867295486378319, part_2_without_crt(self.prepared_data))
| StarcoderdataPython |
3296016 | <gh_stars>0
#!/usr/bin/env python3
from collections import defaultdict
from pgmpy.factors import TabularCPD, TreeCPD, RuleCPD
import itertools
import networkx as nx
class DirectedGraph(nx.DiGraph):
"""
Base class for directed graphs.
Directed graph assumes that all the nodes in graph are either random
variables, factors or clusters of random variables and edges in the graph
are dependencies between these random variables.
Parameters
----------
data: input graph
Data to initialize graph. If data=None (default) an empty graph is
created. The data can be an edge list or any Networkx graph object.
Examples
--------
Create an empty DirectedGraph with no nodes and no edges
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
G can be grown in several ways
**Nodes:**
Add one node at a time:
>>> G.add_node('a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge('a', 'b')
a list of edges,
>>> G.add_edges_from([('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
Many common graph features allow python syntax for speed reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
"""
def __init__(self, ebunch=None):
super().__init__(ebunch)
def add_node(self, node, **kwargs):
"""
Add a single node to the Graph.
Parameters
----------
node: node
A node can be any hashable Python object.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_node('A')
"""
super().add_node(node, **kwargs)
def add_nodes_from(self, nodes, **kwargs):
"""
Add multiple nodes to the Graph.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, etc.).
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_nodes_from(['A', 'B', 'C'])
"""
for node in nodes:
self.add_node(node, **kwargs)
def add_edge(self, u, v, **kwargs):
"""
Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph
Parameters
----------
u,v : nodes
Nodes can be any hashable Python object.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_nodes_from(['Alice', 'Bob', 'Charles'])
>>> G.add_edge('Alice', 'Bob')
"""
super().add_edge(u, v, **kwargs)
def add_edges_from(self, ebunch, **kwargs):
"""
Add all the edges in ebunch.
If nodes referred in the ebunch are not already present, they
will be automatically added. Node names should be strings.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the graph.
The edges must be given as 2-tuples (u, v).
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_nodes_from(['Alice', 'Bob', 'Charles'])
>>> G.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])
"""
for edge in ebunch:
self.add_edge(*edge, **kwargs)
def get_parents(self, node):
"""
Returns a list of parents of node.
Parameters
----------
node: string, int or any hashable python object.
The node whose parents would be returned.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph([('diff', 'grade'), ('intel', 'grade')])
        >>> G.get_parents('grade')
['diff', 'intel']
"""
return self.predecessors(node)
def moralize(self):
"""
Removes all the immoralities in the DirectedGraph and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph([('diff', 'grade'), ('intel', 'grade')])
>>> moral_graph = G.moralize()
>>> moral_graph.edges()
[('intel', 'grade'), ('intel', 'diff'), ('grade', 'diff')]
"""
from pgmpy.base import UndirectedGraph
moral_graph = UndirectedGraph(self.to_undirected().edges())
for node in self.nodes():
moral_graph.add_edges_from(itertools.combinations(
self.get_parents(node), 2))
return moral_graph
| StarcoderdataPython |
1727584 | # https://codeforces.com/problemset/problem/479/A
a = int(input())
b = int(input())
c = int(input())
a1 = a + b * c
a2 = a * (b + c)
a3 = a * b * c
a4 = (a + b) * c
a5 = a + b + c
print(max(a1, a2, a3, a4, a5))
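# Sample from Codeforces 479A: inputs 1, 2, 3 give
# max(1 + 2 * 3, 1 * (2 + 3), 1 * 2 * 3, (1 + 2) * 3, 1 + 2 + 3) = max(7, 5, 6, 9, 6) = 9.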
| StarcoderdataPython |
1637985 | <filename>map_label_tool/py_proto/cyber/proto/perception_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cyber/proto/perception.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cyber/proto/perception.proto',
package='apollo.cyber.proto',
syntax='proto2',
serialized_pb=_b('\n\x1c\x63yber/proto/perception.proto\x12\x12\x61pollo.cyber.proto\"\x80\x01\n\nPerception\x12\x35\n\x06header\x18\x01 \x01(\x0b\x32%.apollo.cyber.proto.Perception.Header\x12\x0e\n\x06msg_id\x18\x02 \x01(\x04\x12\x0e\n\x06result\x18\x03 \x01(\x01\x1a\x1b\n\x06Header\x12\x11\n\ttimestamp\x18\x01 \x01(\x04')
)
_PERCEPTION_HEADER = _descriptor.Descriptor(
name='Header',
full_name='apollo.cyber.proto.Perception.Header',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='apollo.cyber.proto.Perception.Header.timestamp', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=154,
serialized_end=181,
)
_PERCEPTION = _descriptor.Descriptor(
name='Perception',
full_name='apollo.cyber.proto.Perception',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='apollo.cyber.proto.Perception.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='msg_id', full_name='apollo.cyber.proto.Perception.msg_id', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='result', full_name='apollo.cyber.proto.Perception.result', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PERCEPTION_HEADER, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=181,
)
_PERCEPTION_HEADER.containing_type = _PERCEPTION
_PERCEPTION.fields_by_name['header'].message_type = _PERCEPTION_HEADER
DESCRIPTOR.message_types_by_name['Perception'] = _PERCEPTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Perception = _reflection.GeneratedProtocolMessageType('Perception', (_message.Message,), dict(
Header = _reflection.GeneratedProtocolMessageType('Header', (_message.Message,), dict(
DESCRIPTOR = _PERCEPTION_HEADER,
__module__ = 'cyber.proto.perception_pb2'
# @@protoc_insertion_point(class_scope:apollo.cyber.proto.Perception.Header)
))
,
DESCRIPTOR = _PERCEPTION,
__module__ = 'cyber.proto.perception_pb2'
# @@protoc_insertion_point(class_scope:apollo.cyber.proto.Perception)
))
_sym_db.RegisterMessage(Perception)
_sym_db.RegisterMessage(Perception.Header)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
3301302 | <reponame>MrKomish/pymono<gh_stars>1-10
from cocos.director import director
from cocos.text import Label
from pymono.lib.cocos2d import *
from pymono.lib.observable import Observable
from pymono.models.cells.StreetCell import StreetCell, StreetPrices
from pymono.config import rgba_colors
from pymono.models.Cell import Cell
class CellDetailsView(cocos.layer.Layer):
def __init__(self, cell_details_ctrl):
super(CellDetailsView, self).__init__()
self.cell_details_ctrl = cell_details_ctrl
self.cell_name_label = None
self.owner_label = None
self.current_income_label = None
self.street_prices_labels = [None, None, None]
self.street_prices_labels_order = ["land_price", "house_price", "hotel_price"]
self.x = director.window.width - 150
self.y = 50
def build(self):
self.cell_details_ctrl.current_cell.watch(self.set_cell)
def remove_all_cell_details(self):
if self.cell_name_label in self.get_children():
self.remove(self.cell_name_label)
self.cell_name_label = None
for street_price_label in self.street_prices_labels:
if street_price_label in self.get_children():
self.remove(street_price_label)
self.street_prices_labels = [None, None, None]
if self.owner_label in self.get_children():
self.remove(self.owner_label)
self.owner_label = None
if self.current_income_label in self.get_children():
self.remove(self.current_income_label)
self.current_income_label = None
def set_cell(self, cell: Cell):
self.remove_all_cell_details()
if cell is None:
return
self.create_cell_name_label(cell.name)
if isinstance(cell, StreetCell):
self.create_street_cell_prices_labels(cell.prices)
self.create_cell_owner_label(cell.owner)
self.create_cell_street_current_income(cell.owner.get() is not None, cell.current_income)
def create_cell_name_label(self, text):
self.cell_name_label = Label(text,
font_name='Calibri',
color=(0, 0, 0, 255),
font_size=24,
anchor_x='center', anchor_y='center')
self.cell_name_label.position = 0, 210
self.add(self.cell_name_label)
def create_street_cell_prices_labels(self, prices: StreetPrices):
for price_type, price in prices:
view_index = self.street_prices_labels_order.index(price_type)
price_label = Label("- " + price_type.replace("_", " ").title() + ": $%d" % price,
font_name='Calibri',
color=(0, 0, 0, 255),
font_size=18,
anchor_x='center', anchor_y='center')
price_label.position = 0, 150 - view_index * 30
self.street_prices_labels[view_index] = price_label
self.add(price_label)
def create_cell_owner_label(self, owner_observable: Observable):
owner = owner_observable.get()
color = rgba_colors[owner.color if owner is not None else "black"]
owner_text = owner.color.title() if owner is not None else "No One"
self.owner_label = Label("Owner: " + owner_text,
font_name='Calibri',
color=color,
font_size=20,
anchor_x='center', anchor_y='center')
self.owner_label.position = 0, 30
self.add(self.owner_label)
def create_cell_street_current_income(self, has_owner, current_income):
self.current_income_label = Label(("" if has_owner else "Potential ") + "Income: $%d" % current_income,
font_name='Calibri',
color=rgba_colors["black"],
font_size=20,
anchor_x='center', anchor_y='center')
self.current_income_label.position = 0, 0
self.add(self.current_income_label)
| StarcoderdataPython |
194537 | <gh_stars>0
"""Append file."""
from os import getcwd
from os.path import abspath, realpath, join, dirname
content = 'Some text Lorem ipsum dolor sit amet |::|\n\t\t@treedbox'
appendMe = '\n1º New bit of information'
appendMeToo = '2º New bit of information'
filename = 'filename.txt'
dir = abspath(dirname(__file__))
absFilePathAutoSlash = abspath(join(dirname(__file__), filename))
chmod = 'a' # append
openFile = open(absFilePathAutoSlash, chmod)
openFile.write(content)
openFile.write(appendMe)
openFile.write('\n')
openFile.write(appendMeToo)
openFile.close()
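# A quick read-back to verify the append (sketch reusing the path variable
# defined above):
#
#   with open(absFilePathAutoSlash) as readBack:
#       print(readBack.read())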
| StarcoderdataPython |
1673000 | <reponame>Winzarten/SecondMonitor
import ac
import acsys
import sys
import os.path
import platform
import configparser
import ctypes
from ctypes import *
from smshared_mem import SecondMonitorShared
sharedMem = SecondMonitorShared()
pluginVersion = "1.0.0"
timer = 0
def updateSharedMemory():
global sharedMem
sharedmem = sharedMem.getsharedmem()
sharedmem.numVehicles = ac.getCarsCount()
sharedmem.focusVehicle = ac.getFocusedCar()
#now we'll build the slots, so we later know every single (possible) car
carIds = range(0, ac.getCarsCount(), 1)
for carId in carIds:
        # first we'll check whether there is a car for this id; as soon as it
        # returns -1, we're done
if str(ac.getCarName(carId)) == '-1':
break
else:
sharedmem.vehicleInfo[carId].carId = carId
sharedmem.vehicleInfo[carId].driverName = ac.getDriverName(carId).encode('utf-8')
sharedmem.vehicleInfo[carId].carModel = ac.getCarName(carId).encode('utf-8')
sharedmem.vehicleInfo[carId].speedMS = ac.getCarState(carId, acsys.CS.SpeedMS)
sharedmem.vehicleInfo[carId].bestLapMS = ac.getCarState(carId, acsys.CS.BestLap)
sharedmem.vehicleInfo[carId].lapCount = ac.getCarState(carId, acsys.CS.LapCount)
sharedmem.vehicleInfo[carId].currentLapInvalid = ac.getCarState(carId, acsys.CS.LapInvalidated)
sharedmem.vehicleInfo[carId].currentLapTimeMS = ac.getCarState(carId, acsys.CS.LapTime)
sharedmem.vehicleInfo[carId].lastLapTimeMS = ac.getCarState(carId, acsys.CS.LastLap)
sharedmem.vehicleInfo[carId].worldPosition = ac.getCarState(carId, acsys.CS.WorldPosition)
sharedmem.vehicleInfo[carId].isCarInPitline = ac.isCarInPitline(carId)
sharedmem.vehicleInfo[carId].isCarInPit = ac.isCarInPit(carId)
sharedmem.vehicleInfo[carId].carLeaderboardPosition = ac.getCarLeaderboardPosition(carId)
sharedmem.vehicleInfo[carId].carRealTimeLeaderboardPosition = ac.getCarRealTimeLeaderboardPosition(carId)
sharedmem.vehicleInfo[carId].spLineLength = ac.getCarState(carId, acsys.CS.NormalizedSplinePosition)
sharedmem.vehicleInfo[carId].isConnected = ac.isConnected(carId)
sharedmem.vehicleInfo[carId].finishStatus = ac.getCarState(carId, acsys.CS.RaceFinished)
def acMain(ac_version):
global appWindow,sharedMem
appWindow = ac.newApp("SecondMonitorEx")
ac.setTitle(appWindow, "SecondMonitorEx")
ac.setSize(appWindow, 300, 40)
ac.log("SecondMonitor Shared memory Initialized")
ac.console("SecondMonitor Shared memory Initialized")
sharedmem = sharedMem.getsharedmem()
sharedmem.serverName = ac.getServerName().encode('utf-8')
sharedmem.acInstallPath = os.path.abspath(os.curdir).encode('utf-8')
sharedmem.pluginVersion = pluginVersion.encode('utf-8')
return "SecondMonitorEx"
def acUpdate(deltaT):
global timer
timer += deltaT
if timer > 0.025:
updateSharedMemory()
timer = 0
| StarcoderdataPython |
129991 | <filename>ganonymizer-v3/app/api/router.py<gh_stars>0
from flask import request
from api import app
from api import controller as controller
from api import middleware as middleware
from api.gano import load_config, load_model
@app.before_first_request
def init():
load_config()
load_model()
@app.route("/health")
def health():
return controller.health()
@app.route("/image", methods=["POST"])
@middleware.auth.login_required
def image():
print(f"[INFO] User: {middleware.auth.current_user()}")
img_b64 = request.json["image"]
return controller.image(img_b64)
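# Request sketch (hypothetical host and credentials; the exact auth scheme is
# whatever `middleware.auth` implements, assumed to be HTTP Basic here):
#
#   import base64, requests
#   with open("face.jpg", "rb") as f:
#       payload = {"image": base64.b64encode(f.read()).decode()}
#   requests.post("http://localhost:5000/image", json=payload, auth=("user", "pass"))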
| StarcoderdataPython |
3263678 | # Ultroid - UserBot
# Copyright (C) 2020 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# PLease read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
import os
import requests
from asyncio import sleep
from bs4 import BeautifulSoup as bs
from . import *
XX = "A servant appeared!"
YY = "A qt waifu appeared!"
@bot.on(events.NewMessage(incoming=True))
async def reverse(event):
if not event.media:
return
    if event.sender_id not in (792028928, 1232515770):
        return
    if event.text not in (XX, YY):
        return
dl = await bot.download_media(event.media)
file = {"encoded_image": (dl, open(dl, "rb"))}
grs = requests.post(
"https://www.google.com/searchbyimage/upload", files=file, allow_redirects=False
)
loc = grs.headers.get("Location")
response = requests.get(
loc,
headers={
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0"
},
)
xx = bs(response.text, "html.parser")
div = xx.find("div", {"class": "r5a77d"})
alls = div.find("a")
text = alls.text
    send = await bot.send_message(event.chat_id, f"/protecc {text}")
await sleep(2)
await send.delete()
os.remove(dl)
| StarcoderdataPython |
190700 | import inspect
from importlib import import_module
from .base import BaseState, INIT_REMOTE_API
from ..transport import new_session
def create_class(pkg_class: str):
"""Create a class from a package.module.class string
:param pkg_class: full class location,
e.g. "sklearn.model_selection.GroupKFold"
"""
splits = pkg_class.split(".")
clfclass = splits[-1]
pkg_module = splits[:-1]
class_ = getattr(import_module(".".join(pkg_module)), clfclass)
return class_
def create_function(pkg_func: list):
"""Create a function from a package.module.function string
:param pkg_func: full function location,
e.g. "sklearn.feature_selection.f_classif"
"""
splits = pkg_func.split(".")
pkg_module = ".".join(splits[:-1])
cb_fname = splits[-1]
pkg_module = __import__(pkg_module, fromlist=[cb_fname])
function_ = getattr(pkg_module, cb_fname)
return function_
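# Quick sanity check of the two loaders above (standard-library targets, so
# nothing project-specific is assumed):
#
#   >>> create_class("collections.OrderedDict")().__class__.__name__
#   'OrderedDict'
#   >>> create_function("os.path.join")("a", "b")   # on a POSIX system
#   'a/b'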
class TaskState(BaseState):
kind = 'task'
_dict_fields = ['kind', 'name', 'class_name', 'class_params', 'handler',
'next', 'resource', 'transport', 'subpath', 'full_event']
def __init__(self, name=None, class_name=None, class_params=None, handler=None,
next=None, resource=None, transport=None, subpath=None, full_event=None):
super().__init__(name, next)
if callable(handler) and (class_name or class_params):
raise ValueError('cannot specify function pointer (handler) and class name/params')
self._class_object = None
self.class_name = class_name
if class_name and not isinstance(class_name, str):
self.class_name = class_name.__name__
self._class_object = class_name
self.class_params = class_params or {}
self._object = None
self.full_event = full_event
self.handler = handler
self.resource = resource
self.transport = transport
self.subpath = subpath
def _init_object(self, context, namespace):
# link to function
if self.handler and not self.class_name:
if callable(self.handler):
self._fn = self.handler
self.handler = self.handler.__name__
elif self.handler in namespace:
self._fn = namespace[self.handler]
else:
try:
self._fn = create_function(self.handler)
except (ImportError, ValueError) as e:
raise ImportError(f'state {self.name} init failed, function {self.handler} not found')
context.logger.debug(f'init function {self.handler} in {self.name}')
return
if not self.class_name:
raise ValueError('valid class_name and/or handler must be specified')
if not self._class_object:
if self.class_name in namespace:
self._class_object = namespace[self.class_name]
else:
try:
self._class_object = create_class(self.class_name)
except (ImportError, ValueError) as e:
raise ImportError(f'state {self.name} init failed, class {self.class_name} not found')
# init and link class/function
context.logger.debug(f'init class {self.class_name} in {self.name}')
self._object = init_class(self._class_object, context, self, **self.class_params)
handler = self.handler or 'do'
if not self.handler and hasattr(self._object, 'do_event'):
handler = 'do_event'
self.full_event = True
if not hasattr(self._object, handler):
            raise ValueError(f'handler {handler} not found in class {type(self._object).__name__}')
self._fn = getattr(self._object, handler)
def _post_init(self):
if self._object and hasattr(self._object, 'post_init'):
self._object.post_init()
def run(self, context, event, *args, **kwargs):
context.logger.debug(f'running state {self.fullname}, type: {self._object_type}')
if not self._fn:
            raise RuntimeError(f'state {self.name} run failed, function '
                               'or remote session not initialized')
try:
if self.full_event or self._object_type == INIT_REMOTE_API:
event = self._fn(event)
else:
event.body = self._fn(event.body)
except Exception as e:
fullname = self.fullname
context.logger.error(f'step {fullname} run failed, {e}')
event.add_trace(event.id, fullname, 'fail', e, verbosity=context.root.trace)
raise RuntimeError(f'step {fullname} run failed, {e}')
event.add_trace(event.id, self.fullname, 'ok', event.body, verbosity=context.root.trace)
resp_status = getattr(event.body, 'status_code', 0)
if self.next and not getattr(event, 'terminated', None) and resp_status < 300:
next_obj = self._parent[self.next]
return next_obj.run(context, event, *args, **kwargs)
return event
def init_class(object, context, state, **params):
args = inspect.signature(object.__init__).parameters
if 'context' in args:
params['context'] = context
if 'state' in args:
params['state'] = state
if 'name' in args:
params['name'] = state.name
return object(**params)
| StarcoderdataPython |
3287772 | <reponame>niemela/problemtools
from __future__ import print_function
import os
import re
import os.path
import glob
import tempfile
import shutil
# For backwards compatibility, remove in bright and shiny future.
def detect_version(problemdir, problemtex):
# Check for 0.1 - lack of \problemname
if open(problemtex).read().find('\problemname') < 0:
return '0.1'
return '' # Current
class Template:
filename = None
problemset_cls = None
copy_cls = True
def __init__(self, problemdir, language='',
title='Problem Title', force_copy_cls=False):
if not os.path.isdir(problemdir):
raise Exception('%s is not a directory' % problemdir)
if problemdir[-1] == '/':
problemdir = problemdir[:-1]
stmtdir = os.path.join(problemdir, 'problem_statement')
langs = []
if glob.glob(os.path.join(stmtdir, 'problem.tex')):
langs.append('')
for f in glob.glob(os.path.join(stmtdir, 'problem.[a-z][a-z].tex')):
langs.append(re.search("problem.([a-z][a-z]).tex$", f).group(1))
if len(langs) == 0:
raise Exception('No problem statements available')
dotlang = ''
# If language unspec., use first available one (will be
# problem.tex if exists)
if language == '':
language = langs[0]
if language != '':
if len(language) != 2 or not language.isalpha():
raise Exception('Invalid language code "%s"' % language)
if language not in langs:
raise Exception('No problem statement for language "%s" available' % language)
dotlang = '.' + language
# Used in the template.tex variable substitution.
language = dotlang
problemtex = os.path.join(stmtdir, 'problem' + dotlang + '.tex')
if not os.path.isfile(problemtex):
raise Exception('Unable to find problem statement, was looking for "%s"' % problemtex)
templatefile = 'template.tex'
clsfile = 'problemset.cls'
timelim = 1 # Legacy for compatibility with v0.1
version = detect_version(problemdir, problemtex)
if version != '':
print('Note: problem is in an old version (%s) of problem format, you should consider updating it' % version)
templatefile = 'template_%s.tex' % version
clsfile = 'problemset_%s.cls' % version
templatepaths = [os.path.join(os.path.dirname(__file__), 'templates/latex'),
os.path.join(os.path.dirname(__file__), '../templates/latex'),
'/usr/lib/problemtools/templates/latex']
templatepath = None
for p in templatepaths:
if os.path.isdir(p) and os.path.isfile(os.path.join(p, templatefile)):
templatepath = p
break
        if templatepath is None:
raise Exception('Could not find directory with latex template "%s"' % templatefile)
basedir = os.path.dirname(problemdir)
shortname = os.path.basename(problemdir)
samples = [os.path.splitext(os.path.basename(f))[0] for f in sorted(glob.glob(os.path.join(problemdir, 'data', 'sample', '*.in')))]
self.problemset_cls = os.path.join(basedir, 'problemset.cls')
if os.path.isfile(self.problemset_cls) and not force_copy_cls:
            print('%s exists, will not copy it -- in case of weirdness this is likely the culprit' % self.problemset_cls)
self.copy_cls = False
if self.copy_cls:
shutil.copyfile(os.path.join(templatepath, clsfile), self.problemset_cls)
(templout, self.filename) = tempfile.mkstemp(suffix='.tex', dir=basedir)
templin = open(os.path.join(templatepath, templatefile))
for line in templin:
try:
out = line % locals()
os.write(templout, out)
except:
# This is a bit ugly I guess
for sample in samples:
out = line % locals()
os.write(templout, out)
os.close(templout)
templin.close()
def get_file_name(self):
assert os.path.isfile(self.filename)
return self.filename
def cleanup(self):
if self.problemset_cls is not None and self.copy_cls and os.path.isfile(self.problemset_cls):
os.remove(self.problemset_cls)
if self.filename is not None:
for f in glob.glob(os.path.splitext(self.filename)[0] + '.*'):
if os.path.isfile(f):
os.remove(f)
def __del__(self):
self.cleanup()
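# Usage sketch (hypothetical problem directory; the language code must match an
# existing problem.<lang>.tex statement inside it):
#
#   tmpl = Template('/path/to/problems/hello', language='en')
#   texfile = tmpl.get_file_name()   # temporary .tex created in the problem's parent directory
#   # ... run LaTeX on texfile ...
#   tmpl.cleanup()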
| StarcoderdataPython |
4810424 | <reponame>lundholmx/advent-of-code-2021
from collections import defaultdict
from itertools import pairwise
def parse_input(lines: list[str]) -> tuple[str, dict]:
template = lines[0]
rules = {}
for line in lines[2:]:
[a, b] = line.split(" -> ")
rules[a] = b
return template, rules
def calc(template: str, rules: dict[str, str], nsteps: int) -> int:
formula = defaultdict(int)
for a, b in pairwise(template):
formula[a + b] += 1
counter = defaultdict(int)
for c in template:
counter[c] += 1
for _ in range(nsteps):
next = defaultdict(int)
for p, count in formula.items():
r = rules[p]
next[p[0] + r] += count
next[r + p[1]] += count
counter[r] += count
formula = next
values = counter.values()
return max(values) - min(values)
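# Behaviour sketch on a tiny hypothetical input (not the puzzle data): from
# "ABA" with rules {"AB": "B", "BA": "A"}, one step inserts one B and one A,
# giving counts A=3, B=2, so calc returns 3 - 2 = 1.
#
#   >>> calc("ABA", {"AB": "B", "BA": "A"}, 1)
#   1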
if __name__ == "__main__":
with open("input.txt") as f:
template, rules = parse_input([l.strip() for l in f.readlines()])
print(f"part 1: {calc(template, rules, 10)}")
print(f"part 2: {calc(template, rules, 40)}")
| StarcoderdataPython |