id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses 1: StarcoderdataPython)
---|---|---|
3441271
|
from django.http import HttpResponse
from django.conf import settings
from django.urls import reverse
from falmer.auth.utils import create_magic_link_for_user
from falmer.auth.models import FalmerUser
from falmer.slack.models import SlackUser
from .utils import verify_slack_hook, get_slacker_instance
@verify_slack_hook
def open_falmer(request):
slack_user_id = request.POST.get('user_id')
slack = get_slacker_instance()
slack_profile = slack.users.profile.get(slack_user_id).body['profile']
slack_user_email = slack_profile['email']
try:
slack_account = SlackUser.objects.get(slack_user_id=slack_user_id)
user = slack_account.user
if slack_account.first_name != slack_profile.get('first_name', ''):
slack_account.first_name = slack_profile.get('first_name', '')
slack_account.save()
if slack_account.last_name != slack_profile.get('last_name', ''):
slack_account.last_name = slack_profile.get('last_name', '')
slack_account.save()
except SlackUser.DoesNotExist:
slack.chat.post_message(
'#falmer', '<@{new_slack_id}> opened Falmer for the first time!'.format(new_slack_id=slack_user_id)
)
try:
user = FalmerUser.objects.get(identifier=slack_user_email)
except FalmerUser.DoesNotExist:
user = FalmerUser.objects.create(
identifier=slack_user_email,
authority='IS',
)
SlackUser.objects.create(
user=user,
slack_user_id=slack_user_id,
first_name=slack_profile.get('first_name', ''),
last_name=slack_profile.get('last_name', '')
)
link = create_magic_link_for_user(user, '')
return HttpResponse('Here\'s a magic link to login: {}{}'.format(settings.PUBLIC_HOST, link))
|
StarcoderdataPython
|
3334254
|
<reponame>mohammadfayaj/Django-Pro-Eshop<filename>checkout/urls.py<gh_stars>0
from django.urls import path
from . import views
app_name = "checkout"
urlpatterns = [
    path('address_info_/<int:id>/', views.check_out_view, name='check-out'),
    path('payment_option_/', views.payment_option, name='payment-options'),
    path('Success_url/<int:id>/', views.success_page, name='success-page'),
    path('get_delivery_charges/', views.get_ajax_data, name='get_ajax_data_load'),
    path('export_pdf/<int:id>/', views.export_pdf, name='export_as_pdf'),
    path('stripe/', views.stripe_payment_view, name='stripe-payment'),
    path('cash_on_delivery/<int:id>/', views.cash_on_delivery, name='cash-on-process'),
    path('bkash/<int:id>/', views.Bkash, name='bkash_pyment'),
    path('paypal/', views.Paypal, name='paypal_payment'),
]
|
StarcoderdataPython
|
5058327
|
import json
import scrapy
from ..settings import *
from ..items import *
class XimaTargetsSpider(scrapy.Spider):
TARGET_CATES = [
'有声书',
'段子',
'情感生活',
'娱乐',
'影视',
'儿童',
'历史',
'商业财经',
'IT科技',
'个人成长',
'头条',
'二次元',
'旅游',
'汽车',
'人文'
]
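# Note: the entries above are Ximalaya category names used as lookup keys, so they
# must stay in Chinese; roughly: audiobooks, comedy bits, emotional life, entertainment,
# film & TV, children, history, business & finance, IT & tech, personal growth,
# headlines, ACG, travel, cars, humanities.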
target_cates_len = len(TARGET_CATES)
name = 'xima_targets'
cates = {}
my_target = TargetAlbums()
def start_requests(self):
with open(JSONS_DIR + '/' + CATE_INFO_STORE, 'r') as f:
self.cates = json.load(f)
# for c in self.cates['lv2'].keys():
for c in self.TARGET_CATES:
url = ALBUM_TARGETS + 'category=' + self.cates['lv2'][c]['pinyin']
yield scrapy.Request(url, callback=self.parse, meta={'cate': c})
def parse(self, response, **kwargs):
cate = response.meta['cate']
data = response.json()['data']
albums_list = [{
'albumId': album['albumId'],
'link': album['link'],
'playCount': album['playCount'],
'trackCount': album['trackCount'],} for album in data['albums']]
self.my_target.xima[cate] = albums_list
yield response.follow(ALBUM_RSS_FEED + str())
yield self.my_target
|
StarcoderdataPython
|
1882630
|
<filename>Collections-a-installer/community-general-2.4.0/scripts/inventory/landscape.py
#!/usr/bin/env python
# (c) 2015, <NAME> <<EMAIL>>
#
# This file is part of Ansible.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Dynamic inventory script which lets you use nodes discovered by Canonical's
# Landscape (http://www.ubuntu.com/management/landscape-features).
#
# Requires the `landscape_api` Python module
# See:
# - https://landscape.canonical.com/static/doc/api/api-client-package.html
# - https://landscape.canonical.com/static/doc/api/python-api.html
#
# Environment variables
# ---------------------
# - `LANDSCAPE_API_URI`
# - `LANDSCAPE_API_KEY`
# - `LANDSCAPE_API_SECRET`
# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
import argparse
import collections
import os
import sys
from landscape_api.base import API, HTTPError
import json
_key = 'landscape'
class EnvironmentConfig(object):
uri = os.getenv('LANDSCAPE_API_URI')
access_key = os.getenv('LANDSCAPE_API_KEY')
secret_key = os.getenv('LANDSCAPE_API_SECRET')
ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
def _landscape_client():
env = EnvironmentConfig()
return API(
uri=env.uri,
access_key=env.access_key,
secret_key=env.secret_key,
ssl_ca_file=env.ssl_ca_file)
def get_landscape_members_data():
return _landscape_client().get_computers()
def get_nodes(data):
return [node['hostname'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for value in node['tags']:
groups[value].append(node['hostname'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['hostname']] = {'tags': node['tags']}
return meta
def print_list():
data = get_landscape_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_landscape_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from landscape cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from landscape cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
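# Usage sketch (assumes the LANDSCAPE_API_* environment variables above are set
# and the landscape_api package is installed):
#   python landscape.py --list     -> prints the full JSON inventory (groups built from Landscape tags)
#   python landscape.py --host H   -> prints the hostvars (tags) for host H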
|
StarcoderdataPython
|
3296321
|
from guide.main import handler
true = True
false = False
response = {
"response": {"text": "Задаю простой вопрос...", "tts": "Задаю простой вопрос..."},
"version": "1.0",
"session_state": {"scene": "SimpleQuestion"},
}
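# The Russian strings in these fixtures are Yandex Alice dialogue data;
# "Задаю простой вопрос..." means "Asking a simple question..." and is asserted
# against verbatim in the tests below, so it must stay in Russian.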
REQUEST = {
"meta": {
"locale": "ru-RU",
"timezone": "UTC",
"client_id": "ru.yandex.searchplugin/7.16 (none none; android 4.4.2)",
"interfaces": {"screen": {}, "payments": {}, "account_linking": {}},
},
"session": {
"message_id": 2,
"session_id": "bc84469e-56d3-4828-8f86-219ddeac004f",
"skill_id": "03f46589-d3c4-43b1-b8b6-c8118b9ae151",
"new": false,
},
"request": {
"command": "простой",
"original_utterance": "Простой",
"nlu": {
"tokens": ["простой"],
"entities": [],
"intents": {
"game_question": {
"slots": {
"question_type": {
"type": "QuestionType",
"tokens": {"start": 0, "end": 1},
"value": "simple",
}
}
}
},
},
"markup": {"dangerous_context": false},
"type": "SimpleUtterance",
},
"state": {
"session": {"scene": "StartGame", "screen": "start_tour"},
"user": {},
"application": {},
},
"version": "1.0",
}
REQUEST_ANSWER = {
"meta": {
"locale": "ru-RU",
"timezone": "UTC",
"client_id": "ru.yandex.searchplugin/7.16 (none none; android 4.4.2)",
"interfaces": {
"screen": {},
"payments": {},
"account_linking": {},
"geolocation_sharing": {},
},
},
"session": {
"message_id": 15,
"session_id": "8f977edd-e362-4201-a5ee-738e7a3941c3",
"skill_id": "1f835d35-c640-4c36-b3bc-74ecaa0f71f1",
"user": {
"user_id": "5416FF55E3C40C32A49D45D68AA101F9AE1445387749DE5B7BEAAB9CD6557C1D"
},
"application": {
"application_id": "218AE790B7125C9F67E9E3234671E8861D9603BD2627726710B9EF8A1CE9748D"
},
"user_id": "218AE790B7125C9F67E9E3234671E8861D9603BD2627726710B9EF8A1CE9748D",
"new": false,
},
"request": {
"command": "7",
"original_utterance": "7",
"nlu": {
"tokens": ["7"],
"entities": [
{"type": "YANDEX.NUMBER", "tokens": {"start": 0, "end": 1}, "value": 7}
],
"intents": {},
},
"markup": {"dangerous_context": false},
"type": "SimpleUtterance",
},
"state": {
"session": {
"scene": "QuestionScene",
"question_id": "3",
"question_type": "simple",
},
"user": {},
"application": {},
},
"version": "1.0",
}
REQUEST_ACCEPT = {
"meta": {
"locale": "ru-RU",
"timezone": "UTC",
"client_id": "ru.yandex.searchplugin/7.16 (none none; android 4.4.2)",
"interfaces": {
"screen": {},
"payments": {},
"account_linking": {},
"geolocation_sharing": {},
},
},
"session": {
"message_id": 2,
"session_id": "0585cf18-518f-45e8-b353-e621e9fa05d9",
"skill_id": "1f835d35-c640-4c36-b3bc-74ecaa0f71f1",
"user": {
"user_id": "5416FF55E3C40C32A49D45D68AA101F9AE1445387749DE5B7BEAAB9CD6557C1D"
},
"application": {
"application_id": "218AE790B7125C9F67E9E3234671E8861D9603BD2627726710B9EF8A1CE9748D"
},
"user_id": "218AE790B7125C9F67E9E3234671E8861D9603BD2627726710B9EF8A1CE9748D",
"new": false,
},
"request": {
"command": "хорошо",
"original_utterance": "Хорошо",
"nlu": {
"tokens": ["хорошо"],
"entities": [],
"intents": {"YANDEX.CONFIRM": {"slots": {}}},
},
"markup": {"dangerous_context": false},
"type": "SimpleUtterance",
},
"state": {
"session": {"scene": "StartGame", "question_type": "simple"},
"user": {},
"application": {},
},
"version": "1.0",
}
def test_play_game():
response = handler(REQUEST, None)
assert response
assert "Задаю простой вопрос" in response["response"]["text"]
def test_answer_question():
response = handler(REQUEST_ANSWER, None)
assert response
assert (
response["session_state"]["question_type"] == "attention"
) # saved the question type
assert "Верно" in response["response"]["text"]
def test_accept():
response = handler(REQUEST_ACCEPT, None)
assert response
assert "Задаю простой вопрос" in response["response"]["text"]
|
StarcoderdataPython
|
4967344
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorThreads}.
"""
__metaclass__ = type
from weakref import ref
import gc, threading
from twisted.python.threadable import isInIOThread
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.python.threadpool import ThreadPool
from twisted.internet.interfaces import IReactorThreads
class ThreadTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorThreads}.
"""
requiredInterfaces = (IReactorThreads,)
def test_getThreadPool(self):
"""
C{reactor.getThreadPool()} returns an instance of L{ThreadPool} which
starts when C{reactor.run()} is called and stops before it returns.
"""
state = []
reactor = self.buildReactor()
pool = reactor.getThreadPool()
self.assertIsInstance(pool, ThreadPool)
self.assertFalse(pool.started, "Pool should not start before reactor.run")
def f():
# Record the state for later assertions
state.append(pool.started)
state.append(pool.joined)
reactor.stop()
reactor.callWhenRunning(f)
self.runReactor(reactor, 2)
self.assertTrue(state[0], "Pool should start after reactor.run")
self.assertFalse(state[1], "Pool should not be joined before reactor.stop")
self.assertTrue(pool.joined, "Pool should be stopped after reactor.run returns")
def test_suggestThreadPoolSize(self):
"""
C{reactor.suggestThreadPoolSize()} sets the maximum size of the reactor
threadpool.
"""
reactor = self.buildReactor()
reactor.suggestThreadPoolSize(17)
pool = reactor.getThreadPool()
self.assertEqual(pool.max, 17)
def test_delayedCallFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from a delayed call is run immediately in the next reactor iteration.
When invoked from the reactor thread, previous implementations of
L{IReactorThreads.callFromThread} would skip the pipe/socket based wake
up step, assuming the reactor would wake up on its own. However, this
resulted in the reactor not noticing an insert into the thread queue at
the right time (in this case, after the thread queue has been processed
for that reactor iteration).
"""
reactor = self.buildReactor()
def threadCall():
reactor.stop()
# Set up the use of callFromThread being tested.
reactor.callLater(0, reactor.callFromThread, threadCall)
before = reactor.seconds()
self.runReactor(reactor, 60)
after = reactor.seconds()
# We specified a timeout of 60 seconds. The timeout code in runReactor
# probably won't actually work, though. If the reactor comes out of
# the event notification API just a little bit early, say after 59.9999
# seconds instead of after 60 seconds, then the queued thread call will
# get processed but the timeout delayed call runReactor sets up won't!
# Then the reactor will stop and runReactor will return without the
# timeout firing. As it turns out, select() and poll() are quite
# likely to return *slightly* earlier than we ask them to, so the
# timeout will rarely happen, even if callFromThread is broken. So,
# instead we'll measure the elapsed time and make sure it's something
# less than about half of the timeout we specified. This is heuristic.
# It assumes that select() won't ever return after 30 seconds when we
# asked it to timeout after 60 seconds. And of course like all
# time-based tests, it's slightly non-deterministic. If the OS doesn't
# schedule this process for 30 seconds, then the test might fail even
# if callFromThread is working.
self.assertTrue(after - before < 30)
def test_callFromThread(self):
"""
A function scheduled with L{IReactorThreads.callFromThread} invoked
from another thread is run in the reactor thread.
"""
reactor = self.buildReactor()
result = []
def threadCall():
result.append(threading.currentThread())
reactor.stop()
reactor.callLater(0, reactor.callInThread, reactor.callFromThread, threadCall)
self.runReactor(reactor, 5)
self.assertEqual(result, [threading.currentThread()])
def test_stopThreadPool(self):
"""
When the reactor stops, L{ReactorBase._stopThreadPool} drops the
reactor's direct reference to its internal threadpool and removes
the associated startup and shutdown triggers.
This is the case of the thread pool being created before the reactor
is run.
"""
reactor = self.buildReactor()
threadpool = ref(reactor.getThreadPool())
reactor.callWhenRunning(reactor.stop)
self.runReactor(reactor)
gc.collect()
self.assertIsNone(threadpool())
def test_stopThreadPoolWhenStartedAfterReactorRan(self):
"""
We must handle the case of shutting down the thread pool when it was
started after the reactor was run in a special way.
Some implementation background: The thread pool is started with
callWhenRunning, which only returns a system trigger ID when it is
invoked before the reactor is started.
This is the case of the thread pool being created after the reactor
is started.
"""
reactor = self.buildReactor()
threadPoolRefs = []
def acquireThreadPool():
threadPoolRefs.append(ref(reactor.getThreadPool()))
reactor.stop()
reactor.callWhenRunning(acquireThreadPool)
self.runReactor(reactor)
gc.collect()
self.assertIsNone(threadPoolRefs[0]())
def test_cleanUpThreadPoolEvenBeforeReactorIsRun(self):
"""
When the reactor has its shutdown event fired before it is run, the
thread pool is completely destroyed.
For what it's worth, the reason we support this behavior at all is
because Trial does this.
This is the case of the thread pool being created without the reactor
being started at all.
"""
reactor = self.buildReactor()
threadPoolRef = ref(reactor.getThreadPool())
reactor.fireSystemEvent("shutdown")
if reactor.__class__.__name__ == "AsyncioSelectorReactor":
self.assertIsNone(reactor.threadpool)
# ReactorBase.__init__ sets self.crash as a 'shutdown'
# event, which in turn calls stop on the underlying
# asyncio event loop, which in turn sets a _stopping
# attribute on it that's only unset after an iteration of
# the loop. Subsequent tests can only reuse the asyncio
# loop if it's allowed to run and unset that _stopping
# attribute.
self.runReactor(reactor)
else:
gc.collect()
self.assertIsNone(threadPoolRef())
def test_isInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{True} if it is
called in the thread the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.stop()
reactor.callWhenRunning(check)
self.runReactor(reactor)
self.assertEqual([True], results)
def test_isNotInIOThread(self):
"""
The reactor registers itself as the I/O thread when it runs so that
L{twisted.python.threadable.isInIOThread} returns C{False} if it is
called in a different thread than the reactor is running in.
"""
results = []
reactor = self.buildReactor()
def check():
results.append(isInIOThread())
reactor.callFromThread(reactor.stop)
reactor.callInThread(check)
self.runReactor(reactor)
self.assertEqual([False], results)
globals().update(ThreadTestsBuilder.makeTestCaseClasses())
|
StarcoderdataPython
|
324834
|
<reponame>KawashiroNitori/epicteller<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from typing import List, Optional, Iterable, Dict
import base62
from sqlalchemy import select, and_, desc
from epicteller.core.model.message import Message, TextMessageContent, ImageMessageContent, DiceMessageContent, \
MessageContent
from epicteller.core.tables import table
from epicteller.core.util import ObjectDict
from epicteller.core.util.enum import MessageType
from epicteller.core.util.seq import get_id
def _format_message(result) -> Optional[Message]:
if not result:
return None
message_type = MessageType(result.type)
if message_type == MessageType.TEXT:
content = TextMessageContent.parse_obj(result.content)
elif message_type == MessageType.IMAGE:
content = ImageMessageContent.parse_obj(result.content)
elif message_type == MessageType.DICE:
content = DiceMessageContent.parse_obj(result.content)
else:
content = MessageContent()
message = Message(
id=result.id,
url_token=result.url_token,
campaign_id=result.campaign_id,
episode_id=result.episode_id,
character_id=result.character_id,
type=MessageType(result.type),
is_removed=bool(result.is_removed),
is_gm=bool(result.is_gm),
content=content,
created=result.created,
updated=result.updated,
)
return message
class MessageDAO:
t = table.message
select_clause = select([
t.c.id,
t.c.url_token,
t.c.campaign_id,
t.c.episode_id,
t.c.character_id,
t.c.type,
t.c.is_removed,
t.c.is_gm,
t.c.content,
t.c.created,
t.c.updated,
])
@classmethod
async def batch_get_message_by_id(cls, message_ids: Iterable[int]) -> Dict[int, Message]:
query = cls.select_clause.where(cls.t.c.id.in_(message_ids))
result = await table.execute(query)
rows = await result.fetchall()
return {row.id: _format_message(row) for row in rows}
@classmethod
async def batch_get_message_by_url_token(cls, url_tokens: Iterable[str]) -> Dict[str, Message]:
query = cls.select_clause.where(cls.t.c.url_token.in_(url_tokens))
result = await table.execute(query)
rows = await result.fetchall()
return {row.url_token: _format_message(row) for row in rows}
@classmethod
async def get_episode_latest_messages(cls, episode_id: int, limit: int) -> List[Message]:
query = cls.select_clause.where(and_(
cls.t.c.episode_id == episode_id,
cls.t.c.is_removed == 0,
)).order_by(desc(cls.t.c.id)).limit(limit)
results = await table.execute(query)
messages = [_format_message(result) for result in await results.fetchall()]
messages.reverse()
return messages
@classmethod
async def get_episode_messages_from_oldest(cls, episode_id: int, oldest: int, limit: int) -> List[Message]:
query = cls.select_clause.where(and_(
cls.t.c.episode_id == episode_id,
cls.t.c.is_removed == 0,
cls.t.c.id > oldest,
)).limit(limit)
results = await table.execute(query)
messages = [_format_message(result) for result in await results.fetchall()]
return messages
@classmethod
async def get_episode_messages_to_latest(cls, episode_id: int, latest: int, limit: int) -> List[Message]:
query = cls.select_clause.where(and_(
cls.t.c.episode_id == episode_id,
cls.t.c.is_removed == 0,
cls.t.c.id < latest,
)).order_by(desc(cls.t.c.id)).limit(limit)
results = await table.execute(query)
messages = [_format_message(result) for result in await results.fetchall()]
messages.reverse()
return messages
@classmethod
async def update_message(cls, message_id: int, **kwargs) -> None:
if 'updated' not in kwargs:
kwargs['updated'] = int(time.time())
query = cls.t.update().values(kwargs).where(cls.t.c.id == message_id)
await table.execute(query)
@classmethod
async def create_message(cls, campaign_id: int, episode_id: int, character_id: int, message_type: MessageType,
content: dict, is_gm: bool, created: Optional[int] = None) -> Message:
url_token = base62.encode(get_id())
if not created:
created = int(time.time())
values = ObjectDict(
url_token=url_token,
campaign_id=campaign_id,
episode_id=episode_id,
character_id=character_id,
type=int(message_type),
is_removed=0,
is_gm=int(is_gm),
content=content,
created=created,
updated=created,
)
query = cls.t.insert().values(values)
result = await table.execute(query)
values.id = result.lastrowid
message = _format_message(values)
return message
@classmethod
async def scan_messages(cls, start_id: int = 0, limit: int = 1000) -> List[Message]:
query = cls.select_clause.where(cls.t.c.id > start_id).limit(limit)
results = await table.execute(query)
messages = [_format_message(result) for result in await results.fetchall()]
return messages
|
StarcoderdataPython
|
6480281
|
import requests
import json
'''
BOOK PREFIX VALUE
genesis - gen, exodus - ex, leviticus - lev, numbers - num, deuteronomy - deu, joshua - joshua,
judges - judges, 1 samuel - 1sam, 2 samuel - 2sam
'''
def get_book_prefix(book):
prefix =""
if (book == "genesis"): prefix = "gen"
elif (book == "exodus"): prefix = "ex"
elif (book == "leviticus"): prefix = "lev"
elif (book == "numbers"): prefix = "num"
elif (book == "deuteronomy"): prefix = "deu"
elif (book == "joshua"): prefix = "joshua"
elif (book == "1 samuel"): prefix = "1sam"
elif (book == "2 samuel"): prefix = "2sam"
elif (book == "ruth"): prefix = "ruth"
elif (book == "1 kings"): prefix ="1kings"
elif (book == "2 kings"): prefix ="2kings"
elif (book == "1 chronicles"): prefix ="1chronicles"
elif (book == "2 chronicles"): prefix ="2chronicles"
elif (book == "ezra"): prefix ="ezra"
elif (book == "nehemiah"): prefix ="nehemiah"
elif (book == "esther"): prefix ="esther"
elif (book == "job"): prefix ="job"
elif (book == "psalm"): prefix ="ps"
elif (book == "1 kings"): prefix ="1kings"
elif (book == "matthew"): prefix = "matthew"
elif (book == "john"): prefix = "jn"
elif (book == "mark"): prefix = "mark"
else:
prefix = "wrong input for book"
return prefix
def get_book(book):
url = f"https://getbible.net/json?passage={book}&raw=true"
response = requests.get(url)
data = json.loads(response.text)
chapters = data["book"]
print(f"There are {len(chapters)} chapters. Choose a chapter between 1 - {len(chapters)}")
def get_passage(book, chapter):
url = f"http://getbible.net/json?passage={book}{chapter}&raw=true"
print((url))
response = requests.get(url)
jdata = json.loads(response.text)
#print(response.status_code, response.headers)
chapter = jdata["chapter"]
for line in chapter.items():
linenum = line[0]
linedict = line[1]
verse = linedict["verse"]
print(linenum, verse)
def get_verse(book, chapter, verse):
url = f"http://getbible.net/json?passage={book}{chapter}:{verse}&raw=true"
print(url)
response = requests.get(url)
data = json.loads(response.text)
book = data["book"]
chapter = book[0]
chapter_verse = chapter["chapter"]
versej = chapter_verse[verse]
num = versej["verse_nr"]
v = versej["verse"]
print(f"{num} {v}")
def menu():
print("\nBIBLE MENU OPTIONS")
print("Press 1 - Search for a book")
print("Press 2 - Search for a passage")
print("Press 3 - Search for a verse")
choice = str(input("Option #: "))
if (choice == "1"):
book = str(input("What book would you like me to open? (eg. Genesis): "))
prefix = get_book_prefix(book.lower())
get_book(prefix)
chapter = str(input("What chapter would you like me to open?: "))
get_passage(prefix, chapter)
elif (choice == "2"):
book = str(input("Book (eg. Matthew): "))
prefix = get_book_prefix(book.lower())
chapter = str(input("What chapter do you want to look at?: "))
get_passage(prefix, chapter)
elif (choice == "3"):
book = str(input("Book: "))
prefix = get_book_prefix(book.lower())
chapter = str(input("Chapter: "))
verse = str(input("Verse num: "))
get_verse(prefix,chapter,verse)
else:
print("Invalid choice")
def terminate_loop():
answer = str(input("Continue (Y/N)?: "))
terminate = False
if (answer == 'Y' or answer == 'y'):
terminate = True
elif (answer == 'N' or answer == 'n'):
terminate = False
return terminate
def main():
#get_passage(get_book_prefix(book.lower()), chapter)
loop = True
try:
while(loop):
menu()
loop = terminate_loop()
print("Hope you enjoy the word. bless u!")
except KeyError:
print("Wrong spelling for inputs")
except ValueError:
print("Failed to decode json")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1765125
|
<reponame>hayesla/sunpy-soar<filename>sunpy_soar/attrs.py<gh_stars>1-10
import warnings
import sunpy.net.attrs as a
from sunpy.net.attr import AttrAnd, AttrOr, AttrWalker, DataAttr, SimpleAttr
from sunpy.util.exceptions import SunpyDeprecationWarning
__all__ = ['Product']
class Product(SimpleAttr):
"""
The data product identifier to search for.
"""
class Identifier(Product):
"""
The data product identifier to search for.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"'a.soar.Identifier' is deprecated; use 'a.soar.Product' instead.",
SunpyDeprecationWarning)
super().__init__(*args, **kwargs)
walker = AttrWalker()
@walker.add_creator(AttrOr)
def create_or(wlk, tree):
"""
Creator for OR. Loops through the next level down in the tree and appends
the individual results to a list.
"""
results = []
for sub in tree.attrs:
results.append(wlk.create(sub))
return results
@walker.add_creator(AttrAnd, DataAttr)
def create_and(wlk, tree):
"""
Creator for And and other simple attributes. No walking needs to be done,
so simply call the applier function.
"""
result = []
wlk.apply(tree, result)
return [result]
@walker.add_applier(AttrAnd)
def apply_and(wlk, and_attr, params):
"""
Applier for And.
Parameters
----------
wlk : AttrWalker
and_attr : AttrAnd
The AND attribute being applied. The individual attributes being
AND'ed together are accessible with ``and_attr.attrs``.
params : list[str]
List of search parameters.
"""
for iattr in and_attr.attrs:
wlk.apply(iattr, params)
"""
Below are appliers for individual attributes.
They all convert the attribute object into a query string that will eventually
be passed as a query to the SOAR server. They all have the signature:
Parameters
----------
wlk : AttrWalker
The attribute walker.
attr :
The attribute being applied.
params : list[str]
List of search parameters.
"""
@walker.add_applier(a.Time)
def _(wlk, attr, params):
start = attr.start.strftime('%Y-%m-%d+%H:%M:%S')
end = attr.end.strftime('%Y-%m-%d+%H:%M:%S')
params.append(f"begin_time>='{start}'+AND+begin_time<='{end}'")
@walker.add_applier(a.Level)
def _(wlk, attr, params):
level = int(attr.value)
params.append(f"level='L{level}'")
@walker.add_applier(a.Instrument)
def _(wlk, attr, params):
params.append(f"instrument='{attr.value}'")
@walker.add_applier(Product, Identifier)
def _(wlk, attr, params):
params.append(f"descriptor='{attr.value}'")
|
StarcoderdataPython
|
8101755
|
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The Ntp_global parser templates file. This contains
a list of parser definitions and associated functions that
facilitate both facts gathering and native command generation for
the given network resource.
"""
import re
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.network_template import (
NetworkTemplate,
)
class Ntp_globalTemplate(NetworkTemplate):
def __init__(self, lines=None, module=None):
super(Ntp_globalTemplate, self).__init__(
lines=lines, tmplt=self, module=module
)
# fmt: off
PARSERS = [
{
"name": "peer",
"getval": re.compile(
r"""
^ntp\saccess-group
(\s(?P<ipv4>ipv4))?
(\s(?P<ipv6>ipv6))?
\speer
\s(?P<access_list>\S+)
(\s(?P<kod>kod))?
$""", re.VERBOSE),
"setval": "ntp access-group"
"{{ ' ipv4' if ipv4 is defined else '' }}"
"{{ ' ipv6' if ipv6 is defined else '' }}"
" peer "
"{{ access_list }}"
"{{ ' kod' if kod|d(False) else '' }}",
"result": {
"access_group": {
"peer": [
{
"access_list": "{{ access_list }}",
"kod": "{{ not not kod }}",
"ipv4": "{{ not not ipv4 }}",
"ipv6": "{{ not not ipv6 }}",
},
],
},
},
},
{
"name": "query_only",
"getval": re.compile(
r"""
^ntp\saccess-group
(\s(?P<ipv4>ipv4))?
(\s(?P<ipv6>ipv6))?
\squery-only
\s(?P<access_list>\S+)
(\s(?P<kod>kod))?
$""", re.VERBOSE),
"setval": "ntp access-group"
"{{ ' ipv4' if ipv4 is defined else '' }}"
"{{ ' ipv6' if ipv6 is defined else '' }}"
" query-only "
"{{ access_list }}"
"{{ ' kod' if kod|d(False) else '' }}",
"result": {
"access_group": {
"query_only": [
{
"access_list": "{{ access_list }}",
"kod": "{{ not not kod }}",
"ipv4": "{{ not not ipv4 }}",
"ipv6": "{{ not not ipv6 }}",
},
],
},
},
},
{
"name": "serve",
"getval": re.compile(
r"""
^ntp\saccess-group
(\s(?P<ipv4>ipv4))?
(\s(?P<ipv6>ipv6))?
\sserve
\s(?P<access_list>\S+)
(\s(?P<kod>kod))?
$""", re.VERBOSE),
"setval": "ntp access-group"
"{{ ' ipv4' if ipv4 is defined else '' }}"
"{{ ' ipv6' if ipv6 is defined else '' }}"
" serve "
"{{ access_list }}"
"{{ ' kod' if kod|d(False) else '' }}",
"result": {
"access_group": {
"serve": [
{
"access_list": "{{ access_list }}",
"kod": "{{ not not kod }}",
"ipv4": "{{ not not ipv4 }}",
"ipv6": "{{ not not ipv6 }}",
},
],
},
},
},
{
"name": "serve_only",
"getval": re.compile(
r"""
^ntp\saccess-group
(\s(?P<ipv4>ipv4))?
(\s(?P<ipv6>ipv6))?
\sserve-only
\s(?P<access_list>\S+)
(\s(?P<kod>kod))?
$""", re.VERBOSE),
"setval": "ntp access-group"
"{{ ' ipv4' if ipv4 is defined else '' }}"
"{{ ' ipv6' if ipv6 is defined else '' }}"
" serve-only "
"{{ access_list }}"
"{{ ' kod' if kod|d(False) else '' }}",
"result": {
"access_group": {
"serve_only": [
{
"access_list": "{{ access_list }}",
"kod": "{{ not not kod }}",
"ipv4": "{{ not not ipv4 }}",
"ipv6": "{{ not not ipv6 }}",
},
],
},
},
},
{
"name": "allow.control.rate_limit",
"getval": re.compile(
r"""
^ntp\sallow\smode\scontrol\s(?P<rate_limit>\d+)
$""", re.VERBOSE),
"setval": "ntp allow mode control {{ allow.control.rate_limit }}",
"result": {
"allow": {
"control": {
"rate_limit": "{{ rate_limit }}",
},
},
},
},
{
"name": "allow.private",
"getval": re.compile(
r"""
^ntp\sallow\smode\s(?P<private>private)
$""", re.VERBOSE),
"setval": "ntp allow mode private",
"result": {
"allow": {
"private": "{{ not not private }}",
},
},
},
{
"name": "authenticate",
"getval": re.compile(
r"""
^ntp\s(?P<authenticate>authenticate)
$""", re.VERBOSE),
"setval": "ntp authenticate",
"result": {
"authenticate": "{{ not not authenticate }}",
},
},
{
"name": "authentication_keys",
"getval": re.compile(
r"""
^ntp\sauthentication-key\s(?P<id>\d+)
\s(?P<algorithm>\S+)
\s(?P<key>\S+)
\s(?P<encryption>\d+)
$""", re.VERBOSE),
"setval": "ntp authentication-key {{ id }} {{ algorithm }} {{ key }} {{ encryption }}",
"result": {
"authentication_keys": [
{
"id": "{{ id }}",
"algorithm": "{{ algorithm }}",
"key": "{{ key }}",
"encryption": "{{ encryption }}",
},
],
},
},
{
"name": "broadcast_delay",
"getval": re.compile(
r"""
^ntp\sbroadcastdelay\s(?P<broadcast_delay>\d+)
$""", re.VERBOSE),
"setval": "ntp broadcastdelay {{ broadcast_delay }}",
"result": {
"broadcast_delay": "{{ broadcast_delay }}",
},
},
{
"name": "clock_period",
"getval": re.compile(
r"""
^ntp\sclock-period\s(?P<clock_period>\d+)
$""", re.VERBOSE),
"setval": "ntp clock-period {{ clock_period }}",
"result": {
"clock_period": "{{ clock_period }}",
},
},
{
"name": "logging",
"getval": re.compile(
r"""
^ntp\s(?P<logging>logging)
$""", re.VERBOSE),
"setval": "ntp logging",
"result": {
"logging": "{{ not not logging }}",
},
},
{
"name": "master.enabled",
"getval": re.compile(
r"""
^ntp\s(?P<master>master)
$""", re.VERBOSE),
"setval": "ntp master",
"result": {
"master": {
"enabled": "{{ not not master }}",
},
},
},
{
"name": "master.stratum",
"getval": re.compile(
r"""
^ntp\smaster\s(?P<stratum>\d+)
$""", re.VERBOSE),
"setval": "ntp master {{ master.stratum }}",
"result": {
"master": {
"stratum": "{{ stratum }}",
},
},
},
{
"name": "max_associations",
"getval": re.compile(
r"""
^ntp\smax-associations\s(?P<max_associations>\d+)
$""", re.VERBOSE),
"setval": "ntp max-associations {{ max_associations }}",
"result": {
"max_associations": "{{ max_associations }}",
},
},
{
"name": "max_distance",
"getval": re.compile(
r"""
^ntp\smaxdistance\s(?P<max_distance>\d+)
$""", re.VERBOSE),
"setval": "ntp maxdistance {{ max_distance }}",
"result": {
"max_distance": "{{ max_distance }}",
},
},
{
"name": "min_distance",
"getval": re.compile(
r"""
^ntp\smindistance\s(?P<min_distance>\d+)
$""", re.VERBOSE),
"setval": "ntp mindistance {{ min_distance }}",
"result": {
"min_distance": "{{ min_distance }}",
},
},
{
"name": "orphan",
"getval": re.compile(
r"""
^ntp\sorphan\s(?P<orphan>\d+)
$""", re.VERBOSE),
"setval": "ntp orphan {{ orphan }}",
"result": {
"orphan": "{{ orphan }}",
},
},
{
"name": "panic_update",
"getval": re.compile(
r"""
^ntp\spanic\s(?P<update>update)
$""", re.VERBOSE),
"setval": "ntp panic update",
"result": {
"panic_update": "{{ not not update }}",
},
},
{
"name": "passive",
"getval": re.compile(
r"""
^ntp\s(?P<passive>passive)
$""", re.VERBOSE),
"setval": "ntp passive",
"result": {
"passive": "{{ not not passive }}",
},
},
{
"name": "peers",
"getval": re.compile(
r"""
^ntp\speer
(\svrf\s(?P<vrf>\S+))?
(\s(?P<ipv4>ip))?
(\s(?P<ipv6>ipv6))?
\s(?P<peer>\S+)
(\s(?P<burst>burst))?
(\s(?P<iburst>iburst))?
(\skey\s(?P<key>\d+))?
(\sminpoll\s(?P<minpoll>\d+))?
(\smaxpoll\s(?P<maxpoll>\d+))?
(\s(?P<normal_sync>normal-sync))?
(\s(?P<prefer>prefer))?
(\ssource\s(?P<source>\S+))?
(\sversion\s(?P<version>\d+))?
$""", re.VERBOSE),
"setval": "ntp peer"
"{{ (' vrf ' + vrf) if vrf is defined else '' }}"
"{{ ' ip' if use_ipv4|d(False) else ''}}"
"{{ ' ipv6' if use_ipv6|d(False) else ''}}"
"{{ ( ' ' + peer ) if peer is defined else '' }}"
"{{ ' burst ' if burst|d(False) else ''}}"
"{{ ' iburst ' if iburst|d(False) else ''}}"
"{{ (' key ' + key_id|string) if key_id is defined else '' }}"
"{{ (' minpoll ' + minpoll|string) if minpoll is defined else '' }}"
"{{ (' maxpoll ' + maxpoll|string) if maxpoll is defined else '' }}"
"{{ ' normal-sync ' if normal_sync is defined else ''}}"
"{{ ' prefer' if prefer|d(False) else ''}}"
"{{ (' source ' + source) if source is defined else '' }}"
"{{ (' version ' + version|string) if version is defined else '' }}",
"result": {
"peers": [
{
"peer": "{{ peer }}",
"use_ipv4": "{{ not not ipv4 }}",
"use_ipv6": "{{ not not ipv6 }}",
"vrf": "{{ vrf }}",
"burst": "{{ not not burst }}",
"iburst": "{{ not not iburst }}",
"key_id": "{{ key }}",
"minpoll": "{{ minpoll }}",
"maxpoll": "{{ maxpoll }}",
"normal_sync": "{{ not not normal_sync }}",
"prefer": "{{ not not prefer }}",
"source": "{{ source }}",
"version": "{{ version }}",
},
],
},
},
{
"name": "servers",
"getval": re.compile(
r"""
^ntp\sserver
(\svrf\s(?P<vrf>\S+))?
(\s(?P<ipv4>ip))?
(\s(?P<ipv6>ipv6))?
\s(?P<server>\S+)
(\s(?P<burst>burst))?
(\s(?P<iburst>iburst))?
(\skey\s(?P<key>\d+))?
(\sminpoll\s(?P<minpoll>\d+))?
(\smaxpoll\s(?P<maxpoll>\d+))?
(\s(?P<normal_sync>normal-sync))?
(\s(?P<prefer>prefer))?
(\ssource\s(?P<source>\S+))?
(\sversion\s(?P<version>\d+))?
$""", re.VERBOSE),
"setval": "ntp server"
"{{ (' vrf ' + vrf) if vrf is defined else '' }}"
"{{ ' ip' if use_ipv4|d(False) else ''}}"
"{{ ' ipv6' if use_ipv6|d(False) else ''}}"
"{{ ( ' ' + server ) if server is defined else '' }}"
"{{ ' burst ' if burst|d(False) else ''}}"
"{{ ' iburst ' if iburst|d(False) else ''}}"
"{{ (' key ' + key_id|string) if key_id is defined else '' }}"
"{{ (' minpoll ' + minpoll|string) if minpoll is defined else '' }}"
"{{ (' maxpoll ' + maxpoll|string) if maxpoll is defined else '' }}"
"{{ ' normal-sync ' if normal_sync is defined else ''}}"
"{{ ' prefer' if prefer|d(False) else ''}}"
"{{ (' source ' + source) if source is defined else '' }}"
"{{ (' version ' + version|string) if version is defined else '' }}",
"result": {
"servers": [
{
"server": "{{ server }}",
"use_ipv4": "{{ not not ipv4 }}",
"use_ipv6": "{{ not not ipv6 }}",
"vrf": "{{ vrf }}",
"burst": "{{ not not burst }}",
"iburst": "{{ not not iburst }}",
"key_id": "{{ key }}",
"minpoll": "{{ minpoll }}",
"maxpoll": "{{ maxpoll }}",
"normal_sync": "{{ not not normal_sync }}",
"prefer": "{{ not not prefer }}",
"source": "{{ source }}",
"version": "{{ version }}",
},
],
},
},
{
"name": "source",
"getval": re.compile(
r"""
^ntp\ssource\s(?P<source>\S+)
$""", re.VERBOSE),
"setval": "ntp source {{ source }}",
"result": {
"source": "{{ source }}",
},
},
{
"name": "trusted_keys",
"getval": re.compile(
r"""
^ntp\strusted-key
\s((?P<range_start>\d+))
(\s\-\s)?
((?P<range_end>\d+))?
$""", re.VERBOSE),
"setval": "ntp trusted-key {{ range_start }}"
"{{ (' - ' + range_end|string) if range_end is defined else '' }}",
"result": {
"trusted_keys": [
{
"range_start": "{{ range_start }}",
"range_end": "{{ range_end }}",
},
],
},
},
{
"name": "update_calendar",
"getval": re.compile(
r"""
^ntp\s(?P<update_calendar>update-calendar)
$""", re.VERBOSE),
"setval": "ntp update-calendar",
"result": {
"update_calendar": "{{ not not update_calendar }}",
},
},
]
# fmt: on
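# A minimal round-trip sketch (not part of the original template): each "getval"
# regex above parses one line of device config into named groups that feed the
# corresponding "result" template. For example, assuming the running config
# contains the line "ntp master 3", the "master.stratum" parser's regex
#   ^ntp\smaster\s(?P<stratum>\d+)$
# matches it with {"stratum": "3"}, which the result template renders as
#   {"master": {"stratum": "3"}}
# and, in the generation direction, the "setval" string
# "ntp master {{ master.stratum }}" rebuilds the same config line.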
|
StarcoderdataPython
|
8025624
|
import requests
to_predict_dict = {
"data": [
[4.8, 3, 1.4, 0.3],
[2, 1, 3.2, 1.1]
]
}
url = 'http://1172.16.31.10:8000/api'
r = requests.post(url, json=to_predict_dict)
print(r.json())
|
StarcoderdataPython
|
3261159
|
from setuptools import setup, find_packages
import os
version = '1.3.0'  # the package version number matches the less.js version
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('README.txt')
+ '\n' +
read('js', 'lesscss', 'test_less.txt')
+ '\n' +
read('CHANGES.txt'))
setup(
name='js.lesscss',
version=version,
description="Fanstatic packaging of less",
long_description=long_description,
classifiers=[],
keywords='',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
packages=find_packages(), namespace_packages=['js'],
include_package_data=True,
zip_safe=False,
install_requires=[
'fanstatic',
'setuptools',
],
entry_points={
'fanstatic.libraries': [
'less = js.lesscss:library',
],
'console_scripts': [
'jslessc = js.lesscss:main',
],
},
)
|
StarcoderdataPython
|
3237803
|
# str_repr_test.py
class foo:
    def __repr__(self):
        return "foo.repr"
    def __str__(self):
        return "foo.str"

f = foo()
print(f)
print("str", str(f))
print("repr", repr(f))
print("{}", f"{f}")
print("{!r}", f"{f!r}")
print("{!s}", f"{f!s}")
|
StarcoderdataPython
|
1929106
|
"""The tests for the uptime sensor platform."""
import asyncio
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.components.uptime.sensor import UptimeSensor
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class TestUptimeSensor(unittest.TestCase):
"""Test the uptime sensor."""
def setUp(self):
"""Set up things to run when tests begin."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_uptime_min_config(self):
"""Test minimum uptime configuration."""
config = {"sensor": {"platform": "uptime"}}
assert setup_component(self.hass, "sensor", config)
def test_uptime_sensor_name_change(self):
"""Test uptime sensor with different name."""
config = {"sensor": {"platform": "uptime", "name": "foobar"}}
assert setup_component(self.hass, "sensor", config)
def test_uptime_sensor_config_hours(self):
"""Test uptime sensor with hours defined in config."""
config = {"sensor": {"platform": "uptime", "unit_of_measurement": "hours"}}
assert setup_component(self.hass, "sensor", config)
def test_uptime_sensor_config_minutes(self):
"""Test uptime sensor with minutes defined in config."""
config = {"sensor": {"platform": "uptime", "unit_of_measurement": "minutes"}}
assert setup_component(self.hass, "sensor", config)
def test_uptime_sensor_days_output(self):
"""Test uptime sensor output data."""
sensor = UptimeSensor("test", "days")
assert sensor.unit_of_measurement == "days"
new_time = sensor.initial + timedelta(days=1)
with patch("homeassistant.util.dt.now", return_value=new_time):
asyncio.run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop
).result()
assert sensor.state == 1.00
new_time = sensor.initial + timedelta(days=111.499)
with patch("homeassistant.util.dt.now", return_value=new_time):
asyncio.run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop
).result()
assert sensor.state == 111.50
def test_uptime_sensor_hours_output(self):
"""Test uptime sensor output data."""
sensor = UptimeSensor("test", "hours")
assert sensor.unit_of_measurement == "hours"
new_time = sensor.initial + timedelta(hours=16)
with patch("homeassistant.util.dt.now", return_value=new_time):
asyncio.run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop
).result()
assert sensor.state == 16.00
new_time = sensor.initial + timedelta(hours=72.499)
with patch("homeassistant.util.dt.now", return_value=new_time):
asyncio.run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop
).result()
assert sensor.state == 72.50
def test_uptime_sensor_minutes_output(self):
"""Test uptime sensor output data."""
sensor = UptimeSensor("test", "minutes")
assert sensor.unit_of_measurement == "minutes"
new_time = sensor.initial + timedelta(minutes=16)
with patch("homeassistant.util.dt.now", return_value=new_time):
asyncio.run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop
).result()
assert sensor.state == 16.00
new_time = sensor.initial + timedelta(minutes=12.499)
with patch("homeassistant.util.dt.now", return_value=new_time):
asyncio.run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop
).result()
assert sensor.state == 12.50
|
StarcoderdataPython
|
264846
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# %% Borrowed utils from here: https://github.com/pkmital/tensorflow_tutorials/
import tensorflow as tf  # needed by the tf-based helpers below (conv2d, linear, weight_variable, bias_variable)
import numpy as np
import numpy.matlib  # required for np.matlib.repmat in train_mean_vec2mat
import csv
def conv2d(x, n_filters,
k_h=5, k_w=5,
stride_h=2, stride_w=2,
stddev=0.02,
activation=lambda x: x,
bias=True,
padding='SAME',
name="Conv2D"):
"""2D Convolution with options for kernel size, stride, and init deviation.
Parameters
----------
x : Tensor
Input tensor to convolve.
n_filters : int
Number of filters to apply.
k_h : int, optional
Kernel height.
k_w : int, optional
Kernel width.
stride_h : int, optional
Stride in rows.
stride_w : int, optional
Stride in cols.
stddev : float, optional
Initialization's standard deviation.
activation : arguments, optional
Function which applies a nonlinearity
padding : str, optional
'SAME' or 'VALID'
name : str, optional
Variable scope to use.
Returns
-------
x : Tensor
Convolved input.
"""
with tf.variable_scope(name):
w = tf.get_variable(
'w', [k_h, k_w, x.get_shape()[-1], n_filters],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(
x, w, strides=[1, stride_h, stride_w, 1], padding=padding)
if bias:
b = tf.get_variable(
'b', [n_filters],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = conv + b
return conv
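# Usage sketch (TF1-style graph code; the input shape is an assumed example):
#   x = tf.placeholder(tf.float32, [None, 28, 28, 1])
#   h = conv2d(x, n_filters=16, name='conv1')  # SAME padding, stride 2 -> [None, 14, 14, 16]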
def linear(x, n_units, scope=None, stddev=0.02,
activation=lambda x: x):
"""Fully-connected network.
Parameters
----------
x : Tensor
Input tensor to the network.
n_units : int
Number of units to connect to.
scope : str, optional
Variable scope to use.
stddev : float, optional
Initialization's standard deviation.
activation : arguments, optional
Function which applies a nonlinearity
Returns
-------
x : Tensor
Fully-connected output.
"""
shape = x.get_shape().as_list()
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable("Matrix", [shape[1], n_units], tf.float32,
tf.random_normal_initializer(stddev=stddev))
return activation(tf.matmul(x, matrix))
# %%
def weight_variable(shape):
'''Helper function to create a weight variable initialized with
a normal distribution
Parameters
----------
shape : list
Size of weight variable
'''
#initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
initial = tf.zeros(shape)
return tf.Variable(initial)
# %%
def bias_variable(shape):
'''Helper function to create a bias variable initialized with
a constant value.
Parameters
----------
shape : list
Size of weight variable
'''
initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
return tf.Variable(initial)
# %%
def dense_to_one_hot(labels, n_classes=2):
"""Convert class labels from scalars to one-hot vectors."""
labels = np.array(labels).astype('int32')
n_labels = labels.shape[0]
index_offset = (np.arange(n_labels) * n_classes).astype('int32')
labels_one_hot = np.zeros((n_labels, n_classes), dtype=np.float32)
labels_one_hot.flat[index_offset + labels.ravel()] = 1
return labels_one_hot
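# Example: dense_to_one_hot([0, 1, 1], n_classes=2) returns
# [[1., 0.], [0., 1.], [0., 1.]] -- one row per label, with a 1 in the label's column.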
def prepare_trainVal_img_list(img_list, num_subjs):
#num_imgs_per_subj =np.zeros([num_subjs])
id_label_list = []
for row in img_list:
id_label = int(row[8])
#num_imgs_per_subj[id_label] += 1
id_label_list.append(id_label)
id_label_list = np.asarray(id_label_list)
id_label_list = np.reshape(id_label_list, [-1])
train_indices_list = []
valid_indices_list= []
eval_train_indices_list = []
eval_valid_indices_list = []
for i in range(num_subjs):
print(i)
curr_subj_idx = np.nonzero(id_label_list == i)[0]
tmp = np.random.permutation(curr_subj_idx)
per80 = int(np.floor(len(curr_subj_idx) * 0.8))  # cast to int so it can be used as a slice bound below
t_inds = tmp[0:per80]
v_inds = tmp[per80:]
train_indices_list.append(t_inds)
valid_indices_list.append(v_inds)
eval_train_indices_list.append(t_inds[0])
eval_valid_indices_list.append(v_inds[0])
train_indices_list = np.asarray(train_indices_list)
valid_indices_list = np.asarray(valid_indices_list)
eval_train_indices_list = np.asarray(eval_train_indices_list)
eval_valid_indices_list = np.asarray(eval_valid_indices_list)
#print(train_indices_list, train_indices_list.shape
train_indices_list = np.hstack(train_indices_list).astype('int')
valid_indices_list = np.hstack(valid_indices_list).astype('int')
eval_train_indices_list = np.hstack(eval_train_indices_list).astype('int')
eval_valid_indices_list = np.hstack(eval_valid_indices_list).astype('int')
print(train_indices_list.shape, valid_indices_list.shape, eval_train_indices_list.shape, eval_valid_indices_list.shape)
img_list = np.asarray(img_list)
print(img_list.shape)
train_list = img_list[train_indices_list]
valid_list = img_list[valid_indices_list]
eval_train_list = img_list[eval_train_indices_list]
eval_valid_list = img_list[eval_valid_indices_list]
np.savez("Oxford_trainVal_data_3DSTN.npz", train_list=train_list, valid_list=valid_list, eval_train_list=eval_train_list, eval_valid_list=eval_valid_list)
def select_eval_img_list(img_list, num_subjs, save_file_name):
# number of validation subjects
id_label_list = []
for row in img_list:
id_label = int(row[8])
id_label_list.append(id_label)
id_label_list = np.asarray(id_label_list)
id_label_list = np.reshape(id_label_list, [-1])
eval_indices_list = []
for i in range(num_subjs):
print(i)
curr_subj_idx = np.nonzero(id_label_list == i)[0]
tmp = np.random.permutation(curr_subj_idx)
inds = tmp[0:min(5, len(curr_subj_idx))]
eval_indices_list.append(inds)
eval_indices_list = np.asarray(eval_indices_list)
eval_indices_list = np.hstack(eval_indices_list).astype('int')
print(eval_indices_list.shape)
img_list = np.asarray(img_list)
print(img_list.shape)
eval_list = img_list[eval_indices_list]
np.savez(save_file_name, eval_list=eval_list)
"""
# Record the number of images per subject
num_imgs_per_subj =np.zeros([num_subjs])
for row in valid_img_list:
id_label = int(row[8])
num_imgs_per_subj[id_label] += 1
hist_subj = np.zeros([num_subjs])
idx = 0
count = 0
for row in valid_img_list:
count += 1
print(count
image_key = row[0]
image_path = row[1]
id_label = int(row[8])
if idx >= num_subjs:
break
if hist_subj[idx] < min(1, num_imgs_per_subj[idx]):
if id_label == idx:
with open(save_file_name, "a") as f:
f.write(image_key + "," + image_path + "," + row[2] + "," + row[3] + "," + row[4] + "," + row[5] + "," + row[6] + "," + row[7] + "," + str(id_label) + "\n")
hist_subj[idx] += 1
else:
idx += 1
"""
def input_processing(images, pose_labels, id_labels, train_mean_vec, mean_labels, std_labels, num_imgs, image_size, num_classes):
images = images.reshape([num_imgs, image_size, image_size, 3])
pose_labels = pose_labels.reshape([num_imgs, 6])
id_labels = id_labels.reshape([num_imgs, 1])
id_labels = dense_to_one_hot(id_labels, num_classes)
# Subtract train image mean
images = images / 255.
train_mean_mat = train_mean_vec2mat(train_mean_vec, images)
normalized_images = images - train_mean_mat
# Normalize labels
normalized_pose_labels = (pose_labels - mean_labels) / (std_labels + 0.000000000000000001)
return normalized_images, normalized_pose_labels, id_labels
def train_mean_vec2mat(train_mean, images_array):
height = images_array.shape[1]
width = images_array.shape[2]
#batch = images_array.shape[0]
train_mean_R = np.matlib.repmat(train_mean[0],height,width)
train_mean_G = np.matlib.repmat(train_mean[1],height,width)
train_mean_B = np.matlib.repmat(train_mean[2],height,width)
R = np.reshape(train_mean_R, (height,width,1))
G = np.reshape(train_mean_G, (height,width,1))
B = np.reshape(train_mean_B, (height,width,1))
train_mean_image = np.append(R, G, axis=2)
train_mean_image = np.append(train_mean_image, B, axis=2)
return train_mean_image
def create_file_list(csv_file_path):
with open(csv_file_path, 'r') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
csv_list = list(csvreader)
return csv_list
|
StarcoderdataPython
|
6484957
|
<filename>paas-ce/paas/paas/common/tests.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.conf import settings
from django.test import TestCase
import mock
import requests
from common.constants import LogoImgRelatedDirEnum
from common.http import _gen_header, http_delete, http_get, http_post
from common.utils import file_size_bytes_to_m, get_app_logo, should_update_logo
class CommonUtilsTestCase(TestCase):
def test_file_size_bytes_to_m(self):
size = None
self.assertEqual(size, file_size_bytes_to_m(size))
size = 0
self.assertEqual(size, file_size_bytes_to_m(0))
size = 1024 * 1024
self.assertEqual(1.0, file_size_bytes_to_m(size))
def test_get_app_logo(self):
app_code = 'bk_framework'
logo_name = '{}/{}.png'.format(LogoImgRelatedDirEnum.APP.value, app_code)
result = '{}{}'.format(settings.MEDIA_URL, logo_name)
self.assertEqual(result, get_app_logo(app_code))
app_code = "not_exists"
self.assertEqual("", get_app_logo(app_code))
def test_should_update_logo(self):
app_code = "test"
app_logo_name = "{}/{}.png".format(LogoImgRelatedDirEnum.APP.value, app_code)
ok, _ = should_update_logo(app_code, app_logo_name)
self.assertFalse(ok)
ok, logo_name = should_update_logo('test1', app_logo_name)
self.assertTrue(ok)
class CommonHttpTestCase(TestCase):
def _mock_response(self, status=200, content="CONTENT", json_data=None, raise_for_status=None):
"""
https://gist.github.com/evansde77/45467f5a7af84d2a2d34f3fcb357449c
since we typically test a bunch of different
requests calls for a service, we are going to do
a lot of mock responses, so it's usually a good idea
to have a helper function that builds these things
"""
mock_resp = mock.Mock()
# mock raise_for_status call w/optional error
mock_resp.raise_for_status = mock.Mock()
if raise_for_status:
mock_resp.raise_for_status.side_effect = raise_for_status
# set status code and content
mock_resp.status_code = status
mock_resp.content = content
# add json data if provided
if json_data:
mock_resp.json = mock.Mock(
return_value=json_data
)
return mock_resp
@mock.patch('requests.get')
def test_http_get(self, mock_get):
# 200
mock_resp = self._mock_response(status=200)
mock_get.return_value = mock_resp
ok, data = http_get("http://not_exists.com/", data={})
self.assertTrue(ok)
# 200, with json
json_data = {"a": 1, "b": 2}
mock_resp = self._mock_response(status=200, json_data=json_data)
mock_get.return_value = mock_resp
ok, data = http_get("http://not_exists.com/", data={})
self.assertTrue(ok)
self.assertEqual(json_data, data)
# not 200
mock_resp = self._mock_response(status=400)
mock_get.return_value = mock_resp
ok, data = http_get("http://not_exists.com/", data={})
self.assertFalse(ok)
# timeout
# https://stackoverflow.com/questions/48723711/python-mock-requests-post-to-throw-exception
mock_get.side_effect = requests.exceptions.Timeout()
ok, data = http_get("http://not_exists.com/", data={})
self.assertFalse(ok)
@mock.patch('requests.post')
def test_http_post(self, mock_post):
# 200
mock_resp = self._mock_response(status=200)
mock_post.return_value = mock_resp
ok, data = http_post("http://not_exists.com/", data={})
self.assertTrue(ok)
# 200, with json
json_data = {"a": 1, "b": 2}
mock_resp = self._mock_response(status=200, json_data=json_data)
mock_post.return_value = mock_resp
ok, data = http_post("http://not_exists.com/", data={})
self.assertTrue(ok)
self.assertEqual(json_data, data)
@mock.patch('requests.delete')
def test_http_delete(self, mock_delete):
# 200
mock_resp = self._mock_response(status=200)
mock_delete.return_value = mock_resp
ok, data = http_delete("http://not_exists.com/", data={})
self.assertTrue(ok)
# 200, with json
json_data = {"a": 1, "b": 2}
mock_resp = self._mock_response(status=200, json_data=json_data)
mock_delete.return_value = mock_resp
ok, data = http_delete("http://not_exists.com/", data={})
self.assertTrue(ok)
self.assertEqual(json_data, data)
def test_default_header(self):
headers = {
"Content-Type": "application/json",
}
self.assertEqual(headers, _gen_header())
|
StarcoderdataPython
|
3406367
|
<gh_stars>1-10
# Generated by Django 4.0.2 on 2022-03-06 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("boardmanlab", "0008_alter_helpsession_is_inperson_and_more"),
]
operations = [
migrations.AlterField(
model_name="helpsession",
name="notes",
field=models.TextField(blank=True, max_length=1000, null=True),
),
migrations.AlterField(
model_name="helpsession",
name="remote_link",
field=models.TextField(blank=True, max_length=200, null=True),
),
]
|
StarcoderdataPython
|
4958267
|
def sumofpowersof2(n):
    # sum of the first n powers of two: 2**0 + 2**1 + ... + 2**(n-1) = (1 << n) - 1
    return (1 << n) - 1

print(sumofpowersof2(4))
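# e.g. n = 4: 2**0 + 2**1 + 2**2 + 2**3 = 15 = (1 << 4) - 1, so the call above prints 15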
|
StarcoderdataPython
|
5030136
|
# high res ocean regions
import numpy as np
import xarray as xr
from xr_DataArrays import example_file, depth_lat_lon_names
from paths import path_samoc
from paths import file_ex_ocn_rect, file_RMASK_ocn, file_RMASK_ocn_rect, file_RMASK_ocn_low
ocn_file = example_file('ocn')
bll_AMO = (( 0, 60), (- 80, 0)) # (0-60N, 0-80W)
bll_N34 = (( -5, 5), (-170,-120)) # (5N-5S, 170W-120W)
bll_SOM = ((-50,-35), (- 50, 0)) # (50S-35S, 50W-0W) `SOM` region in Le Bars et al. (2016)
bll_SMV = ((-80,-50), (- 30, 80)) # (80S-50S, 30W-80E) `WGKP` region in Jüling et al. (2018)
bll_TPI1 = (( 25, 40), ( 140,-145)) # (25N-45N, 140E-145W)
bll_TPI2 = ((-10, 10), ( 170,- 90)) # (10S-10N, 170E-90W)
bll_TPI3 = ((-50,-15), ( 150,-160)) # (50S-15S, 150E-160W)
# 'ocn'
# AMO_area = {'nlat':slice(1081,1858), 'nlon':slice( 500,1100)} # North Atlantic (0-60N, 0-80W)
Drake_Passage = {'nlat':slice( 268, 513), 'nlon':410 }
DP_North = {'nlat':512 , 'nlon':410 }
global_ocean = {'nlat':slice( 0,2400), 'nlon':slice( 0,3600)} # global ocean
Nino12 = {'nlat':slice(1081,1181), 'nlon':slice( 200, 300)} # Niño 1+2 (0-10S, 90W-80W)
Nino34 = {'nlat':slice(1131,1232), 'nlon':slice(3000,3500)} # Niño 3.4 (5N-5S, 170W-120W)
sinking_area = {'nlat':slice( 283, 353), 'nlon':slice(1130,1210)}
SOM_ocn = {'nlat':slice( 603, 808), 'nlon':slice( 600,1100)} # (50S-35S, 0E-50W)
# TexT_area = {'nlat':slice( 427,1858) } # tropic + extratropics (60S-60N)
WGKP_area = {'nlat':slice( 0, 603), 'nlon':slice( 750,1900)}
WG_center = {'nlat':321 , 'nlon':877 } # [64.9S,337.8E]
# 'ocn_rect'
# SOM_area_rect = {'t_lat': slice(-50,-35), 't_lon': slice(0, 50)}
gl_ocean_rect = {'t_lat': slice(-80, 90), 't_lon': slice(0,360)}
SOM_rect = {'t_lat': slice(-50,-35), 't_lon': slice(310,360)}
# 'ocn_low' ! displaced dipole: not strictly lat-lon in NH
# AMO_area_low = {'nlat':slice( 187, 353), 'nlon':slice( 284,35)} # North Atlantic (0-60N, 0-80W)
Nino12_low = {'nlat':slice( 149, 187), 'nlon':slice( 275, 284)} # Niño 1+2 (0-10S, 90W-80W)
Nino34_low = {'nlat':slice( 168, 205), 'nlon':slice( 204, 248)} # Niño 3.4 (5N-5S, 170W-120W)
gl_ocean_low = {'nlat':slice( 0, 384), 'nlon':slice( 0, 320)}
SOM_low = {'nlat':slice( 55, 83), 'nlon':slice( -9, 36)}
# 'ocn_had'
SOM_had = {'latitude':slice( -35.5, -49.5), 'longitude':slice(-49.5, -.5)}
# 'atm'
Uwind_eq_Pa = {'lat':slice(-6,6), 'lon':slice(180,200)}
regions_dict = {-14: 'Caspian_Sea',
-13: 'Black_Sea',
# -1: 'no calculations',
# 0: 'continents',
0: 'Global_Ocean',
1: 'Southern_Ocean',
2: 'Pacific_Ocean',
3: 'Indian_Ocean',
4: 'Persian_Gulf',
5: 'Red_Sea',
6: 'Atlantic_Ocean',
7: 'Mediterranean',
8: 'Labrador_Sea',
9: 'Greenland_Sea',
10: 'Arctic_Ocean',
11: 'Hudson_Bay',
12: 'Baltic_Sea',
}
OSNAP = {'IC0':(-35.1, 59.2),
'IC1':(-33.7, 59.2),
'IC2':(-32.7, 59.0),
'IC3':(-31.95, 58.96),
'IC4':(-31.3, 58.9)
}
def boolean_mask(domain, mask_nr, rounded=False):
""" selects a region by number, returns xr DataArray """
assert domain in ['ocn', 'ocn_low', 'ocn_rect', 'ocn_had', 'ocn_ersst', 'ocn_cobe']
RMASK = xr.open_dataarray(f'{path_samoc}/grid/RMASK_{domain}.nc')
# created in regrid_tutorial.ipynb
MASK = RMASK.copy()
if mask_nr==0: # global ocean
MASK_np = np.where(RMASK>0, 1, 0)
else:
MASK_np = np.where(RMASK==mask_nr, 1, 0)
MASK.values = MASK_np
    if rounded and 'TLAT' in MASK.coords and 'TLONG' in MASK.coords:
MASK['TLAT' ] = MASK['TLAT' ].round(decimals=2)
MASK['TLONG'] = MASK['TLONG'].round(decimals=2)
return MASK
def combine_mask(MASK, numbers):
"""
combines submasks of MASK into single boolean mask
input:
MASK .. xr DataArray with integer numbers for masks
numbers .. list of IDs of submasks to be combined
"""
assert len(numbers)>1
NEW_MASK = xr.where(MASK==numbers[0], 1, 0)
for number in numbers[1:]:
NEW_MASK = xr.where(MASK==number, 1, NEW_MASK)
return NEW_MASK
def Atlantic_mask(domain):
assert domain in ['ocn', 'ocn_low']
ds = xr.open_dataset(example_file(domain), decode_times=False)
ATLANTIC_MASK = combine_mask(ds.REGION_MASK, [6,8,9])
return ATLANTIC_MASK
def mask_box_in_region(domain, mask_nr, bounding_lats=None, bounding_lons=None):
""" boolean mask of region limited by bounding_lats/lons """
MASK = boolean_mask(domain=domain, mask_nr=mask_nr)
if domain=='ocn_rect':
lat, lon = 't_lat', 't_lon'
elif domain in ['ocn', 'ocn_low']:
lat, lon = 'TLAT', 'TLONG'
elif domain in ['ocn_had', 'ocn_ersst', 'ocn_cobe']:
lat, lon = 'latitude', 'longitude'
    if bounding_lats is not None:
assert type(bounding_lats)==tuple
(latS, latN) = bounding_lats
assert latS<latN
MASK = MASK.where(MASK[lat]<latN, 0)
MASK = MASK.where(MASK[lat]>latS, 0)
    if bounding_lons is not None:
assert type(bounding_lons)==tuple
(lonW, lonE) = bounding_lons
assert lonW!=lonE
if domain in ['ocn', 'ocn_low', 'ocn_rect', 'ocn_ersst', 'ocn_cobe']:
if lonW<0: lonW = lonW+360.
if lonE<0: lonE = lonE+360.
assert lonW>=0
assert lonW<=360
assert lonE>=0
assert lonE<=360
elif domain=='ocn_had':
if lonW>180: lonW = lonW-360.
if lonE>180: lonE = lonE-360.
if lonW<lonE:
MASK = MASK.where(MASK[lon]<lonE, 0)
MASK = MASK.where(MASK[lon]>lonW, 0)
else: # crossing grid boundary
MASK = MASK.where(MASK[lon]<lonE, 0) + MASK.where(MASK[lon]>lonW, 0)
return MASK
def AMO_mask(domain):
file = example_file(domain)
TLAT = xr.open_dataset(file, decode_times=False).TLAT
if domain=='ocn_low':
MASK = boolean_mask(domain, mask_nr=6)
elif domain=='ocn':
MASK = Atlantic_mask(domain)
MASK = np.where(TLAT> 0, MASK, 0)
MASK = np.where(TLAT<60, MASK, 0)
return MASK
def TexT_mask(domain):
file = example_file(domain)
TLAT = xr.open_dataset(file, decode_times=False).TLAT
MASK = boolean_mask(domain, mask_nr=0)
MASK = np.where(TLAT>-60, MASK, 0)
MASK = np.where(TLAT< 60, MASK, 0)
return MASK
def TPI_masks(domain, region_nr):
file = example_file(domain)
TLAT = xr.open_dataset(file, decode_times=False).TLAT
TLONG = xr.open_dataset(file, decode_times=False).TLONG
MASK = boolean_mask(domain, mask_nr=0)
if region_nr==1:
# (25N-45N, 140E-145W)
MASK = np.where(TLAT > 25, MASK, 0)
MASK = np.where(TLAT < 45, MASK, 0)
MASK = np.where(TLONG>140, MASK, 0)
        MASK = np.where(TLONG<215, MASK, 0)
elif region_nr==2:
# (10S-10N, 170E-90W)
MASK = np.where(TLAT >-10, MASK, 0)
MASK = np.where(TLAT < 10, MASK, 0)
MASK = np.where(TLONG>170, MASK, 0)
        MASK = np.where(TLONG<270, MASK, 0)
elif region_nr==3:
# (50S-15S, 150E-160W)
MASK = np.where(TLAT >-50, MASK, 0)
MASK = np.where(TLAT <-15, MASK, 0)
MASK = np.where(TLONG>150, MASK, 0)
        MASK = np.where(TLONG<200, MASK, 0)
return MASK
def SST_index_bounds(name):
if name=='TPI1': bounds = (140,215, 25, 45)
elif name=='TPI2': bounds = (170,270,-10, 10)
elif name=='TPI3': bounds = (150,200,-50,-15)
elif name=='SOM' : bounds = (310,360,-50,-35)
elif name=='SMV' : bounds = (-30, 80,-80,-50)
elif name=='AMO' : bounds = (280,360, 0, 60)
elif name=='AMV' : bounds = (280,360, 0, 60)
elif name=='PDO' : bounds = (110,255, 20, 68)
elif name=='PMV' : bounds = (110,255, 20, 68)
elif name=='IPO' : bounds = (110,255,-38, 68)
elif name=='PMV_Eq' : bounds = (110,255,0, 68)
return bounds
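# Hedged usage sketch (added for illustration): assumes the RMASK_* netCDF files
# referenced above exist under `path_samoc`; all names used here come from this module.
if __name__ == '__main__':
    MASK_atl = boolean_mask('ocn_low', mask_nr=6)  # Atlantic Ocean mask
    (latS, latN), (lonW, lonE) = bll_AMO
    MASK_amo = mask_box_in_region('ocn_low', mask_nr=6,
                                  bounding_lats=(latS, latN),
                                  bounding_lons=(lonW, lonE))
    print(SST_index_bounds('AMO'))  # (280, 360, 0, 60)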
|
StarcoderdataPython
|
273958
|
##########################################################################
#
# Copyright 2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""Generated an header, glproc.hpp, which does pretty much what GLEW does, but
covers all the functions we support.
"""
from __future__ import print_function
import sys
from dispatch import Dispatcher
import specs.stdapi as stdapi
from specs.gles12api import glesapi
from specs.eglapi import eglapi
def ResetFuncPtrsInAPI(api):
for func in api.functions:
print(' _'+func.name+'_ptr = NULL;')
def ResetGLFuncPtrs():
print('void ResetGLFuncPtrs()')
print('{')
ResetFuncPtrsInAPI(glesapi)
print('}')
if __name__ == '__main__':
# glClientSideBufferData is a fake api to update client-side memory
glesapi.delFunctionByName("glClientSideBufferData")
glesapi.delFunctionByName("glClientSideBufferSubData")
glesapi.delFunctionByName("glCreateClientSideBuffer")
glesapi.delFunctionByName("glDeleteClientSideBuffer")
glesapi.delFunctionByName("glCopyClientSideBuffer")
glesapi.delFunctionByName("glPatchClientSideBuffer")
# glGenGraphicBuffer_ARM, glGraphicBufferData_ARM and glDeleteGraphicBuffer are fake APIs to use GraphicBuffer on Android or to use DMA buffer on Linux
glesapi.delFunctionByName("glGenGraphicBuffer_ARM")
glesapi.delFunctionByName("glGraphicBufferData_ARM")
glesapi.delFunctionByName("glDeleteGraphicBuffer_ARM")
dispatcher = Dispatcher()
#############################################################
sys.stdout = open('eglproc_auto.hpp', 'w')
print('// Generated by', sys.argv[0])
print('#ifndef _DISPATCH_EGLPROC_HPP_')
print('#define _DISPATCH_EGLPROC_HPP_')
print()
print('#include "eglimports.hpp"')
print('#include "common/os.hpp"')
print()
print('#ifndef GLES_CALLCONVENTION')
print('#define GLES_CALLCONVENTION')
print('#endif')
print()
print('void * _getProcAddress(const char *procName);')
print('void ResetGLFuncPtrs();')
print()
dispatcher.dispatchApi(eglapi)
print()
dispatcher.dispatchApi(glesapi)
print()
print('#endif /* !_DISPATCH_EGLPROC_HPP_ */')
print()
#############################################################
sys.stdout = open('eglproc_auto.cpp', 'w')
print('// Generated by', sys.argv[0])
print('#include <dispatch/eglproc_auto.hpp>')
print()
dispatcher.defineFptrs(eglapi)
print()
dispatcher.defineFptrs(glesapi)
print()
ResetGLFuncPtrs()
print()
|
StarcoderdataPython
|
4813353
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.python.code_generator import CodeGenerator
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.ivy.bootstrapper import Bootstrapper
from pants.ivy.ivy import Ivy
from pants.util.dirutil import safe_mkdir
class PythonAntlrBuilder(CodeGenerator):
"""
Antlr builder.
"""
def run_antlrs(self, output_dir):
# TODO(<NAME>): graduate to a JvmToolTask and/or merge with the java code gen AntlrGen
# task.
args = [
'-dependency', 'org.antlr', 'antlr', self.target.antlr_version,
'-types', 'jar',
'-main', 'org.antlr.Tool',
'--', '-fo', output_dir
]
for source in self.target.sources_relative_to_buildroot():
abs_path = os.path.join(get_buildroot(), source)
args.append(abs_path)
try:
ivy = Bootstrapper.default_ivy()
ivy.execute(args=args) # TODO: Needs a workunit, when we have a context here.
except (Bootstrapper.Error, Ivy.Error) as e:
raise TaskError('ANTLR generation failed! {0}'.format(e))
def generate(self):
# Create the package structure.
path = self.sdist_root
package = ''
for module_name in self.target.module.split('.'):
path = os.path.join(path, module_name)
if package == '':
package = module_name
else:
package = package + '.' + module_name
safe_mkdir(path)
with open(os.path.join(path, '__init__.py'), 'w') as f:
if package != self.target.module: # Only write this in the non-leaf modules.
f.write("__import__('pkg_resources').declare_namespace(__name__)")
self.created_namespace_packages.add(package)
self.created_packages.add(package)
# autogenerate the python files that we bundle up
self.run_antlrs(path)
|
StarcoderdataPython
|
106861
|
from exp.experiment import Experiment
from exp.auctions import Auction
def test_experiment_generates_auctions():
experiment = Experiment()
assert experiment.auctions is not None
assert len(experiment.auctions) > 0
for aid, auction in experiment.auctions.items():
assert type(auction) is Auction
assert type(aid) is int
|
StarcoderdataPython
|
3591706
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
my_dict = {'insert_me': "Hello I'm from views.py"}
return render(request, 'first_app/index.html', context=my_dict)
def help(request):
help_dict = {'help_me': 'hello from views.py'}
return render(request, 'first_app/help.html', context=help_dict)
|
StarcoderdataPython
|
1949869
|
#!/usr/bin/env python2.7
# Forked from crowd-api
# 15/12/2017 version 1.0 by <NAME>
# - Add feature to update user metadata
# - Add feature to get all active users
import requests
import json
import random
import string
import logging
class client(object):
def __init__(self, **kwargs):
if 'api_url' not in kwargs:
raise ValueError("Crowd API URL must be given")
self.api_url = kwargs['api_url']
if 'app_name' not in kwargs:
raise ValueError("Crowd API application name must be given")
if 'app_password' not in kwargs:
raise ValueError("Crowd API application password must be given")
self.auth = (kwargs['app_name'], kwargs['app_password'])
def api_get(self, query):
req = requests.get(self.api_url + query, headers = {"Content-Type": "application/json", "Accept": "application/json"}, auth = self.auth)
return req
def api_post(self, query, data):
req = requests.post(self.api_url + query, headers = {"Content-Type": "application/json", "Accept": "application/json"}, auth = self.auth, data = json.dumps(data))
return req
def api_put(self, query, data):
req = requests.put(self.api_url + query, headers = {"Content-Type": "application/json", "Accept": "application/json"}, auth = self.auth, data = json.dumps(data))
return req
def get_user(self, **kwargs):
if "username" in kwargs:
if kwargs['username'] == 'all':
req = self.api_get("/search?entity-type=user&restriction=active%3Dtrue&max-results=10000")
return req.json()
else:
req = self.api_get("/user?username=" + kwargs['username'] + "&expand=attributes")
if req.status_code == 200:
return {"status": True, "user": req.json()}
if req.status_code == 404:
return {"status": False, "user": None}
else:
return {"status": False, "code": req.status_code, "reason": req.content}
else:
raise ValueError("Must pass username")
def update_user(self, **kwargs):
if "username" not in kwargs:
raise ValueError("Must pass username")
if "data" not in kwargs:
raise ValueError("Must pass data")
req = self.api_put("/user/?username=" + kwargs['username'], kwargs['data'])
print req.status_code
if req.status_code == 204:
return {"status": True}
else:
return {"status": False, "code": req.status_code, "reason": req.content}
def get_user_groups(self, **kwargs):
groups = []
if "username" not in kwargs:
raise ValueError("Must pass username")
req = self.api_get("/user/group/direct?username=" + kwargs['username'])
if req.status_code == 200:
for group in req.json()['groups']:
groups.append(group['name'])
return {"status": True, "groups": groups}
if req.status_code == 404:
return {"status": False, "groups": []}
else:
return {"status": False, "code": req.status_code, "reason": req.content}
def set_user_attribute(self, **kwargs):
if "username" not in kwargs:
raise ValueError("Must pass username")
if "attribute_name" not in kwargs:
raise ValueError("Must pass attribute_name")
if "attribute_value" not in kwargs:
raise ValueError("Must pass attribute_value")
else:
if not isinstance(kwargs['attribute_value'], list):
kwargs['attribute_value'] = [ kwargs['attribute_value'] ]
req = self.api_post("/user/attribute?username=" + kwargs['username'], {"attributes": [{"name": kwargs['attribute_name'], "values": kwargs['attribute_value']}]})
if req.status_code == 204:
return {"status": True}
else:
return {"status": False, "code": req.status_code, "reason": req.content}
def create_user(self, **kwargs):
user = {}
for k, v in kwargs.iteritems():
user[k.replace('_', '-')] = v
if 'password' not in kwargs:
user['password'] = {}
user['password']['value'] = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
req_password_change = True
else:
req_password_change = False
user['active'] = True
req = self.api_post("/user", user)
if req.status_code == 201:
# user should change the password at their next login
if req_password_change:
self.set_user_attribute(username = user['name'], attribute_name = "requiresPasswordChange", attribute_value = True)
return {"status": True, "password": user['password']['value']}
else:
return {"status": True}
else:
return {"status": False, "code": req.status_code, "reason": req.content}
def create_group(self, **kwargs):
req = self.api_post("/group", {"groupname": kwargs['name'], "type": "GROUP", "description": kwargs['description'], "active": True})
if req.status_code == 201:
return {"status": True}
else:
return {"status": False, "code": req.status_code, "reason": req.content}
def add_user_to_group(self, **kwargs):
req = self.api_post("/user/group/direct?username=" + kwargs['username'], {"name": kwargs['groupname']})
if req.status_code == 201:
return {"status": True}
else:
return {"status": False, "code": req.status_code, "reason": req.content}
|
StarcoderdataPython
|
4870378
|
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.contact import ContactMap
class Contact(ContactMap, BaseData):
pass
|
StarcoderdataPython
|
385615
|
<reponame>Dheer08/Python-Projects
programming_dictonary = {
"Bug":"An error in program that prevents the program running as expected",
"Function":"A piece of code that you can call over and over again",
}
# Retrieve
print(programming_dictonary["Bug"])
# Adding items
programming_dictonary["Loop"] = "The action of doing something again and again"
empty_list = []
empty_dictonary = {}
# wipe an existing dictionary - helpful for wiping out the data of a game
# programming_dictonary = {}
print(programming_dictonary.keys())
print(programming_dictonary.values())
# editing dictionary values
programming_dictonary["Bug"] = "An moth in your computer"
print(programming_dictonary)
# Loop through the dictionary
for key in programming_dictonary:
print(key,end=" : ")
print(programming_dictonary[key])
|
StarcoderdataPython
|
12855633
|
<gh_stars>0
import pytest
from GraphModels.models.Sarah.model_agricultural_water import AgriculturalWaterNodes
from GraphModels.models.Sarah.model_freshwater_available import FreshwaterAvailableNodes
from GraphModels.models.Sarah.model_municipal_water import MunicipalWaterNodes
nodes_list = AgriculturalWaterNodes + FreshwaterAvailableNodes + MunicipalWaterNodes
computationnal_nodes = [node for node in nodes_list if 'computation' in node.keys()]
@pytest.mark.parametrize(('node'), nodes_list)
def test_node_minimal_keys(node):
assert set(['type', 'unit', 'id', 'name']) <= set(node.keys())
@pytest.mark.parametrize(('node'), computationnal_nodes)
def test_node_computationnal(node):
assert set(['formula', 'name']) == set(node['computation'].keys())
def test_inputs_computation():
inputs_computation = [val for sublist in [node['in'] for node in nodes_list if 'in' in node] for val in sublist]
node_ids = [node['id'] for node in nodes_list]
assert set(inputs_computation) <= set(node_ids)
|
StarcoderdataPython
|
6625395
|
#!/usr/local/subliminal/env/bin/python
from application.db import Session, Directory
from application.direct import Subliminal, scan, notify
import os
import sys
import argparse
class Scanner(object):
def __init__(self, directory_id):
self.directory_id = directory_id
self.session = Session()
def daemonize(self):
"""Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0: # exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write('Fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0: # exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write('Fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
def start(self):
"""Start the application and daemonize"""
self.daemonize()
self.run()
def run(self):
directory = self.session.query(Directory).get(self.directory_id)
if not os.path.exists(directory.path):
return 0
s = Subliminal()
results = scan(directory.path, s.config, temp_cache=True)
if s.config['General']['dsm_notifications']:
notify('Downloaded %d subtitle(s) for %d video(s) in directory %s' % (sum([len(s) for s in results.itervalues()]), len(results), directory.name))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory scanner')
parser.add_argument('id', help='directory id to scan', metavar='ID')
args = parser.parse_args()
scanner = Scanner(args.id)
scanner.start()
|
StarcoderdataPython
|
3589348
|
<reponame>nikhiljain-413/Hacktoberfest2021_beginner
# AUTHOR: <NAME>
# Python3 Concept:String traversing
# GITHUB: https://github.com/AadityaKumra
#capitalize first letter of each word.
#input-nikhil jain
#output-Nikhil Jain
def Capatalize_string(a):
    words = a.split(' ')
    n = ' '.join(word.capitalize() for word in words)
    return n
s = input()
final_string = Capatalize_string(s)
print(final_string)
|
StarcoderdataPython
|
11230494
|
# Generated by Django 2.2.7 on 2020-02-17 23:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('komax_app', '0003_auto_20200217_2235'),
]
operations = [
migrations.AlterField(
model_name='harnesschart',
name='wire_type',
field=models.CharField(max_length=256),
),
]
|
StarcoderdataPython
|
1864203
|
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
# import board
from std_msgs.msg import String
from std_msgs.msg import Int32MultiArray, Int16
from sensor_msgs.msg import Joy, Imu
import time
import argparse
import threading
import math
euler = [0.0, 0.0, 0.0]
class Robot(Node):
def __init__(
self,
simulation=False,
debug=False,
steer_gain=0.8,
speed_limit=0.3,
left_trim=-0,
right_trim=0,
*args,
**kwargs
):
super().__init__("robot")
self.steer_gain = steer_gain
self.speed_limit = speed_limit
self.left_trim = left_trim
self.right_trim = right_trim
self.simulation = simulation
self.debug = debug
self.joy_topic = self.create_subscription(Joy, "joy", self.joy_topic, 10)
self.joy_web = self.create_subscription(String, "joy_web", self.joy_web, 10)
self.imu_topic = self.create_subscription(Imu, "/imu", self.imu_topic, 10)
from simple_pid import PID
self.max_speed = 0.002
self.GpioPins_MA = [13, 11, 15, 12]
self.GpioPins_MB = [37, 33, 35, 16]
        # Max speed is [-0.002, 0.002], will need to divide the PID value accordingly
self.pid = PID(
1, 0, 0, output_limits=(-200, 200), setpoint=41, sample_time=0.08
)
# Initialize motors
if not self.simulation:
from .stepper_lib import BYJMotor
            # Declare a named instance of the class; pass a name and the type of motor.
            # The type of motor ("Nema") is case sensitive.
self.motor_A = BYJMotor("motor_A", "Nema")
self.motor_B = BYJMotor("motor_B", "Nema")
self.get_logger().info("Motors initialized successfully")
def set_steer_gain(self, steer_gain):
self.steer_gain = steer_gain
def set_speed_limit(self, speed_limit):
self.speed_limit = speed_limit
def set_left_trim(self, left_trim):
self.left_trim = left_trim
def set_right_trim(self, right_trim):
self.right_trim = right_trim
def move(self, speed, steer):
speed_l = speed
speed_r = speed
if not self.simulation:
time.sleep(0.1)
t1 = threading.Thread(
target=self.motor_A.motor_run,
args=(self.GpioPins_MA, speed_l / 10000, 1, True, False, "full", 1),
)
# motor_A.motor_run(GpioPins_MA, s, 200, True, True, "full", 1)
t1.start()
t2 = threading.Thread(
target=self.motor_B.motor_run,
args=(self.GpioPins_MB, -speed_r / 10000, 1, True, False, "full", 1),
)
t2.start()
else:
self.get_logger().info(
"Simulation mode: "
+ str(round(speed_l / 10000, 7))
+ ","
+ str(round(speed_r, 2))
+ "angle: "
+ str(round(euler[0], 2))
+ ")"
)
def imu_topic(self, msg):
global euler
euler = euler_from_quaternion(
msg.orientation.x,
msg.orientation.y,
msg.orientation.z,
msg.orientation.w,
False,
1,
)
print(
"PID: "
+ str(round(self.pid(euler[0] / 10000), 7))
+ "\t: "
+ str(euler[0])
+ "\t: "
+ str(euler[1])
+ "\t: "
+ str(euler[2])
)
self.move(self.pid(euler[0]), 0)
def joy_topic(self, msg):
if self.debug:
self.get_logger().info(
"joy X: " + str(msg.axes[0]) + " Y: " + str(msg.axes[1])
)
self.move(msg.axes[1], -msg.axes[0])
# These are the buttons to do fine adjustments to the position of the robot
if msg.axes[7] != 0:
self.move(msg.axes[7] * self.speed_limit - self.left_trim, 0)
if msg.axes[6] != 0:
self.move(0, -(msg.axes[6] * self.speed_limit - self.left_trim))
if msg.buttons[2] == 1:
self.move(0, 0)
def joy_web(self, msg):
speed = msg.data.split(",")
self.move(speed[1], speed[0])
# print(msg.data)
def euler_from_quaternion(x, y, z, w, rad=False, approx=1):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
if not rad:
roll_x = round(math.degrees(roll_x), approx)
pitch_y = round(math.degrees(pitch_y), approx)
yaw_z = round(math.degrees(yaw_z), approx)
    return roll_x, pitch_y, yaw_z  # radians if rad=True, otherwise degrees (rounded)
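# Worked examples for the conversion above (added for illustration; standard values):
#   euler_from_quaternion(0.0, 0.0, 0.0, 1.0)              -> (0.0, 0.0, 0.0)   identity rotation
#   euler_from_quaternion(0.0, 0.0, 0.7071068, 0.7071068)  -> (0.0, 0.0, 90.0)  90 degree yaw about z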
def main(args=None):
rclpy.init(args=args)
# Construct an argument parser
parser = argparse.ArgumentParser()
# Add arguments to the parser
parser.add_argument(
"-s",
"--simulation",
required=False,
action="store_true",
default=False,
help="Simulation mode: motors are not imported, a message is displayed to simuate the signal",
)
parser.add_argument(
"-d",
"--debug",
required=False,
action="store_true",
default=False,
help="Debug mode: robot will print extra log info in the console",
)
args = parser.parse_args()
robot = Robot(simulation=args.simulation, debug=args.debug)
print("Robot ready")
if args.debug:
print("Debug mode")
if args.simulation:
print("Simulation mode")
rclpy.spin(robot)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
robot.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11211691
|
<reponame>sakagarwal/python-aiplatform
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Callable, Dict
from werkzeug import Response
class BasePlugin(abc.ABC):
"""Base plugin for cloud training tools endpoints.
The plugins support registering http handlers to be used for
AI Platform training jobs.
"""
@staticmethod
@abc.abstractmethod
def setup() -> None:
"""Run any setup code for the plugin before webserver is launched."""
raise NotImplementedError
@staticmethod
@abc.abstractmethod
def can_initialize() -> bool:
"""Check whether a plugin is able to be initialized.
Used for checking if correct dependencies are installed, system requirements, etc.
Returns:
Bool indicating whether the plugin can be initialized.
"""
raise NotImplementedError
@staticmethod
@abc.abstractmethod
def post_setup_check() -> bool:
"""Check if after initialization, we need to use the plugin.
Example: Web server only needs to run for main node for training, others
just need to have 'setup()' run to start the rpc server.
Returns:
A boolean indicating whether post setup checks pass.
"""
raise NotImplementedError
@abc.abstractmethod
def get_routes(self) -> Dict[str, Callable[..., Response]]:
"""Get the mapping from path to handler.
This is the method in which plugins can assign different routes to
different handlers.
Returns:
A mapping from a route to a handler.
"""
raise NotImplementedError
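# Hedged sketch (added for illustration): a minimal concrete plugin that satisfies the
# abstract interface above; the "/healthz" route and its handler are hypothetical examples.
class ExampleHealthPlugin(BasePlugin):
    """Toy plugin that always initializes and serves a single health-check route."""
    @staticmethod
    def setup() -> None:
        pass
    @staticmethod
    def can_initialize() -> bool:
        return True
    @staticmethod
    def post_setup_check() -> bool:
        return True
    def get_routes(self) -> Dict[str, Callable[..., Response]]:
        # Map a path to a handler returning a werkzeug Response.
        return {"/healthz": lambda *args, **kwargs: Response("ok")}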
|
StarcoderdataPython
|
9639423
|
import torch
import torch.nn as nn
def L2_loss(l1,l2):
loss = nn.MSELoss()
losses = loss(l1,l2)
return losses
def L1_loss(l1,l2):
loss = nn.L1Loss()
losses = loss(l1,l2)
return losses
def cosine(l1,l2):
loss = nn.CosineSimilarity()
losses = loss(l1,l2)
return losses
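# Hedged usage sketch (added for illustration): small tensors with assumed shapes.
if __name__ == "__main__":
    a = torch.ones(2, 3)
    b = torch.zeros(2, 3)
    print(L2_loss(a, b))  # tensor(1.) -- mean squared error
    print(L1_loss(a, b))  # tensor(1.) -- mean absolute error
    print(cosine(a, a))   # tensor([1., 1.]) -- cosine similarity along dim=1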
|
StarcoderdataPython
|
1968457
|
"""Switch for Cozytouch."""
import logging
from cozytouchpy.constant import DeviceType
from homeassistant.components.switch import SwitchEntity
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import CONF_COZYTOUCH_ACTUATOR, COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set the sensor platform."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
actuator = hass.data[DOMAIN][CONF_COZYTOUCH_ACTUATOR]
devices = []
for device in coordinator.data.devices.values():
if actuator == "all":
devices.append(CozytouchSwitch(device, coordinator))
elif actuator == "pass" and device.widget == DeviceType.PILOT_WIRE_INTERFACE:
devices.append(CozytouchSwitch(device, coordinator))
elif actuator == "i2g" and device.widget == DeviceType.HEATER:
devices.append(CozytouchSwitch(device, coordinator))
async_add_entities(devices)
class CozytouchSwitch(CoordinatorEntity, SwitchEntity):
"""Header switch (on/off)."""
def __init__(self, device, coordinator):
"""Initialize switch."""
self.heater = device
self.coordinator = coordinator
@property
def unique_id(self):
"""Return the unique id of this switch."""
return self.heater.id
@property
def name(self):
"""Return the display name of this switch."""
return "{heater}".format(heater=self.heater.name)
@property
def is_on(self):
"""Return true if switch is on."""
return self.coordinator.data.devices[self.unique_id].is_on
@property
def device_class(self):
"""Return the device class."""
return "heat"
@property
def device_info(self):
"""Return the device info."""
return {
"name": self.name,
"identifiers": {(DOMAIN, self.unique_id)},
"manufacturer": "Cozytouch",
"via_device": (DOMAIN, self.heater.data["placeOID"]),
}
async def async_turn_on(self, **kwargs) -> None:
"""Turn the entity on."""
await self.heater.turn_on()
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
await self.heater.turn_off()
await self.coordinator.async_request_refresh()
|
StarcoderdataPython
|
8059873
|
<filename>my_slackclient.py
import subprocess
from slackclient import SlackClient
import requests
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
from requests.packages.urllib3.exceptions import SNIMissingWarning
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)
class my_slackclient(SlackClient):
def __init__(self, token):
super(my_slackclient, self).__init__(token)
@staticmethod
def _run_command(command):
p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
return iter(p.stdout.readline, b'')
def post_message(self, channel, text):
return self.api_call('chat.postMessage', channel = channel, text = text, as_user = True, unfurl_links = False, link_names = True)
def update_message(self, channel, text, id):
return self.api_call('chat.update', channel = channel, ts = id, text = text, as_user = True, unfurl_links = False, link_names = True)
def delete_message(self, channel, ts):
return self.api_call('chat.delete', channel = channel, ts = ts, as_user = True)
def show_is_typing(self, channel):
self.server.send_to_websocket({'type': 'typing', 'channel': channel, 'id': 1})
def get_user(self, user):
try:
json_data = self.api_call('users.info', user = user)
if 'ok' in json_data and json_data['ok'] and 'user' in json_data and 'name' in json_data['user']:
return json_data['user']
except Exception as e:
pass
return user
def upload_file(self, channel, filename):
command = 'curl -F file=@' + filename + ' -F channels=' + channel + ' -F token=' + self.token + ' https://slack.com/api/files.upload'
self._run_command(command.split())
def upload_files(self, channel, filenames):
for filename in filenames:
command = 'curl -s -F file=@' + filename + ' -F channels=' + channel + ' -F token=' + self.token + ' https://slack.com/api/files.upload'
p = subprocess.Popen(command, shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
p.communicate()
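# Hedged usage sketch (added for illustration): the token and channel are placeholders.
if __name__ == '__main__':
    sc = my_slackclient('xoxb-your-token')
    sc.post_message('#general', 'hello from my_slackclient')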
|
StarcoderdataPython
|
6427569
|
<gh_stars>10-100
import pytest
from wh_habitica import default
API_STATUS_UP = {default.JSON_STATUS: default.JSON_UP}
LOCAL_NAME = '<NAME>'
FACEBOOK_NAME = 'John Facebook'
FACEBOOK_ID = '1337'
GOOGLE_NAME = '<NAME>'
USER_EMAIL = '<EMAIL>'
API_USER = {
default.JSON_ID: 42,
default.JSON_AUTH: {
default.JSON_FACEBOOK: {},
default.JSON_GOOGLE: {},
default.JSON_LOCAL: {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
}
}
API_USER_FACEBOOK = {
default.JSON_ID: 42,
default.JSON_AUTH: {
default.JSON_LOCAL: {},
default.JSON_GOOGLE: {},
default.JSON_FACEBOOK: {
default.JSON_DISPLAY_NAME: FACEBOOK_NAME,
default.JSON_ID: FACEBOOK_ID,
}
}
}
API_USER_GOOGLE = {
default.JSON_ID: 42,
default.JSON_AUTH: {
default.JSON_LOCAL: {},
default.JSON_FACEBOOK: {},
default.JSON_GOOGLE: {
default.JSON_DISPLAY_NAME: GOOGLE_NAME,
default.JSON_GOOGLE_EMAILS: [{default.JSON_TYPE: u'account', default.JSON_VALUE: USER_EMAIL}],
}
}
}
API_USER_INVALID = {
default.JSON_ID: 42,
default.JSON_AUTH: {
default.JSON_FACEBOOK: {},
default.JSON_GOOGLE: {},
default.JSON_LOCAL: {},
'Invalid_Provider': {default.JSON_USERNAME: LOCAL_NAME, default.JSON_EMAIL: USER_EMAIL},
}
}
API_TASK = {default.JSON_DELTA: 1}
@pytest.fixture
def mock_habitica_api(monkeypatch):
monkeypatch.setattr('habitica.api.Habitica.status', lambda x: API_STATUS_UP, raising=False)
monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER, raising=False)
monkeypatch.setattr('wh_habitica.api.HabiticaApi.post_task', lambda x, task_id: API_TASK, raising=False)
@pytest.fixture
def mock_habitica_api_facebook(monkeypatch):
monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER_FACEBOOK, raising=False)
@pytest.fixture
def mock_habitica_api_google(monkeypatch):
monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER_GOOGLE, raising=False)
@pytest.fixture
def mock_habitica_api_invalid_provider(monkeypatch):
monkeypatch.setattr('habitica.api.Habitica.user', lambda x: API_USER_INVALID, raising=False)
|
StarcoderdataPython
|
6565310
|
import numpy as np
import json
def dump_beautiful_json(annotation, path: str):
    def convert(o):
        # json cannot serialize numpy scalar types, so convert them to native Python values
        if isinstance(o, np.generic):
            return o.item()
        raise TypeError
    # write the output pretty-printed (indented, keys sorted) to a file
    with open(path, "w") as json_file:
        json_file.write(json.dumps(annotation, indent=4, sort_keys=True, default=convert))
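# Hedged usage sketch (added for illustration): numpy scalars inside the dict are
# handled by `convert`, so the payload serializes cleanly; the output path is a placeholder.
if __name__ == "__main__":
    dump_beautiful_json({"score": np.float32(0.5), "label": "cat"}, "annotation.json")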
|
StarcoderdataPython
|
3285357
|
<reponame>wzh99/GSL
from typing import Optional, List
from tvm import transform
import rule
from gsl import attr, pat, op, spec, Workload, Subst
from gsl.util import Timer
class AlgCmp:
def create_workload(self) -> Workload:
raise NotImplementedError()
def get_pass(self) -> Optional[transform.Pass]:
pass
def gsl_rules(self) -> List[Subst]:
raise NotImplementedError()
def run(self):
# Create workload
wl = self.create_workload()
# Apply pass
timer = Timer()
f_pass = self.get_pass()
if f_pass is not None:
timer.begin()
f_pass(wl.mod)
print(f'Built-in pass: {timer.end()} s')
# Apply GSL rules
Subst.profile = True
for idx, subst in enumerate(self.gsl_rules()):
print(f'Rule {idx + 1}')
for _ in range(15):
subst(wl, fold_params=False)
class InceptionV3(AlgCmp):
def create_workload(self) -> Workload:
from tvm.relay.testing import inception_v3
net, params = inception_v3.get_workload()
wl = Workload(net, params)
for subst in [
rule.lower_batch_norm(),
rule.conv_mul()
]:
wl = subst(wl)
return wl
def gsl_rules(self) -> List[Subst]:
conv_attrs = ['strides', 'padding', 'dilation', 'groups']
def parallel_two_conv(num_ops: int):
# Input
x = pat.Wildcard()
w1 = pat.Variable()
w2 = pat.Variable(shape=(None, None, w1.shape[2], w1.shape[3]))
# Source pattern
conv1 = op.Conv2D(x, w1, groups=1)
conv2 = op.Conv2D(x, w2, **pat.same_attr(conv1, conv_attrs))
src = [conv1, conv2]
biases = [pat.Variable(), pat.Variable()]
if num_ops >= 2:
src = [op.BiasAdd(y, b, axis=1) for y, b in zip(src, biases)]
if num_ops >= 3:
src = [op.ReLU(y) for y in src]
# Target pattern
w = op.Concatenate((w1, w2), axis=0)
y = op.Conv2D(x, w, **pat.same_attr(conv1, conv_attrs))
if num_ops >= 2:
y = op.BiasAdd(y, op.Concatenate(biases, axis=0))
if num_ops >= 3:
y = op.ReLU(y)
split = op.Split(y, indices_or_sections=(w1.shape[0],), axis=1)
tgt = [split[0], split[1]]
# Build substitution
return Subst(src, tgt)
def parallel_three_conv(num_ops: int):
# Input
x = pat.Wildcard()
w1 = pat.Variable()
w2 = pat.Variable(shape=(None, None, w1.shape[2], w1.shape[3]))
w3 = pat.Variable(shape=(None, None, w1.shape[2], w1.shape[3]))
# Source pattern
conv1 = op.Conv2D(x, w1, groups=1)
conv2 = op.Conv2D(x, w2, **pat.same_attr(conv1, conv_attrs))
conv3 = op.Conv2D(x, w3, **pat.same_attr(conv1, conv_attrs))
src = [conv1, conv2, conv3]
biases = [pat.Variable() for _ in range(3)]
if num_ops >= 2:
src = [op.BiasAdd(y, b, axis=1) for y, b in zip(src, biases)]
if num_ops >= 3:
src = [op.ReLU(y) for y in src]
# Target pattern
w = op.Concatenate((w1, w2, w3), axis=0)
y = op.Conv2D(x, w, **pat.same_attr(conv1, conv_attrs))
if num_ops >= 2:
y = op.BiasAdd(y, op.Concatenate(biases, axis=0))
if num_ops >= 3:
y = op.ReLU(y)
split = op.Split(y, indices_or_sections=(w1.shape[0], w1.shape[0] + w2.shape[0]),
axis=1)
tgt = [split[0], split[1], split[2]]
# Build substitution
return Subst(src, tgt)
def parallel_conv_variadic(num_ops: int):
# Input
x = pat.Wildcard()
w1 = pat.Variable()
w = pat.Variable(shape=(None, None, w1.shape[2], w1.shape[3]))
b1 = pat.Variable()
b = pat.Variable()
# Source pattern
conv1 = op.Conv2D(x, w1, groups=1)
conv = op.Conv2D(x, w, **pat.same_attr(conv1, conv_attrs))
templates = [conv, w]
first = [conv1, w1]
y, y1 = conv, conv1
if num_ops >= 2:
y1 = bias_add1 = op.BiasAdd(y1, b1, axis=1)
y = bias_add = op.BiasAdd(y, b, axis=1)
templates += [bias_add, b]
first += [bias_add1, b1]
if num_ops >= 3:
relu1 = op.ReLU(y1)
y = relu = op.ReLU(y)
templates += [relu]
first += [relu1]
src = pat.Variadic(y, templates=templates, first=first, min_len=2)
# Target pattern
i = attr.Symbol()
w_inst = src(w, i)
concat = op.Concatenate(
pat.Variadic(w_inst, templates=[w_inst], index=i, length=src.length), axis=0)
y = op.Conv2D(x, concat, **pat.same_attr(conv1, conv_attrs))
if num_ops >= 2:
i = attr.Symbol()
b_inst = src(b, i)
bias_add = op.Concatenate(pat.Variadic(b_inst, templates=[b_inst], index=i,
length=src.length), axis=0)
y = op.BiasAdd(y, bias_add, axis=1)
if num_ops >= 3:
y = op.ReLU(y)
split = op.Split(y, axis=1, indices_or_sections=attr.Variadic(
lambda j: attr.ReduceIndexed(attr.BinaryOp.ADD, lambda k: src(w, k).shape[0], j + 1),
length=src.length - 1))
i = attr.Symbol()
item = split[i]
tgt = pat.Variadic(item, templates=[item], index=i)
# Build substitution
return Subst(src, tgt)
return [
parallel_two_conv(3),
parallel_three_conv(3),
parallel_conv_variadic(3),
]
class Transformer(AlgCmp):
def create_workload(self) -> Workload:
from model import transformer
return transformer.get_workload(6, 64, 4, 128)
def gsl_rules(self) -> List[Subst]:
def parallel_three_dense():
# Input
x = pat.Wildcard()
w1 = pat.Variable()
w2 = pat.Variable()
w3 = pat.Variable()
weights = [w1, w2, w3]
# Source pattern
src = [op.Dense(x, w) for w in weights]
# Target pattern
dense = op.Dense(x, op.Concatenate(weights, axis=0))
split = op.Split(dense, indices_or_sections=(w1.shape[0], w1.shape[0] + w2.shape[0]),
axis=-1)
tgt = [split[0], split[1], split[2]]
# Build substitution
return Subst(src, tgt)
return [
rule.parallel_dense(),
parallel_three_dense(),
rule.parallel_dense_variadic(),
]
class NASNet(AlgCmp):
def create_workload(self) -> Workload:
from model import nasnet
model = nasnet.get_model(6)
return Workload.from_keras(model, {'input_1': nasnet.batch_shape_nchw})
def gsl_rules(self) -> List[Subst]:
def merge_three_element_wise():
# Input
x = pat.Wildcard()
# Source pattern
ew_op = pat.OpWithTrait(spec.OpTrait.ELEMENT_WISE)
ew1 = pat.Call(ew_op, x)
ew2 = pat.Call(ew_op, x)
ew3 = pat.Call(ew_op, x)
# Target pattern
ew = pat.Call(ew_op, x)
# Build substitution
return Subst([ew1, ew2, ew3], [ew, ew, ew])
return [
rule.merge_element_wise(),
merge_three_element_wise(),
rule.merge_element_wise_variadic(),
]
if __name__ == '__main__':
for cls in [
# InceptionV3,
# Transformer,
# NASNet,
]:
cls().run()
|
StarcoderdataPython
|
3494660
|
from sklearn.cluster import KMeans
from .kmeans_torch import kmeans_torch
import torch
import numpy as np
def localize_kmeans_sklearn(threshold=120, tol=1e-4):
def localize(image, prev_location):
n_flies = prev_location.shape[0]
fly_pixels = torch.nonzero(image < threshold).type(torch.float32)
fit = KMeans(n_clusters=n_flies, n_init=1, init=prev_location, tol=tol).fit(
fly_pixels
)
locations = torch.tensor(fit.cluster_centers_)
# we do tracking at the end, so we approximate the distance by summing over all the flies.
fly_space_distance = torch.linalg.norm(
locations.sum(axis=0) - prev_location.sum(axis=0)
)
return locations, fly_space_distance
return localize
def localize_kmeans_torch(threshold=120, tol=1e-4, device="cuda"):
def localize(image, prev_location):
fly_pixels = torch.nonzero(image < threshold).type(torch.float32)
locations, _ = kmeans(fly_pixels, prev_location)
# we do tracking at the end, so we approximate the distance by summing over all the flies.
fly_space_distance = torch.linalg.norm(
locations.sum(axis=0) - prev_location.sum(axis=0)
)
return locations, fly_space_distance
kmeans = kmeans_torch(tol=tol, device=device)
return localize
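# Hedged usage sketch (added for illustration): a synthetic frame with two dark blobs,
# using the sklearn variant and the default threshold of 120; shapes and positions are assumptions.
if __name__ == "__main__":
    image = torch.full((100, 100), 255, dtype=torch.uint8)
    image[10:15, 10:15] = 0   # "fly" 1
    image[80:85, 80:85] = 0   # "fly" 2
    prev = torch.tensor([[12.0, 12.0], [82.0, 82.0]])
    localize = localize_kmeans_sklearn()
    locations, distance = localize(image, prev)
    print(locations)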
|
StarcoderdataPython
|
3526293
|
from unittest import TestCase
from funpy.fundict import FunDict
from logging_config_for_tests import logging_config
logging_config()
class TestFunDict(TestCase):
def test_fundict(self):
def filter_gr_1(k: str, v: str) -> bool:
return k != '3'
d = FunDict({1: 2, 2: 3, 3: 4})
res = (d
.map(lambda k, v: (k + 1, v))
.map(lambda k, v: (str(k), str(v)))
.print("message1")
.filter(lambda k, v: k != '2')
.filter(filter_gr_1)
.print("message2")
.to_list()
.py()
)
assert res == [('4', '4')]
|
StarcoderdataPython
|
5104170
|
from __future__ import unicode_literals
from django import forms
from django.views.generic import TemplateView, DetailView
from django.views.generic.edit import FormMixin
from django.http import HttpResponseRedirect
from multipageforms.forms.multiform import MultiForm
from multipageforms.forms.multipageform import MultiPageForm
from multipageforms.views.generic import AbstractFieldFileMapperMixin
from multipageforms.views.generic import ModelMapperMixin
from multipageforms.views.generic import UpdateMultiFormView
from multipageforms.views.generic import UpdateMultiPageFormView
from demo.demoapp.models import FormStorage, FileStorage, Person
class TextForm(forms.Form):
text = forms.CharField(required=True)
class IntegerForm(forms.Form):
integer = forms.IntegerField(required=True)
class BooleanForm(forms.Form):
boolean = forms.BooleanField(required=True)
class OptionalTextForm(forms.Form):
optional_text = forms.CharField(required=False)
class FileForm(forms.Form):
document = forms.FileField()
class OptionalFileForm(forms.Form):
optional_document = forms.FileField(required=False)
class OptionalForm(forms.Form):
optional_text = forms.CharField(required=False)
optional_document = forms.FileField(required=False)
class ChoiceForm(forms.Form):
CHOICES = (('choice1', 'choice1'), ('choice2', 'choice2'))
choice = forms.MultipleChoiceField(choices=CHOICES)
chance = forms.ChoiceField(choices=CHOICES)
class PersonForm(forms.ModelForm):
class Meta:
model = Person
fields = ('name',)
class DemoMultiForm(MultiForm):
slug = 'form1'
formclasses = (
TextForm, IntegerForm,
BooleanForm,
PersonForm,
ChoiceForm,
OptionalForm
)
class DemoMultiFormWithFiles(MultiForm):
slug = 'form1'
formclasses = (
TextForm, IntegerForm,
BooleanForm,
PersonForm,
FileForm, OptionalFileForm,
ChoiceForm,
OptionalForm
)
class IndexView(TemplateView):
template_name = 'demoapp/index.html'
class CreateMultiFormView(TemplateView):
_url = '/multiform/%i/'
def post(self, request, *args, **kwargs):
formstorage = FormStorage.objects.create(storage='{}')
url = self._url % (formstorage.pk)
return HttpResponseRedirect(url)
class CreateMultiFormWithFilesView(CreateMultiFormView):
_url = '/multiform-files/%i/'
class DemoFileMapperMixin(AbstractFieldFileMapperMixin):
filefield = 'storage'
filemodel = FileStorage
def get_files_from_field(self, fieldname):
return self.object.filestorage_set.filter(html_field_name=fieldname)
def upload_files_to_field(self, fileobj, field, instance=None):
try:
doc = self.filemodel.objects.get(form=instance, html_field_name=field)
doc.storage = fileobj
except self.filemodel.DoesNotExist:
doc = self.filemodel(storage=fileobj, html_field_name=field, form=instance)
doc.save()
return doc
class MultiFormView(UpdateMultiFormView):
template_name = 'demoapp/multiform.html'
model = FormStorage
datafield = 'storage'
form_class = DemoMultiForm
_url = '/multiform/%i/preview/'
def get_success_url(self):
obj = self.get_object()
return self._url % obj.pk
class MultiFormWithFilesView(DemoFileMapperMixin, MultiFormView):
form_class = DemoMultiFormWithFiles
_url = '/multiform-files/%i/preview/'
class PreviewMultiFormView(ModelMapperMixin, FormMixin, DetailView):
template_name = 'demoapp/preview_multiform.html'
model = FormStorage
filefield = 'storage'
filemodel = FileStorage
form_class = DemoMultiForm
datafield = 'storage'
def get_context_data(self, **kwargs):
kwargs = super(PreviewMultiFormView, self).get_context_data(**kwargs)
form_kwargs = self.get_form_kwargs()
kwargs['form'] = self.form_class(**form_kwargs)
return kwargs
class PreviewMultiFormWithFilesView(DemoFileMapperMixin, PreviewMultiFormView):
form_class = DemoMultiFormWithFiles
class DemoMultiPageForm(MultiPageForm):
class Page1MultiForm(MultiForm):
slug = 'page1'
formclasses = (TextForm, IntegerForm)
class Page2MultiForm(MultiForm):
slug = 'page2'
formclasses = (BooleanForm,)
class Page3MultiForm(MultiForm):
slug = 'page3'
formclasses = (PersonForm,)
class Page4MultiForm(MultiForm):
slug = 'page4'
formclasses = (ChoiceForm,)
class Page5MultiForm(MultiForm):
slug = 'page5'
formclasses = (OptionalForm,)
pages = (
Page1MultiForm,
Page2MultiForm,
Page3MultiForm,
Page4MultiForm,
Page5MultiForm,
)
class DemoMultiPageFormWithFiles(DemoMultiPageForm):
class Page1MultiForm(MultiForm):
slug = 'page1'
formclasses = (TextForm, IntegerForm)
class Page2MultiForm(MultiForm):
slug = 'page2'
formclasses = (BooleanForm,)
class Page3MultiForm(MultiForm):
slug = 'page3'
formclasses = (PersonForm,)
class Page4MultiForm(MultiForm):
slug = 'page4'
formclasses = (ChoiceForm,)
class Page5MultiForm(MultiForm):
slug = 'page5'
formclasses = (OptionalForm,)
class Page6MultiForm(MultiForm):
slug = 'page6'
formclasses = (FileForm, OptionalFileForm)
pages = (
Page1MultiForm,
Page2MultiForm,
Page3MultiForm,
Page4MultiForm,
Page5MultiForm,
Page6MultiForm,
)
class CreateMultiPageFormView(TemplateView):
form_class = DemoMultiPageForm
_url = '/multipageform/%i/%s'
def post(self, request, *args, **kwargs):
mpf = self.form_class()
first_page = mpf.first_page().slug
formstorage = FormStorage.objects.create(storage='{}')
url = self._url % (formstorage.pk, first_page)
return HttpResponseRedirect(url)
class CreateMultiPageFormWithFilesView(CreateMultiPageFormView):
_url = '/multipageform-files/%i/%s'
class MultiPageFormView(UpdateMultiPageFormView):
template_name = 'demoapp/multipageform.html'
model = FormStorage
form_class = DemoMultiPageForm
datafield = 'storage'
_url = '/multipageform/%i/%s/'
def get_success_url(self):
obj = self.get_object()
page = self.get_form_class()
return self._url % (obj.pk, page.slug)
class MultiPageFormWithFilesView(DemoFileMapperMixin, MultiPageFormView):
form_class = DemoMultiPageFormWithFiles
_url = '/multipageform-files/%i/%s/'
class PreviewMultiPageFormView(ModelMapperMixin, FormMixin, DetailView):
template_name = 'demoapp/preview_multipageform.html'
model = FormStorage
form_class = DemoMultiPageForm
datafield = 'storage'
def get_context_data(self, **kwargs):
kwargs = super(PreviewMultiPageFormView, self).get_context_data(**kwargs)
form_kwargs = self.get_form_kwargs()
kwargs['pages'] = self.form_class(**form_kwargs)
kwargs['pageslug'] = 'preview'
return kwargs
class PreviewMultiPageFormWithFilesView(DemoFileMapperMixin, PreviewMultiPageFormView):
form_class = DemoMultiPageFormWithFiles
|
StarcoderdataPython
|
1732234
|
<reponame>JasonLearning/dzdp_spider
class ValidationFailure(RuntimeError):
"""
Raised by :meth:`Model.validate()` when the value given for a particular
field is not valid.
:ivar field_name: The value of the field's ``name`` attribute.
:ivar description: A description of the failure.
"""
def __init__(self, field_name, description):
self.field_name = field_name
self.description = description
def __str__(self):
return "In field '%s': %s" % (self.field_name, self.description)
class UnknownAttribute(ValidationFailure):
"""
Raised by :meth:`Model.validate()` when ``allow_unknown_data`` is ``False``
and an unknown attribute is encountered.
Inherits from :class:`.ValidationFailure`.
"""
def __init__(self, attribute_name):
self.attribute_name = attribute_name
def __str__(self):
return "Unknown attribute '%s'." % (self.attribute_name, )
|
StarcoderdataPython
|
1932934
|
<filename>examples/example_deviceiocontrol/processes.py
from process_base import *
from targets import *
import subprocess
import os
class ProcessDeviceIo(ProcessBase):
def __init__(self, Controller, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose, logger):
# Specific options
self.path_to_exe = b"C:\\Windows\\System32\\notepad.exe"
self.command_line = b"notepad.exe"
self.logger = logger
# Initialize
self.initialize(Controller, self.__class__.__name__, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose)
def on_debugger_attached(self, Engine):
# Set the types
self.Engine = Engine
self.types = meddle_types(Engine)
# Add the targets
Engine.AddTarget(Target_Handles)
Engine.AddTarget(Target_DeviceIoControl)
# Handle process loaded
Engine.HandleProcessLoaded()
# Start an auto-it script
try:
subprocess.Popen(['autoit3.exe', os.path.join(os.path.dirname(__file__), "..", "autoit", "notepad_print.au3"), str(self.pid), ">nul"], shell=True)
except:
print "Warning: autoit3.exe not found on path. Please install it and add it to path to increase the attack surface."
# Resume the process that we created suspended. This is called just after the debugger has been attached.
if self.start_th >= 0:
windll.kernel32.ResumeThread(self.start_th);
def log_csv(self, fields):
self.logger.log_event(fields)
|
StarcoderdataPython
|
4828442
|
# Generated by Django 2.2.12 on 2020-05-29 12:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('video_pipeline', '0003_coursevideouploadsenabledbydefault_videouploadsenabledbydefault'),
]
operations = [
migrations.CreateModel(
name='VEMPipelineIntegration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('client_name', models.CharField(default='VEM-Prod', help_text='Oauth client name of VEM service.', max_length=100)),
('api_url', models.URLField(help_text='video encode manager API URL.', verbose_name='Internal API URL')),
('service_username', models.CharField(default='vem_service_user', help_text='Username created for VEM Integration, e.g. vem_service_user.', max_length=100)),
('changed_by', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
StarcoderdataPython
|
5110816
|
<filename>synced_side_bar_watcher.py
import os
import sublime
import sublime_plugin
isNotSyncedSideBarEnabled = True
class SyncedSideBarRevealInSideBarCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.run_command ("reveal_in_side_bar")
def is_visible(self):
# print( 'isNotSyncedSideBarEnabled: %s, not not self.window.folders: %s' % ( isNotSyncedSideBarEnabled, not not self.window.folders() ) )
return isNotSyncedSideBarEnabled and not not self.window.folders()
def plugin_loaded():
global isNotSyncedSideBarEnabled
userSettings = sublime.load_settings('Preferences.sublime-settings')
packageSettings = sublime.load_settings('SyncedSideBar.sublime-settings')
def updateIsSyncedSideBarEnabled():
# print(' updateIsSyncedSideBarEnabled!!!!')
updateGlobalData( packageSettings, is_package_enabled( userSettings, "SyncedSideBar" ) )
def updateGlobalData( packageSettings, isEnabled ):
global isNotSyncedSideBarEnabled
if isEnabled:
isEnabled = packageSettings.get( "reveal-on-activate" )
isNotSyncedSideBarEnabled = not isEnabled
else:
isNotSyncedSideBarEnabled = True
# print( 'isNotSyncedSideBarEnabled: ' + str( isNotSyncedSideBarEnabled ) )
def read_pref_async():
# print('READ_PREF_ASYNC!!!!')
updateIsSyncedSideBarEnabled()
def read_user_preferences():
# print('READ_package_PREFERENCES!!!!')
userSettings = sublime.load_settings('Preferences.sublime-settings')
updateIsSyncedSideBarEnabled()
def read_package_preferences():
# print('READ_package_PREFERENCES!!!!')
packageSettings = sublime.load_settings('SyncedSideBar.sublime-settings')
updateIsSyncedSideBarEnabled()
# read initial setting, after all packages being loaded
sublime.set_timeout_async( read_pref_async, 10000 )
# listen for changes
packageSettings.add_on_change( "Preferences", read_user_preferences )
packageSettings.add_on_change( "SyncedSideBar", read_package_preferences )
# print( packageSettings.get( "reveal-on-activate" ) )
# print( userSettings.get( "ignored_packages" ) )
def is_package_enabled( userSettings, package_name ):
# print( "is_package_enabled = " + sublime.packages_path()
# + "/All Autocomplete/ is dir? " \
# + str( os.path.isdir( sublime.packages_path() + "/" + package_name ) ) )
# print( "is_package_enabled = " + sublime.installed_packages_path()
# + "/All Autocomplete.sublime-package is file? " \
# + str( os.path.isfile( sublime.installed_packages_path() + "/" + package_name + ".sublime-package" ) ) )
ignoredPackages = userSettings.get('ignored_packages')
if ignoredPackages is not None:
return ( os.path.isdir( sublime.packages_path() + "/" + package_name ) \
or os.path.isfile( sublime.installed_packages_path() + "/" + package_name + ".sublime-package" ) ) \
and not package_name in ignoredPackages
return os.path.isdir( sublime.packages_path() + "/" + package_name ) \
or os.path.isfile( sublime.installed_packages_path() + "/" + package_name + ".sublime-package" )
class SyncedSideBarToggleSideBarCommand(sublime_plugin.WindowCommand):
def run(self):
# if self.window.is_sidebar_visible():
# self.window.run_command ("toggle_side_bar")
# else:
self.window.run_command ("reveal_in_side_bar")
|
StarcoderdataPython
|
1784990
|
from socket import *
import threading
from threading import Thread
import tkinter
import pyaudio
import time
def Receive():
while True:
try:
msg = client_socket.recv(BuffferSize).decode("utf8")
if msg[0:12] == "{modifyList}":
setNameList(msg[12:])
else:
msg_list.insert(tkinter.END, msg)
except OSError:
break
def setNameList(ll):
online_list.delete('0','end')
name_list = ll.split(",")
for i in range(len(name_list)):
online_list.insert(tkinter.END, name_list[i])
return
def Receive_audio():
CHUNK = 1024
FORMAT = 8
CHANNELS = 2
RATE = 44100
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True)
Break = False
first_time_end = threading.local()
first_time_end = True
while True:
try:
data = audio_socket.recv(CHUNK)
except:
p.terminate()
sn_socket.close()
audio_socket.close()
break
while len(data) > 0 and any(data):
first_time_end = True
if Playing:
stream.write(data)
try:
data = audio_socket.recv(CHUNK)
except:
p.terminate()
sn_socket.close()
audio_socket.close()
Break = True
break
else:
p.terminate()
sn_socket.close()
audio_socket.close()
Break = True
break
if first_time_end:
try:
song_name = sn_socket.recv(1024).decode("utf8")
except:
p.terminate()
sn_socket.close()
audio_socket.close()
break
playing_name.config(text=song_name)
first_time_end = False
if Break:
break
def Send(event=None):
msg = my_msg.get()
my_msg.set("")
client_socket.send(bytes(msg, "utf8"))
def Leave(event=None):
my_msg.set("{quit}")
Send()
audio_socket.close()
client_socket.close()
sn_socket.close()
Window.quit()
def PlayPause(event=None):
global Playing, Host, audio_port, audio_socket, sn_socket, sn_port
if Playing:
audio_socket.close()
play_button.config(text="▶︎")
playing_name.config(text="None")
Playing = False
else:
Playing = True
play_button.config(text="◼︎")
audio_socket = socket(AF_INET, SOCK_STREAM)
audio_socket.connect((Host, audio_port))
sn_socket = socket(AF_INET, SOCK_STREAM)
sn_socket.connect((Host, sn_port))
song_name = sn_socket.recv(1024).decode("utf8")
playing_name.config(text=song_name)
audio_receive_thread = Thread(target=Receive_audio)
audio_receive_thread.start()
def On_Closing(event=None):
my_msg.set("{quit}")
Send()
audio_socket.close()
client_socket.close()
sn_socket.close()
Window.quit()
# tkinter gui
Window = tkinter.Tk()
Window.title("Chat Room")
Window.geometry("608x427+379+173")
Window.minsize(1, 1)
Window.maxsize(1425, 870)
Window.resizable(1, 1)
# main message display
msg_list = tkinter.Listbox(Window, font=('Arial', 14))
msg_list.place(relx=0.033, rely=0.141, relheight=0.714, relwidth=0.683)
msg_list.configure(background="#d9d9d9")
msg_list.configure(relief="flat")
my_msg = tkinter.StringVar()
my_msg.set("Type your messages here...")
# online label
online_label = tkinter.Label(Window, font=('Arial', 17))
online_label.place(relx=0.757, rely=0.164, height=28, width=106)
online_label.configure(background="#ffffff")
online_label.configure(text="Online User")
# online list
online_list = tkinter.Listbox(Window, font=('Arial', 17), justify = "center")
online_list.place(relx=0.74, rely=0.234, relheight=0.571, relwidth=0.204)
online_list.configure(background="#90A4AE")
online_list.configure(relief="flat")
# typing field
entry_field = tkinter.Entry(Window, font=('Arial', 14), textvariable=my_msg)
entry_field.bind("<Return>", Send)
entry_field.place(relx=0.03, rely=0.867,height=40, relwidth=0.683)
# send button
send_button = tkinter.Button(Window, text="Send", font=('Arial', 20), command=Send)
send_button.place(relx=0.757, rely=0.867,width=100, height=40)
send_button.configure(foreground="black")
send_button.configure(highlightbackground="#1565C0")
send_button.configure(relief="flat")
# leave button
leave_button = tkinter.Button(Window, text="Leave", font=('Arial', 14), command=Leave)
leave_button.place(relx=0.872, rely=0.023, height=28, width=69)
leave_button.configure(foreground="black")
leave_button.configure(highlightbackground="#D32F2F")
# play button
play_button = tkinter.Button(Window, text="▶︎", font=('Arial', 25), command=PlayPause)
play_button.configure(anchor = 's')
play_button.place(relx=0.033, rely=0.023, height=40, width=60)
play_button.configure(highlightbackground="#FFC400")
# now playing label
now_playing = tkinter.Label(Window, text="Now Playing:", font=('Arial', 18))
now_playing.place(relx=0.140, rely=0.023, height=48)
# playing name label
playing_name = tkinter.Label(Window, text="None", font=('Arial', 18))
playing_name.place(relx=0.320, rely=0.047, height=28)
Window.protocol("WM_DELETE_WINDOW", On_Closing)
# main
Host = '127.0.0.1'
Port = 12000
BuffferSize = 1024
Addr = (Host, Port)
Playing = False
name_list = []
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(Addr)
audio_socket = socket(AF_INET, SOCK_STREAM)
audio_port = 12002
sn_socket = socket(AF_INET, SOCK_STREAM)
sn_port = 12003
receive_thread = Thread(target=Receive)
receive_thread.start()
tkinter.mainloop() # loop GUI execution.
receive_thread.join()
|
StarcoderdataPython
|
11307105
|
<reponame>vyvojer/django-chatbot
import logging
from django_chatbot.models import Update
from testapp.models import Note
log = logging.getLogger(__name__)
def default(update: Update):
update.message.reply("I don't understand you :( /help")
def start(update: Update):
update.message.reply("""
Command list:
/help - help
/add - add note
/delete - delete note
/count - count notes
""") # noqa
def count(update: Update):
user = update.message.from_user
note_count = Note.objects.filter(user=user).count()
update.message.reply(f"Note(s) count: {note_count}")
|
StarcoderdataPython
|
12852277
|
<reponame>dcdanko/MetaSUB_CAP<filename>scripts/alpha_diversity_stats.py
#! /usr/bin/env python3
import sys
import math
import argparse as ap
from json import dumps as jdumps
from random import choices
class LevelNotFoundException(Exception):
pass
def checkLevel(taxon, level):
if level == 'species':
return ('s__' in taxon) and ('t__' not in taxon)
elif level == 'genus':
return ('g__' in taxon) and ('s__' not in taxon)
raise LevelNotFoundException()
class Sample:
def __init__(self, tool, level):
self.tool = tool
self.level = level
self.abunds = {}
self._total = None
def addLine(self, line):
taxon, abund = line.split()
if checkLevel(taxon, self.level):
self.abunds[taxon] = float(abund)
@classmethod
def parseMPA(ctype, tool, mpaFile, level):
sample = Sample(tool, level)
with open(mpaFile) as mF:
for line in mF:
sample.addLine(line)
return sample
def subset(self, n):
if n == self.total():
return self
brkpoints = [0]
rmap = {}
for i, (key, val) in enumerate(self.abunds.items()):
brkpoints.append(brkpoints[i] + val)
rmap[i] = key
i = 0
outAbunds = {}
indices = range(int(self.total()))
indices = sorted(choices(indices, k=n))
for ind in indices:
while ind >= brkpoints[i + 1]:
i += 1
key = rmap[i]
try:
outAbunds[key] += 1
except KeyError:
outAbunds[key] = 1
outSamp = Sample(self.tool, self.level)
outSamp.abunds = outAbunds
return outSamp
def total(self):
if self._total is None:
self._total = sum(self.abunds.values())
return self._total
def richness(self):
return len(self.abunds)
def shannonIndex(self):
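        # Shannon index: H = -sum(p * ln(p)) over per-taxon proportions p.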
H = 0
for count in self.abunds.values():
p = count / self.total()
assert p <= 1
H += p * math.log(p)
if H < 0:
H *= -1
return H
def ginisimpson(self):
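        # Gini-Simpson index: 1 - sum(p^2) over per-taxon proportions p.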
H = 0
for count in self.abunds.values():
p = count / self.total()
assert p <= 1
H += p * p
H = 1 - H
return H
def chao1(self):
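        # Chao1 estimate: observed richness + F1^2 / (2 * F2), where F1 and F2
        # are the singleton and doubleton counts (F2 gets a pseudocount below).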
sings, doubs = 0, 1 # give doubles a pseudocount to avoid div by zero
for val in self.abunds.values():
if val == 1:
sings += 1
elif val == 2:
doubs += 1
est = (sings * sings) / (2 * doubs)
return self.richness() + est
def getSubsets(N):
vals = [1, 5, 10, 100, 500, 1000, 10 * 1000]
vals = [el * 1000 for el in vals]
out = []
for val in vals:
if val < N:
out.append(val)
else:
out.append(N)
break
return out
def handleCounts(tool, fname):
obj = {
'species': {
'richness': {},
'shannon_index': {},
'gini-simpson': {},
'chao1': {}
},
'genus': {
'richness': {},
'shannon_index': {},
'gini-simpson': {},
'chao1': {}
}
}
for level in obj.keys():
sample = Sample.parseMPA(tool, fname, level)
for subsetSize in getSubsets(sample.total()):
subsample = sample.subset(subsetSize)
key = str(subsetSize)
if subsample == sample:
key = 'all_reads'
obj[level]['shannon_index'][key] = subsample.shannonIndex()
obj[level]['richness'][key] = subsample.richness()
obj[level]['gini-simpson'][key] = subsample.ginisimpson()
obj[level]['chao1'][key] = subsample.chao1()
return obj
def handleProportions(tool, fname):
obj = {
'species': {
'richness': {},
'shannon_index': {},
'gini-simpson': {}
},
'genus': {
'richness': {},
'shannon_index': {},
'gini-simpson': {}
}
}
for level in obj.keys():
sample = Sample.parseMPA(tool, fname, level)
key = 'all_reads'
obj[level]['richness'][key] = sample.richness()
obj[level]['shannon_index'][key] = sample.shannonIndex()
obj[level]['gini-simpson'][key] = sample.ginisimpson()
return obj
def main():
args = parseArgs()
outobj = {}
for mpaFilePair in args.mpa_files:
tool, mpaFile = mpaFilePair.split(',')
if tool.lower() == 'kraken':
outobj['kraken'] = handleCounts(tool, mpaFile)
elif tool.lower() == 'metaphlan2':
outobj['metaphlan2'] = handleProportions(tool, mpaFile)
else:
sys.stderr.write('tool {} unsupported'.format(tool))
sys.stdout.write(jdumps(outobj))
def parseArgs():
parser = ap.ArgumentParser()
parser.add_argument('mpa_files', nargs='+',
help='pairs of tool_name,mpa_file')
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
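# Example invocation (illustrative only; file names are placeholders):
#   python alpha_diversity_stats.py kraken,sample.kraken.mpa metaphlan2,sample.mp2.mpa > alpha_diversity.json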
|
StarcoderdataPython
|
4856670
|
<reponame>fusion-research/TrajectoryNet<gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import math
import numpy as np
import tensorflow as tf
from sklearn import preprocessing
import os
import inspect
import sys
import datetime
import cProfile
from enum import Enum
from sklearn import metrics
from sklearn.metrics import recall_score
from sklearn.metrics import average_precision_score
import threading
from tensorflow.python.platform import flags
from sklearn.metrics import confusion_matrix
from tensorflow.python.client import timeline
from customized_activations import maxout
from customized_rnncell import NewGRUCell
import Learning_rate
import Monitor
from param import RNNType
from MyThread import MyThread
from Log import Log
from Evaluate import evaluate_accuracy, evaluate_stat, evaluate_confusion
import Data
import Config
# for testing only
import cProfile
# check num of parameters
if len(sys.argv) < 2:
dconfile = 'config.json'
elif (sys.argv[1].isdigit()):
dconfile = 'config.json'
test_task = int(sys.argv[1]) # speed up testing
else:
dconfile = sys.argv[1]
logPath = './log/'
dataPath = './data/'
conf = Config.DataConfig(confile=dconfile)
task = conf.task
# overwrite testing task
try:
test_task
except NameError:
pass
else:
conf.test_id = [test_task]
# this data are generated from create_npy.py
x_file = 'x_mobility_context.npy'
y_file = 'y_mobility_point.npy'
mmsi_file = 'mmsi_mobility_point.npy'
# selection of cell type
rnnType = RNNType.GRU_b
gpuMode = conf.useGPU
exp_seq_len = conf.truncated_seq_len
deep_output = False
use_dropout = False
weight_initializer = conf.weight_initializer
evaluate_freq = conf.evaluate_freq
bias_initializer = tf.random_uniform_initializer(0, 0.001)
if conf.activation == "maxout":
rnnCell = NewGRUCell
activation_function = tf.nn.tanh
else:
rnnCell = tf.contrib.rnn.GRUCell
if conf.activation == "sigmoid":
activation_function = tf.nn.sigmoid
elif conf.activation == "relu":
activation_function = tf.nn.relu
else:
activation_function = tf.nn.tanh
lr = Learning_rate.Learning_rate(global_lr=0.001, decay_rate=0.999, decay_step=50)
# load data
x = np.load(dataPath + x_file)
y = np.load(dataPath+y_file)
mmsi = np.load(dataPath+mmsi_file)
# feature selection
def filter_features(x):
print("warning: not all featuers are used")
x = x[:, :, 0:40]
return x
#x = filter_features(x)
def filter_classes(x, y, mmsi, cls):
valid_index = np.concatenate([np.where(mmsi == i) for i in cls], axis=1)[0]
num_features = x.shape[2]
(x, y, mmsi) = Data.Data.reorganizeSeq(x, y, mmsi, exp_seq_len)
num_examples = x.shape[0]
unique_mmsi = np.unique(mmsi[0])
num_classes = len(np.unique(y))
test_vessel = conf.test_id
val_vessel = conf.val_id
if conf.testmode == "lobo":
(train_index, test_index, valid_index) = Data.Data.splitDataset(mmsi[0], test_vessel, val_vessel)
elif conf.testmode == "random":
(train_index, test_index, valid_index) = Data.Data.randomSplitDataset(mmsi[0], train_perc = conf.train_ratio, val_perc = conf.val_ratio)
print(train_index)
train_seq_len = mmsi[1][train_index]
test_seq_len = mmsi[1][test_index]
valid_seq_len = mmsi[1][valid_index]
num_class = np.unique(y).size
log = Log(task, logPath, num_class)
monitor = Monitor.Monitor(loss=True, num_class=num_class)
def encode_label(y):
"""encode label into a matrix based on the number of classes"""
num_class = np.unique(y).size
if num_class > 2: # multi-class
lb = preprocessing.LabelBinarizer()
lb.fit(range(num_class))
labels = np.array([lb.transform(i) for i in y])
#labels = lb.transform(y)
else: # 2-class
# the labels are stored in reserve in the numpy array
# fishing is labeled 0
Y0 = np.logical_not(y) * 1 # Y1 represents fishing
Y1 = y # Y0 represents non-fishing
labels = np.array([Y0, Y1])
labels = labels.transpose(1,2,0) # dim: [example; length; classes]
return labels
#labels = encode_label(y) # no need to encode y
labels = y
def get_all_data(conf):
"""generate data for all vessels"""
early = mmsi[1]
X = x.transpose((1, 0, 2))
return (X, labels, early)
class VesselModel(object):
"""The vessel classification lstm model."""
def __init__(self, config):
self.num_threads = conf.num_threads
self.hidden_size = conf.hidden_size
self.learning_rate = conf.learning_rate
self.num_layers = conf.num_layers
self.num_epochs = conf.num_epochs
self.batch_size = config.batch_size
self.is_training = config.is_training
self.is_validation = config.is_validation
self.current_step = tf.Variable(0)
# place holder for sequence that we will provide at runtime
# batch size will be different for training and testing set
self._input_data = tf.placeholder(tf.float32, [exp_seq_len, self.batch_size, num_features], name="input-data")
# target for one batch
self._targets = tf.placeholder(tf.int64, [self.batch_size, exp_seq_len], name = "y-target")
# get the length of all training and test sequences
if self.is_training:
self.seq_len = exp_seq_len*self.batch_size #sum(train_seq_len)
elif self.is_validation:
self.seq_len = sum(valid_seq_len)
else:
self.seq_len = sum(test_seq_len)
with tf.name_scope("lstm-cell") as scope:
rnn_cell = self.get_rnn_cell()
with tf.name_scope("multi-rnn-cell") as scope:
cell = self.get_multi_rnn_cell(rnn_cell)
# what timesteps we want to stop at, notice it's different for each batch
self._early_stop = tf.placeholder(tf.int64, shape=[self.batch_size], name = "early-stop")
self.set_initial_states(cell)
#with tf.name_scope("dropout") as scope:
# if self.is_training and config.keep_prob < 1:
# self._input_data = tf.nn.dropout(self._input_data, config.keep_prob)
outputs = []
# Creates a recurrent neural network specified by RNNCell "cell
# inputs for rnn needs to be a list, each item being a timestep.
# Args:
# cell: An instance of RNNCell.
# inputs: A length T list of inputs, each a tensor of shape
# [batch_size, cell.input_size].
# initial_state: (optional) An initial state for the RNN. This must be
# a tensor of appropriate type and shape [batch_size x cell.state_size].
# dtype: (optional) The data type for the initial state. Required if
# initial_state is not provided.
# sequence_length: Specifies the length of each sequence in inputs.
# An int32 or int64 vector (tensor) size [batch_size]. Values in [0, T).
# scope: VariableScope for the created subgraph; defaults to "RNN".
#
# Returns:
# A pair (outputs, state) where:
# outputs is a length T list of outputs (one for each input)
# state is the final state
with tf.name_scope("rnn-outputs") as scope:
self.get_outputs(cell)
self.valid_target = self.get_valid_sequence(tf.reshape(self._targets, [exp_seq_len * self.batch_size]), num_classes) # valid digit target
self.lstm_output = self.valid_output
if deep_output:
with tf.name_scope("deep-output-layer") as scope:
softmax_size = self.hidden_size * 2 if rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b else self.hidden_size
softmax_wout = tf.get_variable("softmax_w_deepout", [softmax_size, self.higher_hidden_size])
softmaxb_dout = tf.get_variable("softmax_b_deepout", [self.higher_hidden_size])
self.valid_output = tf.sigmoid(tf.matmul(self.valid_output, softmax_wout) + softmaxb_dout)
if use_dropout:
self.valid_output = tf.nn.dropout(self.valid_output, keep_prob = 0.5)
#softmax_wout2 = tf.get_variable("softmax_w_deepout2", [self.hidden_size, self.hidden_size])
#softmaxb_dout2 = tf.get_variable("softmax_b_deepout2", [self.hidden_size])
#self.valid_output = tf.matmul(self.valid_output, softmax_wout2) + softmaxb_dout2
#if use_dropout:
# self.valid_output = tf.nn.dropout(self.valid_output, keep_prob = 0.5)
with tf.name_scope("softmax-W") as scope:
softmax_w = self.get_softmax_layer()
self.w = softmax_w
with tf.name_scope("softmax-b") as scope:
softmax_b = tf.get_variable("softmax_b", [num_classes], initializer=bias_initializer)
with tf.name_scope("softmax-predictions") as scope:
self._predictions = tf.matmul(self.valid_output, softmax_w) + softmax_b
self._prob_predictions = tf.nn.softmax(self._predictions)
self.digit_predictions = tf.argmax(self._prob_predictions, axis=1)
with tf.name_scope("confusion-matrix") as scope:
self.confusion_matrix = tf.confusion_matrix(self.valid_target, self.digit_predictions)
# Weighted cross-entropy loss for a sequence of logits (per example).
# at: tensorflow/python/ops/seq2seq.py
# Args:
# logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
# targets: List of 1D batch-sized int32 Tensors of the same length as logits.
# weights: List of 1D batch-sized float-Tensors of the same length as logits.
with tf.name_scope("seq2seq-loss-by-example") as scpoe:
self.loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[self._predictions],
[self.valid_target],
[tf.ones([int(self.getTensorShape(self.valid_target)[0])])])
self._cost = tf.reduce_mean(self.loss)
self._accuracy = tf.contrib.metrics.accuracy(self.digit_predictions, self.valid_target)
# Add summary ops to collect data
if conf.tensorboard:
self.w_hist = tf.summary.histogram("weights", softmax_w)
self.b_hist = tf.summary.histogram("biases", softmax_b)
self.y_hist_train = tf.summary.histogram("train-predictions", self._predictions)
self.y_hist_test = tf.summary.histogram("test-predictions", self._predictions)
self.mse_summary_train = tf.summary.scalar("train-cross-entropy-cost", self._cost)
self.mse_summary_test = tf.summary.scalar("test-cross-entropy-cost", self._cost)
with tf.name_scope("optimization") as scope:
self._train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self._cost, global_step=self.current_step)
#self._train_op = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self._cost, global_step=self.current_step)
def get_rnn_cell(self):
"""Create rnn_cell based on RNN type"""
if rnnType == RNNType.LSTM_b:
lstm_cell_fw = tf.contrib.rnn.LSTMCell(self.hidden_size, state_is_tuple=True, use_peepholes=conf.peephole)
lstm_cell_bw = tf.contrib.rnn.LSTMCell(self.hidden_size, state_is_tuple=True, use_peepholes=conf.peephole)
return (lstm_cell_fw, lstm_cell_bw)
elif rnnType == RNNType.LSTM_u:
lstm_cell = rnn_cell.BasicLSTMCell(self.hidden_size, forget_bias=1, state_is_tuple=True, orthogonal_scale_factor=conf.init_scale, initializer = weight_initializer)
return lstm_cell
elif rnnType == RNNType.GRU:
gru_cell = rnnCell(self.hidden_size, activation=activation_function)
return gru_cell
else:
lstm_cell_fw = rnnCell(self.hidden_size, activation=activation_function)
lstm_cell_bw = rnnCell(self.hidden_size, activation=activation_function)
return (lstm_cell_fw, lstm_cell_bw)
def get_multi_rnn_cell(self, rnn_cell):
"""Create multiple layers of rnn_cell based on RNN type"""
if rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b:
(lstm_cell_fw, lstm_cell_bw) = rnn_cell
cell_fw = tf.contrib.rnn.MultiRNNCell([rnnCell(self.hidden_size, activation=activation_function) for _ in range(self.num_layers)])
cell_bw = tf.contrib.rnn.MultiRNNCell([rnnCell(self.hidden_size, activation=activation_function) for _ in range(self.num_layers)])
return (lstm_cell_fw, lstm_cell_bw)
elif rnnType == RNNType.LSTM_u or rnnType == RNNType.GRU:
cell = tf.contrib.rnn.MultiRNNCell([rnnCell(self.hidden_size, activation=activation_function) for _ in range(self.num_layers)])
return cell
def set_initial_states(self, cell):
"""set initial states based on RNN types"""
# Initial state of the LSTM memory
# If `state_size` is an int or TensorShape, then the return value is a
# `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
# If `state_size` is a nested list or tuple, then the return value is
# a nested list or tuple (of the same structure) of `2-D` tensors with
# the shapes `[batch_size x s]` for each s in `state_size`.
if rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b:
(cell_fw, cell_bw) = cell
self.initial_state_fw = cell_fw.zero_state(self.batch_size, tf.float32)
self.initial_state_bw = cell_bw.zero_state(self.batch_size, tf.float32)
elif rnnType == RNNType.LSTM_u or rnnType == RNNType.GRU:
self._initial_state = cell.zero_state(self.batch_size, tf.float32)
def get_outputs(self, cell):
""" get output tensor of the RNN"""
# At: tensorflow/tensorflow/python/ops/rnn.py
# Args:
# Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`. Instead,
# it is a single `Tensor` where the maximum time is either the first or second
# dimension (see the parameter `time_major`). The corresponding output is
# a single `Tensor` having the same number of time steps and batch size.
#
# If time_major == False (default), this must be a tensor of shape:
# `[batch_size, max_time, input_size]`, or a nested tuple of such elements
# If time_major == True, this must be a tensor of shape:
# `[max_time, batch_size, input_size]`, or a nested tuple of such elements
#
# Returns:
# If time_major == False (default), this will be a `Tensor` shaped:
# `[batch_size, max_time, cell.output_size]`.
# If time_major == True, this will be a `Tensor` shaped:
# `[max_time, batch_size, cell.output_size]`.
if rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b:
(cell_fw, cell_bw) = cell
self.outputs, self.state = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, self._input_data, sequence_length=self._early_stop, initial_state_fw=self.initial_state_fw, initial_state_bw=self.initial_state_bw, time_major=True, dtype='float32')
output_fw, output_bw = self.outputs
output_fw = tf.transpose(output_fw, perm=[1, 0, 2])
output_bw = tf.transpose(output_bw, perm=[1, 0, 2])
outputs = tf.concat(axis=2, values=[output_fw, output_bw])
# Concatenates tensors along one dimension.
# this will flatten the dimension of the matrix to [batch_size * num_steps, num_hidden_nodes]
# However, this is not the true output sequence, since padding added a number of empty elements
# Extra padding elements should be removed from the output sequence.
# Here first concatenate all vessels into one long sequence, including paddings
self.output = tf.reshape(tf.concat(axis=0, values=outputs), [exp_seq_len * self.batch_size, self.hidden_size*2])
# Remove padding here
self.valid_output = self.get_valid_sequence(self.output, self.hidden_size*2)
elif rnnType == RNNType.LSTM_u or rnnType == RNNType.GRU:
self.outputs, self.state = tf.nn.dynamic_rnn(cell, self._input_data, sequence_length=self._early_stop, initial_state=self._initial_state, time_major=True, dtype='float32')
# This is a workaround with tf.reshape().
# To make data with the same vessel continguous after reshape,
# we need to transpose it first.
outputs = tf.transpose(self.outputs, perm=[1, 0, 2])
# Concatenates tensors along one dimension.
# this will flatten the dimension of the matrix to [batch_size * num_steps, num_hidden_nodes]
# However, this is not the true output sequence, since padding added a number of empty elements
# Extra padding elements should be removed from the output sequence.
# Here first concatenate all vessels into one long sequence, including paddings
self.output = tf.reshape(tf.concat(axis=0, values=outputs), [exp_seq_len * self.batch_size, self.hidden_size])
# Remove padding here
self.valid_output = self.get_valid_sequence(self.output, self.hidden_size)
def get_softmax_layer(self):
if deep_output:
softmax_w = tf.get_variable("softmax_w", [self.higher_hidden_size, num_classes])
elif rnnType == RNNType.LSTM_b or rnnType == RNNType.GRU_b:
softmax_w = tf.get_variable("softmax_w", [self.hidden_size*2, num_classes])
elif rnnType == RNNType.LSTM_u or rnnType == RNNType.GRU:
softmax_w = tf.get_variable("softmax_w", [self.hidden_size, num_classes])
return softmax_w
def get_valid_sequence(self, seq, feature_size):
"""remove padding from sequences"""
if self.is_training:
stop = train_seq_len
elif self.is_validation:
stop = valid_seq_len
else:
stop = test_seq_len
valid_sequence_list = []
for i in range(self.batch_size):
if len(tf.Tensor.get_shape(seq)) == 2:
sub_seq = tf.slice(seq, [exp_seq_len*i, 0], [ stop[i], feature_size])
else:
sub_seq = tf.slice(seq, [exp_seq_len*i], [stop[i]])
valid_sequence_list.append(sub_seq)
valid_sequence = tf.concat(axis=0, values=valid_sequence_list)
return valid_sequence
def getTensorShape(this, tensor):
return tf.Tensor.get_shape(tensor)
@property
def prob_predictions(self):
return self._prob_predictions
@property
def input_data(self):
return self._input_data
@property
def inputs(self):
return self._inputs
@property
def targets(self):
return self._targets
@property
def predictions(self):
return self._predictions
@property
def early_stop(self):
return self._early_stop
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def accuracy(self):
return self._accuracy
@property
def train_op(self):
return self._train_op
@property
def final_state(self):
return self._final_state
def test_model(sess, minibatch):
# test and validate model
if conf.test_mode:
run_batch(sess, mtest, test_data, tf.no_op(), minibatch)
t_train = MyThread(run_batch, (sess, m, train_data, tf.no_op(), minibatch))
t_test = MyThread(run_batch, (sess, mtest, test_data, tf.no_op(), minibatch))
t_val = MyThread(run_batch, (sess, mval, val_data, tf.no_op(), minibatch))
t_train.start()
t_test.start()
t_val.start()
t_train.join()
result_train = t_train.get_result()
t_test.join()
result_test = t_test.get_result()
t_val.join()
result_val = t_val.get_result()
result = result_train + result_test + result_val
monitor.new(result, minibatch)
return result
def run_batch(session, m, data, eval_op, minibatch):
"""Runs the model on the given data."""
# prepare data for input
x, y, e_stop = data
epoch_size = x.shape[1] // m.batch_size
# record results, keep results for each minibatch in list
costs = []
correct = []
for batch in range(epoch_size):
x_batch = x[:,batch*m.batch_size : (batch+1)*m.batch_size,:]
y_batch = y[batch*m.batch_size : (batch+1)*m.batch_size,:]
e_batch = e_stop[batch*m.batch_size : (batch+1)*m.batch_size]
temp_dict = {m.input_data: x_batch}
temp_dict.update({m.targets: y_batch})
temp_dict.update({m.early_stop: e_batch})
#m.learning_rate = lr.get_lr()
# train the model
if m.is_training and eval_op == m.train_op:
_ = session.run([eval_op], feed_dict=temp_dict)
print("minibatch {0}: {1}/{2}, lr={3:0.5f}\r".format(minibatch, batch, epoch_size,m.learning_rate),)
lr.increase_global_step()
# track stats every 10 minibatches
if minibatch % evaluate_freq == 0:
result = test_model(session, minibatch) # recursive function
log.write(result, minibatch)
minibatch += 1
# test the model
else:
cost, confusion, accuracy, _ = session.run([m.cost, m.confusion_matrix, m._accuracy, eval_op], feed_dict=temp_dict)
# keep results for this minibatch
costs.append(cost)
correct.append(accuracy * sum(e_batch))
# print test confusion matrix
if not m.is_training and not m.is_validation:
print(confusion)
# output predictions in test mode
if conf.test_mode:
pred = session.run([m._prob_predictions], feed_dict=temp_dict)
pred = np.array(pred)
                np.set_printoptions(threshold=sys.maxsize)  # print full arrays; np.nan is rejected by newer numpy
print(pred.shape)
print(pred)
#results = np.column_stack((tar, pred))
#np.savetxt("results/prediction.result", pred)#, fmt='%.3f')
print("output target and predictions to file prediction.csv")
exit()
if batch == epoch_size - 1:
accuracy = sum(correct) / float(sum(e_stop))
return(sum(costs)/float(epoch_size), accuracy)
# training: keep track of minibatch number
return(minibatch)
def getPredFileName(minibatch):
"""get the output of the prediction files"""
return (logPath+str(test_vessel[0])+'/pred-'+task + str(minibatch)+'.csv')
def getLearnedParameters(param_name='model/bidirectional_rnn/fw/gru_cell/candidate/weights:0', filename='learned_embedding'):
#print(tf.trainable_variables())
var = [v for v in tf.trainable_variables() if v.name == param_name][0]
x = var.eval()
np.savetxt(filename, x)
def main(_):
now = time.time()
# get config
train_conf = Config.TrainingConfig(is_training = True, is_validation = False, batch_size = conf.batch_size)
test_conf = Config.TrainingConfig(is_training = False, is_validation = False, batch_size = len(test_index))
valid_conf = Config.TrainingConfig(is_training = False, is_validation = True, batch_size = len(valid_index))
# prepare all data to evaluate
with tf.Session() as session:
X_all, Y_all, e_stop_all = get_all_data(test_conf)
# random shuffle, very important for stochastic gradient descent with minibatch
np.random.shuffle(train_index)
# specify training and test vessels
X_train = X_all[:,train_index,:]
y_train = Y_all[train_index,:]
stop_train = e_stop_all[train_index]
#print(X_train.shape)
#(X_train, y_train, stop_train) = Data.Data.upsample((X_train, y_train, stop_train), cls=1, times=4)
#print(X_train.shape)
perm = np.random.permutation(X_train.shape[1])
X_train = X_all[:,perm,:]
y_train = Y_all[perm,:]
stop_train = e_stop_all[perm]
X_test = X_all[:,test_index,:]
y_test = Y_all[test_index,:]
stop_test = e_stop_all[test_index]
X_valid = X_all[:,valid_index,:]
y_valid = Y_all[valid_index,:]
stop_valid = e_stop_all[valid_index]
# delete variables to save RAM
del X_all
del Y_all
del e_stop_all
global train_data
train_data = (X_train, y_train, stop_train)
global test_data
test_data = (X_test, y_test, stop_test)
global val_data
val_data = (X_valid, y_valid, stop_valid)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.1
config.intra_op_parallelism_threads=conf.num_threads
# config.log_device_placement=True
session = tf.Session(config=config)
minibatch = 0
with tf.Graph().as_default(), session as sess:
tf.set_random_seed(0)
if weight_initializer == "uniform":
initializer = tf.random_uniform_initializer(0, conf.init_scale)
elif weight_initializer == "orthogonal":
initializer = tf.orthogonal_initializer(gain=conf.init_scale)
else:
print("Error: wrong weight initializer")
exit()
with tf.variable_scope("model", reuse=None, initializer=initializer):
global m
m = VesselModel(config=train_conf)
with tf.variable_scope("model", reuse=True, initializer=initializer):
global mtest
mtest = VesselModel(config=test_conf)
with tf.variable_scope("model", reuse=True, initializer=initializer):
global mval
mval = VesselModel(config=valid_conf)
if conf.checkpoint or conf.restore:
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
if conf.tensorboard:
global writer
writer = tf.summary.FileWriter(logPath+"tf-logs", sess.graph_def)
if not conf.restore:
tf.global_variables_initializer().run() #initialize all variables in the model
else:
saver.restore(sess, dataPath+task)
print("Model restored.")
# training
for i in range(conf.num_epochs):
print("running epoch {0}".format(i))
minibatch = run_batch(sess, m, train_data, m.train_op, minibatch)
# get best results
best = monitor.getBest()
log.write(best, monitor.minibatch)
log.close()
# save the model
if conf.checkpoint:
# Save the variables to disk
save_path = saver.save(sess, dataPath+task)
print("Model saved in file: %s" % save_path)
later = time.time()
difference = int(later - now)
print('time elapsed: {:} seconds'.format(difference))
def prof(main=None):
    import lsprofcalltree  # third-party profiling helper used below; not imported at module level
    f = flags.FLAGS
    f._parse_flags()
    main = main or sys.modules['__main__'].main
    profile = cProfile.Profile()
    profile.run('main(sys.argv)')
    kProfile = lsprofcalltree.KCacheGrind(profile)
    kFile = open('profile', 'w+')
    kProfile.output(kFile)
    kFile.close()
if __name__ == "__main__":
if not gpuMode:
with tf.device('/cpu:0'):
tf.app.run()
else:
tf.app.run()
|
StarcoderdataPython
|
5086433
|
<filename>examples/cp/basic/house_building.py
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This problem schedules a series of tasks of varying durations, where some tasks must finish
before others start, and assigns workers to the tasks so that each worker works on at most
one task at a given time. The objective of the problem is to maximize the match between
worker skill levels and the tasks they are assigned to.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import CpoModel
from collections import namedtuple
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Number of Houses to build
NB_HOUSES = 5
# Max number of periods for the schedule
MAX_SCHEDULE = 318
# House construction tasks
Task = (namedtuple("Task", ["name", "duration"]))
TASKS = {Task("masonry", 35),
Task("carpentry", 15),
Task("plumbing", 40),
Task("ceiling", 15),
Task("roofing", 5),
Task("painting", 10),
Task("windows", 5),
Task("facade", 10),
Task("garden", 5),
Task("moving", 5),
}
# The tasks precedences
TaskPrecedence = (namedtuple("TaskPrecedence", ["beforeTask", "afterTask"]))
TASK_PRECEDENCES = {TaskPrecedence("masonry", "carpentry"),
TaskPrecedence("masonry", "plumbing"),
TaskPrecedence("masonry", "ceiling"),
TaskPrecedence("carpentry", "roofing"),
TaskPrecedence("ceiling", "painting"),
TaskPrecedence("roofing", "windows"),
TaskPrecedence("roofing", "facade"),
TaskPrecedence("plumbing", "facade"),
TaskPrecedence("roofing", "garden"),
TaskPrecedence("plumbing", "garden"),
TaskPrecedence("windows", "moving"),
TaskPrecedence("facade", "moving"),
TaskPrecedence("garden", "moving"),
TaskPrecedence("painting", "moving"),
}
# Worker names and skill levels for each of their skills
Skill = (namedtuple("Skill", ["worker", "task", "level"]))
SKILLS = {Skill("Joe", "masonry", 9),
Skill("Joe", "carpentry", 7),
Skill("Joe", "ceiling", 5),
Skill("Joe", "roofing", 6),
Skill("Joe", "windows", 8),
Skill("Joe", "facade", 5),
Skill("Joe", "garden", 5),
Skill("Joe", "moving", 6),
Skill("Jack", "masonry", 5),
Skill("Jack", "plumbing", 7),
Skill("Jack", "ceiling", 8),
Skill("Jack", "roofing", 7),
Skill("Jack", "painting", 9),
Skill("Jack", "facade", 5),
Skill("Jack", "garden", 5),
Skill("Jim", "carpentry", 5),
Skill("Jim", "painting", 6),
Skill("Jim", "windows", 5),
Skill("Jim", "garden", 9),
Skill("Jim", "moving", 8)
}
# Worker continuity requirements: if a worker does task 1 on a house, the same worker must do task 2 on that house
Continuity = (namedtuple("Continuity", ["worker", "task1", "task2"]))
CONTINUITIES = {Continuity("Joe", "masonry", "carpentry"),
Continuity("Jack", "roofing", "facade"),
Continuity("Joe", "carpentry", "roofing"),
Continuity("Jim", "garden", "moving")
}
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
# Find_tasks: return the task it refers to in the Tasks vector
def find_tasks(name):
return next(t for t in TASKS if t.name == name)
# Find_skills: return the skill it refers to in the Skills vector
def find_skills(worker, task):
return next(s for s in SKILLS if (s.worker == worker) and (s.task == task))
# Iterator on houses numbers
HOUSES = range(1, NB_HOUSES + 1)
# Build the list of all worker names
WORKERS = set(sk.worker for sk in SKILLS)
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Variables of the model
tasks = {} # dict of interval variable for each house and task
wtasks = {} # dict of interval variable for each house and skill
for house in HOUSES:
for task in TASKS:
v = (0, MAX_SCHEDULE)
tasks[(house, task)] = mdl.interval_var(v, v, size=task.duration, name="house {} task {}".format(house, task))
    for skill in SKILLS:
        wtasks[(house, skill)] = mdl.interval_var(optional=True, name="house {} skill {}".format(house, skill))
# Maximization objective of the model
obj2 = mdl.sum([s.level * mdl.presence_of(wtasks[(h, s)]) for s in SKILLS for h in HOUSES])
mdl.add(mdl.maximize(obj2))
# Constraints of the model
for h in HOUSES:
# Temporal constraints
for p in TASK_PRECEDENCES:
mdl.add(mdl.end_before_start(tasks[(h, find_tasks(p.beforeTask))], tasks[(h, find_tasks(p.afterTask))]))
# Alternative workers
for t in TASKS:
mdl.add(mdl.alternative(tasks[(h, t)], [wtasks[(h, s)] for s in SKILLS if (s.task == t.name)], 1))
# Continuity constraints
for c in CONTINUITIES:
mdl.add(mdl.presence_of(wtasks[(h, find_skills(c.worker, c.task1))]) ==
mdl.presence_of(wtasks[(h, find_skills(c.worker, c.task2))]))
# No overlap constraint
for w in WORKERS:
mdl.add(mdl.no_overlap([wtasks[(h, s)] for h in HOUSES for s in SKILLS if s.worker == w]))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
print("\nSolving model....")
msol = mdl.solve(TimeLimit=20, trace_log=False)
# Print solution
print("Solve status: " + msol.get_solve_status())
if msol.is_solution():
# Sort tasks in increasing begin order
ltasks = []
for hs in HOUSES:
for tsk in TASKS:
(beg, end, dur) = msol[tasks[(hs, tsk)]]
ltasks.append((hs, tsk, beg, end, dur))
ltasks = sorted(ltasks, key = lambda x : x[2])
# Print solution
print("\nList of tasks in increasing start order:")
for tsk in ltasks:
print("From " + str(tsk[2]) + " to " + str(tsk[3]) + ", " + tsk[1].name + " in house " + str(tsk[0]))
|
StarcoderdataPython
|
1852492
|
# Part 1
my_tuple = 1,
my_tuple
# Part 2 - Will Throw an Error
# TypeError: 'tuple' object does not support item assignment
my_tuple[1] = 2
# Part 3 - Tuple unpacking
person = ('Jim', 29, 'Austin, TX')
name, age, hometown = person
name
age
hometown
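# Added illustration (not in the original snippet): tuples cannot be changed
# in place, so the usual workaround is to rebuild one from a list.
as_list = list(person)
as_list[1] = 30   # hypothetical new age
person = tuple(as_list)
person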
|
StarcoderdataPython
|
1706681
|
<reponame>danielroa98/mariAI
import retro
# Create the environment
env = retro.make('SuperMarioBros-Nes', 'Level1-1')
env.reset()
# We need to loop while not DONE
done = False
while not done:
    # See what's happening
env.render()
# Call a random button press from the controller
# action = env.action_space.sample()
action = [0,0,1,0,0,0,0,1,1,1,0,0]
#print(action)
    # ob = image of the screen at the time of the action
    # reward = amount of reward earned, as defined in the scenario.json file
    # done = True once the scenario's end condition (from scenario.json) is met
    # info = the tracked game variables defined in data.json
ob, reward, done, info = env.step(action) # Send vars to the enviroment
print(reward)
|
StarcoderdataPython
|
9634480
|
from . import mass, redshift, spin
|
StarcoderdataPython
|
1605723
|
<reponame>Willtech/DistributedUrandom
#!/usr/bin/python
## Distributed Urandom Increment Global CoOperative
# DUIGCO API
# entropy.py script
# Source Code produced by Willtech 2021
# v0.1 hand coded by HRjJ
## setup dependencies
import requests
import time
##URL for delay from API *should be on local system*
api_url = "http://127.0.0.1/urandomapi.php?delay"
api_entropy = "http://127.0.0.1/urandomapi.php?api"
## Flush API request
#entropy_burn = True
entropy_burn = False
#debug = True
debug = False
## MAIN Program
while True:
try:
##get value for delay
r = requests.get(api_url)
except:
print "DELAY HTTP API GET FAILURE"
try:
if r.status_code == 200:
print "Entropy Wait " + r.text[0:1]
time.sleep(int(r.text[0:1]))
else:
print "Invalid API response detected"
print "Retry Wait 10"
time.sleep(10)
except:
print "Invalid API response detected"
print "Wait 10"
time.sleep(10)
if entropy_burn == True:
try:
##get entropy
e = requests.get(api_entropy)
except:
print "ENTROPY HTTP API GET FAILURE"
try:
if e.status_code == 200:
print "Entropy Burn"
entropy = e.text[0:1000]
if debug == True:
print entropy
print(len(entropy)) #684
else:
print "No Entropy Burn"
except:
print "Invalid API response detected"
print "No Entropy Burn"
|
StarcoderdataPython
|
9724220
|
<filename>base/utils.py
from django.contrib.auth.models import User
from .models import SystemConfig
def get_admin_config():
admin_users = User.objects.filter(is_superuser=True)
system_config = SystemConfig.objects.all()
if admin_users.count() == 0:
raise RuntimeError('Please create a superuser!')
if system_config.count() == 0:
raise RuntimeError('Please login admin site and set system config!')
elif system_config.count() != 1:
raise RuntimeError('Please login admin site and delete other system config!')
if system_config[0].user.config is None:
raise RuntimeError(
'Please login admin site and create a config for user {}!'.format(system_config[0].user.username)
)
return system_config[0].user.config.server_username, \
system_config[0].user.config.server_private_key_path, \
system_config[0].gpustat_path
|
StarcoderdataPython
|
8123650
|
<filename>fdk_client/platform/models/CompanyProfileValidator.py
"""Class Validators."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class CompanyProfileValidator:
class updateCompany(BaseSchema):
company_id = fields.Str(required=False)
class cbsOnboardGet(BaseSchema):
company_id = fields.Str(required=False)
class getCompanyMetrics(BaseSchema):
company_id = fields.Str(required=False)
class editBrand(BaseSchema):
company_id = fields.Str(required=False)
brand_id = fields.Str(required=False)
class getBrand(BaseSchema):
company_id = fields.Str(required=False)
brand_id = fields.Str(required=False)
class createBrand(BaseSchema):
company_id = fields.Str(required=False)
class createCompanyBrandMapping(BaseSchema):
company_id = fields.Str(required=False)
class getBrands(BaseSchema):
company_id = fields.Str(required=False)
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
q = fields.Str(required=False)
class createLocation(BaseSchema):
company_id = fields.Str(required=False)
class getLocations(BaseSchema):
company_id = fields.Str(required=False)
store_type = fields.Str(required=False)
q = fields.Str(required=False)
stage = fields.Str(required=False)
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
class updateLocation(BaseSchema):
company_id = fields.Str(required=False)
location_id = fields.Str(required=False)
class getLocationDetail(BaseSchema):
company_id = fields.Str(required=False)
location_id = fields.Str(required=False)
class createLocationBulk(BaseSchema):
company_id = fields.Str(required=False)
|
StarcoderdataPython
|
8127925
|
<reponame>GuangC-iScience/rnn-viscoelasticity
#!/usr/bin/env python
#! author: GC @ 11/25/2020 customized class for stacked RNN layers
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import LSTM, Dense, TimeDistributed
def RNN_VE(strain_arr, RNNmodel, states=None):
""" RNN model of VE constitutive law of one sequence
Inputs: strains with the shape (timesteps, n_features) 2-dim inputs
Outputs: the stress prediction values
"""
stress = []
# loop around the timesteps
for i in range(strain_arr.shape[0]):
inputs = strain_arr[i,:]
inputs=np.reshape(inputs, (1, 1, inputs.shape[0])) # inputs should be in 3-dim
outputs, h_state0, c_state0, h_state1, c_state1 = RNNmodel.pred(inputs, states)
istress = K.get_value(outputs)
stress.append(istress)
states = [h_state0, c_state0, h_state1, c_state1]
return np.asarray(stress)
class myLSTM():
def __init__(self, n_hidden, n_features, n_out, model_file):
self._n_hidden = n_hidden
self._n_features = n_features
self._n_out = n_out
self._model_file = model_file
# self._initial_states = [tf.zeros((1, n_hidden)) for i in range(4)]
# obtain the weights
best_model = load_model(self._model_file)
weights_list = best_model.weights
# for the LSTM cell
self._kernel_0 = K.get_value(weights_list[0])
self._rec_kernel_0 = K.get_value(weights_list[1])
self._bias_rnn_0 = K.get_value(weights_list[2])
self._kernel_1 = K.get_value(weights_list[3])
self._rec_kernel_1 = K.get_value(weights_list[4])
self._bias_rnn_1 = K.get_value(weights_list[5])
self._kernel_dense = K.get_value(weights_list[6])
self._bias_dense = K.get_value(weights_list[7])
# set up 3 layers
self.lstm_layer1 = LSTM(n_hidden, return_sequences=True, return_state=True)
self.lstm_layer2 = LSTM(n_hidden, return_sequences=True, return_state=True)
self.DenseLayer = TimeDistributed(Dense(n_out, input_shape=(None, 1, self._n_hidden)))
# set up the model
inputs = tf.keras.Input(shape=(1, self._n_features))
# set the first LSTM layer
LSTMout0, h_state0, c_state0 = self.lstm_layer1(inputs) # , initial_state=self._initial_states[0:2])
# set the first LSTM layer
LSTMout1, h_state1, c_state1 = self.lstm_layer2(LSTMout0) #, initial_state=self._initial_states[2:])
# get the outputs
outputs = self.DenseLayer(LSTMout1)
# construct the model
self.myRNNet = tf.keras.Model(inputs=inputs, outputs=[outputs, h_state0, c_state0, h_state1, c_state1])
# set up the weights
# self.myRNNet.set_weights(weights_list)
self.myRNNet.layers[1].set_weights([self._kernel_0, \
self._rec_kernel_0, \
self._bias_rnn_0] )
self.myRNNet.layers[2].set_weights([self._kernel_1, \
self._rec_kernel_1, \
self._bias_rnn_1] )
self.myRNNet.layers[3].set_weights([self._kernel_dense, \
self._bias_dense ])
def pred(self, input_strains, states=None):
"""
inputs: the (1,1,6) strain input at a single time step
        The model has to inherit from Model so that it can be called directly.
"""
if states is None:
# set the first LSTM layer
LSTMout0, h_state0, c_state0 = self.lstm_layer1(input_strains)
# set the first LSTM layer
LSTMout1, h_state1, c_state1 = self.lstm_layer2(LSTMout0)
# get the outputs
outputs = self.DenseLayer(LSTMout1)
else:
# set the first LSTM layer
LSTMout0, h_state0, c_state0 = self.lstm_layer1(input_strains, initial_state = states[0:2])
# set the first LSTM layer
LSTMout1, h_state1, c_state1 = self.lstm_layer2(LSTMout0, initial_state = states[2:])
# get the outputs
outputs = self.DenseLayer(LSTMout1)
# # construct the model
# self.myRNNet = tf.keras.Model(inputs=inputs, outputs=[outputs, h_state0, c_state0, h_state1, c_state1])
return outputs, h_state0, c_state0, h_state1, c_state1
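# Hypothetical usage sketch (added; not part of the original file). The model
# file name, layer sizes and the zero strain history are placeholders only.
if __name__ == "__main__":
    model = myLSTM(n_hidden=64, n_features=6, n_out=6, model_file="best_model.h5")
    strain_history = np.zeros((100, 6), dtype=np.float32)   # (timesteps, n_features)
    stress_history = RNN_VE(strain_history, model)          # step-by-step stateful prediction
    print(stress_history.shape)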
|
StarcoderdataPython
|
3595597
|
<reponame>acocuzzo/python-pubsub
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from enum import Enum
from google.api_core.exceptions import GoogleAPICallError
from typing import Optional
class AcknowledgeStatus(Enum):
SUCCESS = 1
PERMISSION_DENIED = 2
FAILED_PRECONDITION = 3
INVALID_ACK_ID = 4
OTHER = 5
class AcknowledgeError(GoogleAPICallError):
"""Error during ack/modack/nack operation on exactly-once-enabled subscription."""
def __init__(self, error_code: AcknowledgeStatus, info: Optional[str]):
self.error_code = error_code
self.info = info
message = None
if info:
message = str(self.error_code) + " : " + str(self.info)
else:
message = str(self.error_code)
super(AcknowledgeError, self).__init__(message)
__all__ = ("AcknowledgeError",)
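# Illustrative note (added): calling code can branch on the machine-readable
# error code carried by the exception, e.g.
#   except AcknowledgeError as e:
#       if e.error_code == AcknowledgeStatus.PERMISSION_DENIED:
#           ...  # surface a permissions problem to the operator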
|
StarcoderdataPython
|
5050960
|
import math
import scipy
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import cross_val_score, cross_val_predict, train_test_split, GridSearchCV
bike_df = pd.read_csv('data/day.csv')
print(f"Shape of the dataset: {bike_df.shape}")
print()
print("Data types: ")
print(bike_df.dtypes)
print()
print("Data: ")
print(bike_df.head(5))
print()
print("Description: ")
print(bike_df.describe())
print()
bike_df['dteday'] = pd.to_datetime(bike_df.dteday)
bike_df['season'] = bike_df.season.astype('category')
bike_df['yr'] = bike_df.yr.astype('category')
bike_df['mnth'] = bike_df.mnth.astype('category')
bike_df['holiday'] = bike_df.holiday.astype('category')
bike_df['weekday'] = bike_df.weekday.astype('category')
bike_df['workingday'] = bike_df.workingday.astype('category')
bike_df['weathersit'] = bike_df.weathersit.astype('category')
# check missing values
print("Missing Values:")
print(bike_df.isnull().sum())
print()
# Histograms
plt.figure(figsize=(15, 8))
sns.barplot(x='mnth', y='cnt', data=bike_df[[
'mnth', 'cnt', 'season']], hue='season')
plt.title('Season monthly distribution')
plt.savefig('day-results/season.png')
plt.clf()
print("Seasonwise distribution: 'day-results/season.png'")
print()
plt.figure(figsize=(15, 8))
sns.barplot(x='mnth', y='cnt', data=bike_df[[
'mnth', 'cnt', 'weekday']], hue='weekday')
plt.title('Weekday monthly distribution')
plt.savefig('day-results/weekday.png')
plt.clf()
print("Weekday distribution: 'day-results/weekday.png'")
print()
# Violin Plot
plt.figure(figsize=(15, 8))
sns.violinplot(x='yr', y='cnt',
data=bike_df[['yr', 'cnt']])
plt.title('Yearly distribution')
plt.savefig('day-results/year.png')
plt.clf()
print("Yearly distribution: 'day-results/year.png'")
print()
plt.figure(figsize=(15, 8))
sns.barplot(data=bike_df, x='holiday', y='cnt', hue='season')
plt.title('Holiday distribution')
plt.savefig('day-results/holiday.png')
plt.clf()
print("Holiday distribution: 'day-results/holiday.png'")
print()
plt.figure(figsize=(15, 8))
sns.barplot(data=bike_df, x='workingday', y='cnt', hue='season')
plt.title('Workingday wise distribution of counts')
plt.savefig('day-results/workday.png')
plt.clf()
print("Workingday distribution: 'day-results/workday.png'")
print()
# outliers
plt.figure(figsize=(15, 8))
sns.boxplot(data=bike_df[['temp', 'windspeed', 'hum']])
plt.title('Temp_windspeed_humidity_outliers')
plt.savefig('day-results/outliers.png')
plt.clf()
print("Outliers: 'day-results/outliers.png'")
print()
# Replace and impute outliers
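# Values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are treated as outliers,
# set to NaN and then imputed with the column mean further down.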
wind_hum = pd.DataFrame(bike_df, columns=['windspeed', 'hum'])
cnames = ['windspeed', 'hum']
for i in cnames:
q75, q25 = np.percentile(wind_hum.loc[:, i], [75, 25])
iqr = q75 - q25
    lower = q25 - (iqr * 1.5)
    upper = q75 + (iqr * 1.5)
    wind_hum.loc[wind_hum.loc[:, i] < lower, i] = np.nan
    wind_hum.loc[wind_hum.loc[:, i] > upper, i] = np.nan
wind_hum['windspeed'] = wind_hum['windspeed'].fillna(
wind_hum['windspeed'].mean())
wind_hum['hum'] = wind_hum['hum'].fillna(wind_hum['hum'].mean())
bike_df['windspeed'] = bike_df['windspeed'].replace(wind_hum['windspeed'])
bike_df['hum'] = bike_df['hum'].replace(wind_hum['hum'])
print("Imputed data: ")
print(bike_df.head(5))
print()
# Normal plot
plt.figure(figsize=(15, 8))
stats.probplot(bike_df.cnt.tolist(), dist='norm', plot=plt)
plt.savefig('day-results/normal.png')
plt.clf()
print("Normal Plot: 'day-results/normal.png'")
print()
# Correlation Matrix
# Create the correlation matrix
correMtr = bike_df.corr()
fig = sns.heatmap(correMtr, annot=True, square=True)
fig = fig.get_figure()
fig.savefig('day-results/correlation.png')
plt.clf()
print("Correlation Matrix: 'day-results/correlation.png'")
print()
# Modelling the dataset
X_train, X_test, y_train, y_test = train_test_split(
bike_df.iloc[:, 0:-3], bike_df.iloc[:, -1], test_size=0.3, random_state=43)
X_train.reset_index(inplace=True)
y_train = y_train.reset_index()
X_test.reset_index(inplace=True)
y_test = y_test.reset_index()
print(f"Training data shape: {X_train.shape}, {y_train.shape}")
print()
print(f"Testing data shape: {X_test.shape}, {y_test.shape}")
print()
print(f"Training data: \n{y_train.head()}")
print()
print(f"Testing data: \n{y_test.head()}")
print()
train_attributes = X_train[['season', 'mnth', 'yr', 'weekday', 'holiday',
'workingday', 'weathersit', 'hum', 'temp', 'windspeed']]
test_attributes = X_test[['season', 'mnth', 'yr', 'weekday', 'holiday',
'workingday', 'hum', 'temp', 'windspeed', 'weathersit']]
cat_attributes = ['season', 'holiday',
'workingday', 'weathersit', 'yr']
num_attributes = ['temp', 'windspeed', 'hum', 'mnth', 'weekday']
train_encoded_attributes = pd.get_dummies(
train_attributes, columns=cat_attributes)
print('Shape of training data: ', train_encoded_attributes.shape)
print()
print(train_encoded_attributes.head())
print()
X_train = train_encoded_attributes
y_train = y_train.cnt.values
# print("Performing GridSearch...")
# print()
# regressor = RandomForestRegressor()
# parameters = [{'n_estimators': [150, 200, 250, 300],
# 'max_features': ['auto', 'sqrt', 'log2']}]
# grid_search = GridSearchCV(
# estimator=regressor, param_grid=parameters, n_jobs=-1)
# grid_search = grid_search.fit(X_train, y_train)
# best_parameters = grid_search.best_params_
# print(best_parameters)
# print()
test_encoded_attributes = pd.get_dummies(
test_attributes, columns=cat_attributes)
print('Shape test data: ', test_encoded_attributes.shape)
print()
print(test_encoded_attributes.head())
print()
X_test = test_encoded_attributes
y_test = y_test.cnt.values
regressor = RandomForestRegressor(n_estimators=150)
regressor.fit(X_train, y_train)
r_score = regressor.score(X_test, y_test)
print("Accuracy of the model: ", r_score)
print()
y_pred = regressor.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print(f"Mean Absolute Error: {mae}")
print()
rmse = math.sqrt(mean_squared_error(y_test, y_pred))
print(f"Root Mean Squared Error: {rmse}")
print()
feature_importance = regressor.feature_importances_
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(12, 10))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X_train.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig('day-results/features.png')
plt.clf()
print(f"Important features saved: 'day-results/features.png'")
print()
plt.plot([obj for obj in y_test[:150]], color='b', label='Actual')
plt.plot(y_pred[:150], color='r', label='Predicted')
plt.xlabel('Values')
plt.ylabel('Count')
plt.legend()
plt.title('Actual Count vs Predicted Count')
plt.savefig('day-results/prediction.png')
plt.clf()
print(f"Actual vs Prediction Results saved: 'day-results/prediction.png'")
print()
with open('day-results/output.txt', 'w') as file:
file.write("Predictions vs Actual: \n\n")
file.write(" Prediction: Actual:\n")
i = 0
for obj in y_test:
file.write(" {0:15} {1}\n".format(y_pred[i], obj))
i += 1
file.write("\n")
print("Text format for prediction saved: 'day-results/output.txt")
print()
|
StarcoderdataPython
|
6497854
|
import torch
import torch.nn as nn
from torchdiffeq import odeint_adjoint as odeint
from .wrappers.cnf_regularization import RegularizedODEfunc
__all__ = ["CNF"]
class CNF(nn.Module):
def __init__(self, odefunc, T=1.0, train_T=False, regularization_fns=None, solver='dopri5', atol=1e-5, rtol=1e-5):
super(CNF, self).__init__()
if train_T:
self.register_parameter("sqrt_end_time", nn.Parameter(torch.sqrt(torch.tensor(T))))
else:
self.register_buffer("sqrt_end_time", torch.sqrt(torch.tensor(T)))
nreg = 0
if regularization_fns is not None:
odefunc = RegularizedODEfunc(odefunc, regularization_fns)
nreg = len(regularization_fns)
self.odefunc = odefunc
self.nreg = nreg
self.regularization_states = None
self.solver = solver
self.atol = atol
self.rtol = rtol
self.test_solver = solver
self.test_atol = atol
self.test_rtol = rtol
self.solver_options = {}
def forward(self, z, logpz=None, integration_times=None, reverse=False):
if logpz is None:
_logpz = torch.zeros(z.shape[0], 1).to(z)
else:
_logpz = logpz
if integration_times is None:
integration_times = torch.tensor([0.0, self.sqrt_end_time * self.sqrt_end_time]).to(z)
if reverse:
integration_times = _flip(integration_times, 0)
# Refresh the odefunc statistics.
self.odefunc.before_odeint()
# Add regularization states.
reg_states = tuple(torch.tensor(0).to(z) for _ in range(self.nreg))
if self.training:
state_t = odeint(
self.odefunc,
(z, _logpz) + reg_states,
integration_times.to(z),
atol=self.atol,
rtol=self.rtol,
method=self.solver,
options=self.solver_options,
)
else:
state_t = odeint(
self.odefunc,
(z, _logpz),
integration_times.to(z),
atol=self.test_atol,
rtol=self.test_rtol,
method=self.test_solver,
)
if len(integration_times) == 2:
state_t = tuple(s[1] for s in state_t)
z_t, logpz_t = state_t[:2]
self.regularization_states = state_t[2:]
if logpz is not None:
return z_t, logpz_t
else:
return z_t
def get_regularization_states(self):
reg_states = self.regularization_states
self.regularization_states = None
return reg_states
def num_evals(self):
return self.odefunc._num_evals.item()
def _flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
return x[tuple(indices)]
|
StarcoderdataPython
|
3562406
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Dict, Optional, Union
import numpy as np
import pandas as pd
import torch
from torch.distributions import Distribution
from gluonts.model.forecast import Forecast, Quantile, SampleForecast
class DistributionForecast(Forecast):
"""
A `Forecast` object that uses a distribution directly.
This can for instance be used to represent marginal probability
    distributions for each time point -- although joint distributions are
    also possible (e.g. when using MultiVariateGaussian).
Parameters
----------
distribution
Distribution object. This should represent the entire prediction
length, i.e., if we draw `num_samples` samples from the distribution,
the sample shape should be
samples = trans_dist.sample(num_samples)
samples.shape -> (num_samples, prediction_length)
start_date
start of the forecast
freq
forecast frequency
info
additional information that the forecaster may provide e.g. estimated
parameters, number of iterations ran etc.
"""
def __init__(
self,
distribution: Distribution,
start_date: pd.Timestamp,
freq: str,
item_id: Optional[str] = None,
info: Optional[Dict] = None,
) -> None:
self.distribution = distribution
self.shape = distribution.batch_shape + distribution.event_shape
self.prediction_length = self.shape[0]
self.item_id = item_id
self.info = info
assert isinstance(
start_date, pd.Timestamp
), "start_date should be a pandas Timestamp object"
self.start_date = start_date
assert isinstance(freq, str), "freq should be a string"
self.freq = freq
self._mean = None
@property
def mean(self) -> np.ndarray:
"""
Forecast mean.
"""
if self._mean is not None:
return self._mean
else:
self._mean = self.distribution.mean.cpu().numpy()
return self._mean
@property
def mean_ts(self) -> pd.Series:
"""
Forecast mean, as a pandas.Series object.
"""
return pd.Series(data=self.mean, index=self.index)
def quantile(self, level: Union[float, str]) -> np.ndarray:
level = Quantile.parse(level).value
return self.distribution.icdf(torch.tensor([level])).cpu().numpy()
def to_sample_forecast(self, num_samples: int = 200) -> SampleForecast:
return SampleForecast(
samples=self.distribution.sample((num_samples,)).cpu().numpy(),
start_date=self.start_date,
freq=self.freq,
item_id=self.item_id,
info=self.info,
)
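# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes a 24-step hourly horizon and a diagonal Normal distribution, which
# satisfies the shape contract described in the class docstring:
# dist.sample((num_samples,)) has shape (num_samples, prediction_length).
if __name__ == "__main__":
    from torch.distributions import Normal

    example_dist = Normal(loc=torch.zeros(24), scale=torch.ones(24))
    forecast = DistributionForecast(
        example_dist, start_date=pd.Timestamp("2021-01-01"), freq="H"
    )
    print(forecast.mean.shape)           # (24,)
    print(forecast.quantile(0.9).shape)  # (24,)
    print(forecast.to_sample_forecast(num_samples=100).samples.shape)  # (100, 24)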
|
StarcoderdataPython
|
12841084
|
import torch
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from typing import List, Optional
__all__ = [
'to_tensor',
'truncate',
'add_token',
]
def to_tensor(input: List[List[int]], padding_value: Optional[int] = None) -> Tensor:
if padding_value is None:
output = torch.tensor(input, dtype=torch.long)
return output
else:
output = pad_sequence(
[torch.tensor(ids, dtype=torch.long) for ids in input],
batch_first=True,
padding_value=float(padding_value)
)
return output
def truncate(input: List[List[int]], max_seq_len: int) -> List[List[int]]:
output: List[List[int]] = []
for ids in input:
output.append(ids[:max_seq_len])
return output
def add_token(input: List[List[int]], token_id: int, begin: bool = True) -> List[List[int]]:
output: List[List[int]] = []
if begin:
for ids in input:
output.append([token_id] + ids)
else:
for ids in input:
output.append(ids + [token_id])
return output
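# Hedged usage sketch (added for illustration; not part of the original module).
# It shows the intended flow: truncate, prepend a begin token, then pad into a
# batch tensor. The id values below are made up for the example.
if __name__ == "__main__":
    ids = [[1, 2, 3, 4, 5], [6, 7]]
    ids = truncate(ids, max_seq_len=4)       # [[1, 2, 3, 4], [6, 7]]
    ids = add_token(ids, token_id=101)       # prepend 101 to every sequence
    batch = to_tensor(ids, padding_value=0)  # shape (2, 5), padded with 0
    print(batch)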
|
StarcoderdataPython
|
6447173
|
<filename>src/LP/solver/solution.py
"""
<NAME> - March 2021
Solution analyzer of the optimized model
"""
# Reads instances from the solver and creates plots.
# You can check the example that uses networkx, although it is not very helpful.
import matplotlib.pyplot as plt
import networkx as nx
def print_solution_x(x):
pass
def print_solution_y(y):
pass
def print_solution_r(y, x, node_pos, L, R):
G = nx.DiGraph()
list_nodes = list(range(1, L + 1))
G.add_nodes_from(list_nodes)
for r in R:
        for i, j in y:  # iterate arc pairs directly (original used enumerate(y))
G.add_edge(i, j)
# Adding the position attribute to each node
# node_pos = {1: (0, 0), 2: (2, 2), 3: (2, -2), 4: (5, 2), 5: (5, -2), 6: (7, 0)}
# Create a list of edges in shortest path
red_edges = [(i, j) for i, j in y if x[i, j].x > 0]
# Create a list of nodes in shortest path
for r in R:
# If the node is in the shortest path, set it to red, else set it to white color
        node_col = ['white' if node not in x[r] else 'red' for node in G.nodes()]
# If the edge is in the shortest path set it to red, else set it to white color
        edge_col = ['black' if edge not in red_edges else 'red' for edge in G.edges()]
# Draw the nodes
nx.draw_networkx(G, node_pos, node_color=node_col, node_size=450)
# Draw the node labels
# nx.draw_networkx_labels(G1, node_pos,node_color= node_col)
# Draw the edges
nx.draw_networkx_edges(G, node_pos, edge_color=edge_col)
        # Draw the edge labels (disabled: the original call referenced an
        # undefined `cost` mapping and an invalid `edge_color` keyword; pass
        # edge_labels={(i, j): label} here when edge costs are available)
        # nx.draw_networkx_edge_labels(G, node_pos, edge_labels=cost)
# Remove the axis
plt.axis('off')
# TODO: Add description of the plot
# TODO: Save the plot
# Show the plot
plt.show()
|
StarcoderdataPython
|
377622
|
"""
Holds all global app variables.
"""
SUPPLIER_DEFAULT_INVENTORY_INTERVAL = 86400
THK_VERSION_NUMBER = "2.0.0"
THK_VERSION_NAME = "Arrakis"
THK_CYCLE_PID = "ThunderhawkCycle"
THK_CYCLE_LAST_POSITION = "thunderhawk cycle last position"
"""
Collections.
"""
MONGO_USERS_COLLECTION = "Users"
MONGO_SUPPLIER_REGISTER_COLLECTION = "SupplierRegister"
MONGO_SUPPLIER_UNAVAILABLE_PRODUCT_COLLECTION = "UnavailableSupplierProducts"
MONGO_SUPPLIER_PRODUCT_GROUPING_COLLECTION = "SupplierProductGroups"
MONGO_SUPPLIER_HISTORICAL_PRODUCT_COLLECTION = "SupplierHistoricalProducts"
MONGO_SUPPLIER_PRODUCT_COLLECTION = "SupplierProducts"
MONGO_SUPPLIER_PRODUCT_IMAGE_COLLECTION = "SupplierProductImages"
MONGO_SUPPLIER_PRODUCT_METAFIELDS_COLLECTION = "SupplierProductMetafields"
MONGO_SUPPLIER_PRODUCT_METAFIELD_CONFIG_COLLECTION = "SupplierProductMetafieldConfig"
MONGO_MASTER_PRODUCT_COLLECTION = "MasterProductData"
MONGO_TAGS_TAG_FILTER_COLLECTION = "TagFilters"
MONGO_MASTER_PRODUCT_OVERRIDES_COLLECTION = "MasterProductDataOverrides"
MONGO_MASTER_PRODUCT_TAG_GROUPING_COLLECTION = "MasterProductTagGrouping"
MONGO_MASTER_PRODUCT_TRANSPOSITIONS_COLLECTION = "MasterProductTranspositions"
MONGO_MASTER_PRODUCT_BUILD_QUEUE_COLLECTION = "MasterProductBuildQueue"
MONGO_REDIS_QUEUE_COLLECTION = "RedisQueueJobs"
MONGO_THK_LOG_COLLECTION = "Log"
MONGO_THK_REPORTS_COLLECTION = "Reports"
MONGO_THK_SETTINGS_COLLECTION = "Settings"
MONGO_THK_GOOGLEADS_COLLECTION = "GoogleAds"
"""
PATHS
"""
THK_DATA_DIR = "data"
THK_FEEDS_DIR = "feeds"
# Config file paths
THK_CONFIG_DIR = "config"
THK_CONFIG__FEEDS_DIR = "{}/{}".format(THK_CONFIG_DIR, "feeds")
THK_CONFIG__SUPPLIERS_DIR = "{}/{}".format(THK_CONFIG_DIR, "suppliers")
THK_CONFIG__TAGS_DIR = "{}/{}".format(THK_CONFIG_DIR, "tags")
THK_CONFIG__ADVERTISING_DIR = "{}/{}".format(THK_CONFIG_DIR, "advertising")
THK_CONFIG__ADWORDS_DIR = "{}/{}".format(THK_CONFIG__ADVERTISING_DIR, "adwords")
THK_CONFIG__ADWORDS_CAMPAIGN_DIR = "{}/{}".format(THK_CONFIG__ADWORDS_DIR, "campaign")
"""
Global field/collection field names and tags.
"""
THK_ACTIVE = "active"
THK_STAGED = "staged"
THK_SUPPLIER = "supplier"
THK_TIMESTAMP = "timestamp"
THK_PRODUCT = "product"
THK_PRODUCT_AVAILABLE = "available"
THK_UNIQUE_FIELD = "uniqueField"
THK_NEW_PRODUCT = "new"
THK_REPORT = "report"
THK_FIELDS = "fields"
THK_METAFIELDS = "metafields"
THK_THUMBNAIL = "thumbnail"
THK_ALT_TEXT = "altText"
THK_ALT = "alt"
THK_ATTACHMENT = "attachment"
THK_HISTORICAL_INVENTORY_LEVEL = "inventory"
THK_HISTORICAL_INVENTORY_DIFFERENCE = "difference"
THK_FILENAME = "filename"
THK_FILESIZE = "filesize"
THK_METAFIELD_LABEL = "label"
THK_METAFIELD_VALUE = "value"
THK_DEFAULT_IMAGE_DIR = "static/images"
THK_DEFAULT_UNIQUE_FIELD = "SKU"
THK_METAFIELD_MAPPING = "mapping"
THK_TRANSPOSITION = "transposition"
THK_METAFIELD_ACTIVE = "active"
THK_METAFIELD_STORE_WEIGHT = "weight"
THK_SUPPLIER_PRODUCT_OBJECT_ID = "supplierProductId"
THK_MASTER_PRODUCT_OBJECT_ID = "masterProductId"
"""
Variants
"""
THK_VARIANTS = "variants"
THK_VARIATION_GROUP = "group"
THK_VARIATION_PRODUCTS = "products"
THK_VARIATION_ATTRIBUTES__LABEL = "label"
THK_VARIATION_ATTRIBUTES__FIELD = "field"
THK_VARIATION_ATTRIBUTES__APPEND = "append"
THK_VARIATION_ATTRIBUTES__FORMAT = "format"
THK_VARIATION_ATTRIBUTES__METHOD = "method"
"""
Images
"""
THK_IMAGES = "images"
THK_IMAGES__FILENAME = "filename"
THK_IMAGES__URL = "url"
THK_IMAGES__ALT = "alt"
"""
Tag Filters
"""
THK_TAG_FILTERS__NAME = "name"
THK_TAG_FILTERS__MAPPING = "mapping"
THK_TAG_FILTERS__SUPPLIER = "supplier"
"""
Settings
"""
THK_SETTINGS_GROUP = "group"
THK_SETTINGS_DATA = "data"
"""
Settings Groups
"""
THK_SETTINGS_GROUP__THK = "thunderhawk"
"""
Supplier Register Variables
"""
THK_SUPPLIER_REGISTER__STAGING_QUEUE = "staging_queue"
THK_SUPPLIER_REGISTER__PURGE_QUEUE = "purge_queue"
"""
Logging
"""
THK_LOG_MAX_ENTRIES = 10000
THK_LOG_CATEGORY = "category"
THK_LOG_CATEGORY__SYSTEM = "system"
THK_LOG_MESSAGE = "message"
THK_LOG_STATUS_CODE = "code"
THK_LOG_STATUS__INFO = "info"
THK_LOG_STATUS__STATUS = "status"
THK_LOG_STATUS__NOTICE = "notice"
THK_LOG_STATUS__WARNING = "warning"
THK_LOG_STATUS__ERROR = "error"
THK_LOG_STATUS_CODES = [
THK_LOG_STATUS__INFO,
THK_LOG_STATUS__STATUS,
THK_LOG_STATUS__NOTICE,
THK_LOG_STATUS__ERROR,
THK_LOG_STATUS__WARNING,
]
"""
Supplier Purge Options
"""
THK_SUPPLIER_PURGE_OPTION__ALL = "All"
THK_SUPPLIER_PURGE_OPTION__CURRENT = "Current Product Data"
THK_SUPPLIER_PURGE_OPTION__HISTORICAL = "Historical Product Data"
THK_SUPPLIER_PURGE_OPTION__IMAGES = "Image Data"
THK_SUPPLIER_PURGE_OPTION__MASTER_PRODUCTS = "Master Product Data"
THK_SUPPLIER_PURGE_OPTION__MASTER_PRODUCT_OVERRIDES = "Master Product Override Data"
THK_SUPPLIER_PURGE_OPTION__MASTER_PRODUCT_QUEUE = "Master Product Queue Data"
THK_SUPPLIER_PURGE_OPTION__METAFIELD_CONFIG = "Metafield Config Data"
THK_SUPPLIER_PURGE_OPTION__METAFIELDS = "Metafield Data"
THK_SUPPLIER_PURGE_OPTION__GROUPING = "Product Grouping Data"
THK_SUPPLIER_PURGE_OPTION__UNAVAILABLE = "Unavailable Product Data"
THK_SUPPLIER_PURGE_OPTIONS = (
THK_SUPPLIER_PURGE_OPTION__ALL,
THK_SUPPLIER_PURGE_OPTION__MASTER_PRODUCTS,
THK_SUPPLIER_PURGE_OPTION__MASTER_PRODUCT_OVERRIDES,
THK_SUPPLIER_PURGE_OPTION__MASTER_PRODUCT_QUEUE,
THK_SUPPLIER_PURGE_OPTION__CURRENT,
THK_SUPPLIER_PURGE_OPTION__HISTORICAL,
THK_SUPPLIER_PURGE_OPTION__METAFIELD_CONFIG,
THK_SUPPLIER_PURGE_OPTION__METAFIELDS,
THK_SUPPLIER_PURGE_OPTION__IMAGES,
THK_SUPPLIER_PURGE_OPTION__GROUPING,
THK_SUPPLIER_PURGE_OPTION__UNAVAILABLE,
)
"""
Possible Queue Actions
"""
THK_QUEUE_ACTIONS = "actions"
THK_QUEUE_ACTION__REBUILD = "rebuild"
THK_QUEUE_ACTION__DELETE = "delete"
THK_QUEUE_AVAILABLE_ACTIONS = [
THK_QUEUE_ACTION__REBUILD,
THK_QUEUE_ACTION__DELETE,
]
"""
Redis Queue Job Store Fields.
"""
REDIS_QUEUE__JOB_NAME = "name"
REDIS_QUEUE__JOB_ID = "jobId"
REDIS_QUEUE__DATA = "data"
REDIS_QUEUE__TIMESTAMP = "timestamp"
"""
Redis Queue Job Names
"""
REDIS_QUEUE_JOB__CACHE_ADWORDS_CAMPAIGNS = "CacheAdWordsCampaigns"
REDIS_QUEUE_JOB__CACHE_ADWORDS_ADGROUPS = "CacheAdWordsAdGroups"
REDIS_QUEUE_JOB__GENERATE_ADWORDS_BID_STRUCTURE = "GenerateAdWordsBidStructure"
REDIS_QUEUE_JOB__REBUILD_INVALID_VARIANT_REPORT = "RebuildInvalidVariantReport"
REDIS_QUEUE_JOB__SYNC_FILTERED_TRANSPOSITION_PRODUCTS = "SyncFilteredTranspositionProducts"
"""
Redis Queue Job Settings
"""
REDIS_QUEUE_JOB__JOB_TIMEOUT__DEFAULT = "10m"
REDIS_QUEUE_JOB__JOB_TIMEOUT__REPORTS_DEFAULT = "1h"
REDIS_QUEUE_JOB__JOB_TIMEOUT__SYNC_FILTERED_TRANSPOSITION_PRODUCTS = "1h"
"""
Advertising Variables
"""
ADWORDS_CLIENT_VERSION = "v201809"
ADWORDS_USER_AGENT = "Thunderhawk"
ADWORDS_TYPE__CAMPAIGN = "campaign"
ADWORDS_TYPE__ADGROUP = "ad group"
ADWORDS_TYPE__PRODUCT_PARTITION = "product partition"
ADWORDS_MICRO_AMOUNT_VALUE = 1000000
"""
Reports
"""
REPORT__INVALID_VARIANT_REPORT = "InvalidVariantReport"
REPORT__INVALID_VARIANT_REPORT__MISSING_ATTRIBUTES = "missing attributes"
REPORT__INVALID_VARIANT_REPORT__DUPLICATE_ATTRIBUTES = "duplicate attributes"
"""
Thumbnail settings; these are used to display
smaller versions of images for optimization
purposes in the web ui.
"""
# The web dir should be the location a static
# link can be made from.
THK_THUMBNAILS_DIR = "web/static/assets/thumbs"
THK_THUMBNAILS_WEB_DIR = "assets/thumbs"
THK_THUMBNAILS_SIZE = (250, 250)
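# Hedged illustration (not part of the original settings): one way the two
# constants above could be used with Pillow to generate a thumbnail; the helper
# name and call site are assumptions.
#
#   from PIL import Image
#
#   def make_thumbnail(source_path, filename):
#       image = Image.open(source_path)
#       image.thumbnail(THK_THUMBNAILS_SIZE)
#       image.save("{}/{}".format(THK_THUMBNAILS_DIR, filename))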
"""
Supplier Tag Map Globals
"""
SUPPLIER_TAG_MAP__METHOD = "method"
SUPPLIER_TAG_MAP__FIELD = "field"
SUPPLIER_TAG_MAP__FILTER = "filter"
SUPPLIER_TAG_MAP__DELIMITER = "delimiter"
SUPPLIER_TAG_MAP__VALUE = "value"
# Tag Map Methods
SUPPLIER_TAG_MAP__METHOD_SPLIT = "split"
SUPPLIER_TAG_MAP__METHOD_VALUE = "value"
SUPPLIER_TAG_MAP__METHOD_FILTER = "filter"
"""
Supplier products should be mapped to these fields name,
when the transposer runs through, it will map any fields
that have been mapped to these and assume these as their
value.
Metafields can be mapped when transposing in the supplier config
file using $Metafields followed by a colon and then the metafield key i.e.
$Metafields:_description will map the description from the metafields
for a product if it has it available.
"""
MASTER_PRODUCT_FIELD__IDENTIFIER = "sku"
MASTER_PRODUCT_FIELD__ATTRIBUTES = "attributes"
MASTER_PRODUCT_FIELD__TYPE = "type"
MASTER_PRODUCT_FIELD__TITLE = "title"
MASTER_PRODUCT_FIELD__BODY = "body"
MASTER_PRODUCT_FIELD__BARCODE = "barcode"
MASTER_PRODUCT_FIELD__VENDOR = "vendor" # The Supplier Id
MASTER_PRODUCT_FIELD__VENDOR_NAME = "vendor name" # The Supplier friendly name.
MASTER_PRODUCT_FIELD__COLLECTION = "collection"
MASTER_PRODUCT_FIELD__PRICE = "price"
MASTER_PRODUCT_FIELD__COST = "cost"
MASTER_PRODUCT_FIELD__STOCKQTY = "stockQty"
MASTER_PRODUCT_FIELD__TAGS = "tags"
MASTER_PRODUCT_FIELD__WEIGHT = "weight"
MASTER_PRODUCT_FIELD__SEO_TITLE = "seo title"
MASTER_PRODUCT_FIELD__SEO_DESCRIPTION = "seo description"
# Metafields aren't added to the master product fields
# because we map them during the transposition, not
# from a config level.
MASTER_PRODUCT_FIELD__METAFIELDS = "metafields"
MASTER_PRODUCT_FIELDS = (
MASTER_PRODUCT_FIELD__IDENTIFIER, # The SKU of the product, should map to the suppliers unique field.
MASTER_PRODUCT_FIELD__TAGS, # The tags for the product.
MASTER_PRODUCT_FIELD__TITLE, # The product display title.
MASTER_PRODUCT_FIELD__TYPE, # The product type i.e. Pendulum.
MASTER_PRODUCT_FIELD__BODY, # Product description/body text containing HTML.
MASTER_PRODUCT_FIELD__BARCODE, # Product barcode (ISBN, UPC, GTIN, etc.)
MASTER_PRODUCT_FIELD__VENDOR, # The product vendor or supplier name.
MASTER_PRODUCT_FIELD__COLLECTION, # The suppliers collection, separated by commas.
MASTER_PRODUCT_FIELD__PRICE, # The supplier product cost with applied markup.
MASTER_PRODUCT_FIELD__COST, # The supplier product cost.
MASTER_PRODUCT_FIELD__STOCKQTY, # The stock level for the supplier product.
MASTER_PRODUCT_FIELD__WEIGHT, # The weight for the supplier product.
MASTER_PRODUCT_FIELD__SEO_TITLE, # The seo title for the supplier product.
MASTER_PRODUCT_FIELD__SEO_DESCRIPTION, # The seo description for the supplier product.
MASTER_PRODUCT_FIELD__ATTRIBUTES, # The product attributes for variants i.e. color, size.
)
# Master Product fields that cannot be edited.
MASTER_PRODUCT_DISALLOWED_EDIT_FIELDS = (
MASTER_PRODUCT_FIELD__IDENTIFIER, # The SKU of the product, should map to the suppliers unique field.
MASTER_PRODUCT_FIELD__VENDOR, # The product vendor or supplier name.
)
|
StarcoderdataPython
|
11246135
|
"""
BACON = Building Autama's Core Overall Nature
This file contains a class to handle generating an Autama's personality.
"""
from itertools import chain
from random import choice, randint, seed
from Nucleus.utils import get_dataset
from Nucleus.tools import read_pickle
class Bacon:
def __init__(self):
self.__nucleus = read_pickle("nucleus.pickle")
self.__args = self.__nucleus.get_args()
self.__tokenizer = self.__nucleus.get_tokenizer()
self.__required_amount = 6 # The required amount of traits per personality
# A method to generate a random personality with exactly the required amount of traits
def generate_full_personality(self):
personality = self.__generate_personality()
return self.__fill_up(personality)
# A method to generate a personality with half of a new user's personality and half random
def make_hybrid_freak(self, user_personality: list):
personality = self.__format_personality(user_personality)
personality = self.__remove_name_trait(personality)
personality = self.__choose_some(personality)
return self.__fill_up(personality)
# A method to check and remove name traits from user's personality when copying the personality to a new Autama
def check_personality(self, user_personality: list):
personality = self.__format_personality(user_personality)
personality = self.__remove_name_trait(personality)
return self.__fill_up(personality)
# A method to generate a random personality. It returns a list of personality trait strings.
def __generate_personality(self):
encoded_personality = self.__generate_encoded_personality()
decoded_personality = self.__decode_personality(encoded_personality)
return self.__remove_name_trait(decoded_personality)
# A method for generating one encoded personality
# https://github.com/huggingface/transfer-learning-conv-ai/blob/master/interact.py
def __generate_encoded_personality(self):
dataset = get_dataset(self.__tokenizer, self.__args.dataset_path, self.__args.dataset_cache)
personalities = [dialog["personality"] for dataset in dataset.values() for dialog in dataset]
personality = choice(personalities)
return personality
# A method for decoding an encoded personality
def __decode_personality(self, encoded_personality: list):
encoded_personality_string = self.__tokenizer.decode(chain(*encoded_personality))
decoded_personality_list = [i + "." for i in encoded_personality_string.split(".")]
decoded_personality_list.pop()
return decoded_personality_list
# A method that takes an existing personality and fills it up until it has the required amount of traits
def __fill_up(self, personality: list):
required = self.__required_amount
copy_personality = personality[:]
amount = len(copy_personality)
# Loop until personality has the required amount or more
while amount < required:
new_personality = self.__generate_personality()
new_amount = len(new_personality)
# Determine if new personality has required amount
if new_amount == required:
copy_personality = new_personality
amount = new_amount
else:
copy_personality = copy_personality + new_personality
amount = len(copy_personality)
difference = amount - required
# Loop until personality is equal to required amount
for i in range(difference):
copy_personality.pop()
amount = amount - 1
return copy_personality
# A method that chooses some of the user's traits
def __choose_some(self, user_personality: list):
personality = user_personality[:]
amount = len(personality)
half = int(amount / 2)
seed()
for i in range(half):
amount = amount - 1
index = randint(0, amount)
personality.pop(index)
return personality
# A method to format a user's personality for copying
def __format_personality(self, user_personality: list):
personality = user_personality[:]
personality = [trait.lower() for trait in personality]
return personality
# A method to check and remove Convai generated names (any traits with "my name is")
def __remove_name_trait(self, personality: list):
revised_personality = []
# Loop and keep traits that do not contain "my name is"
for trait in personality:
if "my name is" not in trait:
revised_personality.append(trait)
return revised_personality
|
StarcoderdataPython
|
74739
|
from fastapi import FastAPI
from models import User, db
app = FastAPI()
db.init_app(app)
@app.get("/")
async def root():
    # simple greeting / health-check endpoint
return {"hello": "Hello!"}
@app.get("/users")
async def users():
# count number of users in DB
return {"count_users": await db.func.count(User.id).gino.scalar()}
|
StarcoderdataPython
|
1789099
|
from pacman.model.routing_tables.multicast_routing_table import \
MulticastRoutingTable
from pacman.model.routing_tables.multicast_routing_tables import \
MulticastRoutingTables
from spinn_machine.multicast_routing_entry import MulticastRoutingEntry
from spinn_machine.utilities.progress_bar import ProgressBar
MAX_KEYS_SUPPORTED = 2048
MASK = 0xFFFFF800
class BasicRoutingTableGenerator(object):
""" An basic algorithm that can produce routing tables
"""
def __call__(
self, routing_infos, routing_table_by_partitions,
machine):
"""
:param routing_infos:
:param routing_table_by_partitions:
:param machine:
:return:
"""
progress_bar = ProgressBar(
len(list(machine.chips)), "Generating routing tables")
routing_tables = MulticastRoutingTables()
for chip in machine.chips:
partitions_in_table = routing_table_by_partitions.\
get_entries_for_router(chip.x, chip.y)
if len(partitions_in_table) != 0:
routing_table = MulticastRoutingTable(chip.x, chip.y)
for partition in partitions_in_table:
keys_and_masks = routing_infos.\
get_keys_and_masks_from_partition(partition)
entry = partitions_in_table[partition]
for key_and_mask in keys_and_masks:
multicast_routing_entry = MulticastRoutingEntry(
routing_entry_key=key_and_mask.key_combo,
defaultable=entry.defaultable,
mask=key_and_mask.mask,
link_ids=entry.out_going_links,
processor_ids=entry.out_going_processors)
routing_table.add_mutlicast_routing_entry(
multicast_routing_entry)
routing_tables.add_routing_table(routing_table)
progress_bar.update()
progress_bar.end()
return {"router_tables": routing_tables}
|
StarcoderdataPython
|
1815925
|
from typing import List
import attr
from . import helpers
from .action_test_summary_identifiable_object import (
ActionTestSummaryIdentifiableObject,
)
@attr.s
class ActionTestSummaryGroup(ActionTestSummaryIdentifiableObject):
subtests: List[ActionTestSummaryIdentifiableObject] = attr.ib()
@classmethod
def from_report(cls, report: dict):
if report["_type"]["_name"] != "ActionTestSummaryGroup":
raise ValueError("type error")
return cls(
cls.convert_name_field(report),
cls.convert_identifier_field(report),
helpers.list_from_report(
ActionTestSummaryIdentifiableObject,
report.get("subtests"),
dict(default=[]),
),
)
helpers.registry_subtype(
ActionTestSummaryGroup, ActionTestSummaryIdentifiableObject
)
|
StarcoderdataPython
|
1718927
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Osyris contributors (https://github.com/osyris-project/osyris)
from common import arrayclose, arraytrue, arrayequal
from osyris import Array, units
from copy import copy, deepcopy
import numpy as np
from pint.errors import DimensionalityError
import pytest
def test_constructor_ndarray():
a = np.arange(100.)
array = Array(values=a, unit='m')
assert array.unit == units('m')
assert len(array) == len(a)
assert array.shape == a.shape
assert np.array_equal(array.values, a)
def test_constructor_list():
alist = [1., 2., 3., 4., 5.]
array = Array(values=alist, unit='s')
assert array.unit == units('s')
assert np.array_equal(array.values, alist)
def test_constructor_int():
num = 15
array = Array(values=num, unit='m')
assert array.unit == units('m')
assert np.array_equal(array.values, np.array(num))
def test_constructor_float():
num = 154.77
array = Array(values=num, unit='m')
assert array.unit == units('m')
assert np.array_equal(array.values, np.array(num))
def test_constructor_quantity():
q = 6.7 * units('K')
array = Array(values=q)
assert array.unit == units('K')
assert np.array_equal(array.values, np.array(q.magnitude))
def test_bad_constructor_quantity_with_unit():
q = 6.7 * units('K')
with pytest.raises(ValueError):
_ = Array(values=q, unit='s')
def test_constructor_masked_array():
a = np.arange(5.)
b = np.ma.masked_where(a > 2, a)
array = Array(values=b, unit='m')
assert array.unit == units('m')
assert len(array) == len(b)
assert array.shape == b.shape
assert np.array_equal(array.values, b)
assert np.array_equal(array.values.mask, [False, False, False, True, True])
def test_addition():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[7., 9., 11., 13., 15.], unit='m')
assert arrayclose(a + b, expected)
def test_addition_conversion():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='cm')
expected = Array(values=[1.06, 2.07, 3.08, 4.09, 5.1], unit='m')
assert arrayclose(a + b, expected)
def test_addition_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='s')
with pytest.raises(DimensionalityError):
_ = a + b
with pytest.raises(TypeError):
_ = a + 3.0
def test_addition_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.5 * units('m')
expected = Array(values=[4.5, 5.5, 6.5, 7.5, 8.5], unit='m')
assert arrayclose(a + b, expected)
def test_addition_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[7., 9., 11., 13., 15.], unit='m')
a += b
assert arrayclose(a, expected)
def test_addition_quantity_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.5 * units('m')
expected = Array(values=[4.5, 5.5, 6.5, 7.5, 8.5], unit='m')
a += b
assert arrayclose(a, expected)
def test_subtraction():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[5., 5., 5., 5., 5.], unit='m')
assert arrayclose(b - a, expected)
def test_subtraction_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='s')
with pytest.raises(DimensionalityError):
_ = a - b
with pytest.raises(TypeError):
_ = a - 3.0
def test_subtraction_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.5 * units('m')
expected = Array(values=[-2.5, -1.5, -0.5, 0.5, 1.5], unit='m')
assert arrayclose(a - b, expected)
def test_subtraction_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[5., 5., 5., 5., 5.], unit='m')
b -= a
assert arrayclose(b, expected)
def test_subtraction_quantity_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.5 * units('m')
expected = Array(values=[-2.5, -1.5, -0.5, 0.5, 1.5], unit='m')
a -= b
assert arrayclose(a, expected)
def test_multiplication():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[6., 14., 24., 36., 50.], unit='m*m')
assert arrayclose(a * b, expected)
def test_multiplication_conversion():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='cm')
expected = Array(values=[0.06, 0.14, 0.24, 0.36, 0.5], unit='m*m')
assert arrayclose(a * b, expected)
def test_multiplication_float():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.0
expected = Array(values=[3., 6., 9., 12., 15.], unit='m')
assert arrayclose(a * b, expected)
assert arrayclose(b * a, expected)
def test_multiplication_ndarray():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = np.arange(5.)
expected = Array(values=[0., 2., 6., 12., 20.], unit='m')
assert arrayclose(a * b, expected)
assert arrayclose(b * a, expected)
def test_multiplication_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.5 * units('s')
expected = Array(values=[3.5, 7.0, 10.5, 14.0, 17.5], unit='m*s')
assert arrayclose(a * b, expected)
def test_multiplication_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[6., 14., 24., 36., 50.], unit='m*m')
a *= b
assert arrayclose(a, expected)
def test_multiplication_float_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.0
expected = Array(values=[3., 6., 9., 12., 15.], unit='m')
a *= b
assert arrayclose(a, expected)
def test_multiplication_ndarray_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = np.arange(5.)
expected = Array(values=[0., 2., 6., 12., 20.], unit='m')
a *= b
assert arrayclose(a, expected)
def test_multiplication_quantity_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3.5 * units('s')
expected = Array(values=[3.5, 7.0, 10.5, 14.0, 17.5], unit='m*s')
a *= b
assert arrayclose(a, expected)
def test_division():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[6., 3.5, 8. / 3., 2.25, 2.], unit='m/s')
assert arrayclose(b / a, expected)
def test_division_float():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = 3.0
expected = Array(values=[1. / 3., 2. / 3., 1., 4. / 3., 5. / 3.], unit='s')
assert arrayclose(a / b, expected)
expected = Array(values=[3., 3. / 2., 1., 3. / 4., 3. / 5.], unit='1/s')
assert arrayclose(b / a, expected)
def test_division_ndarray():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = np.arange(5., 10.)
expected = Array(values=[1. / 5., 2. / 6., 3. / 7., 4. / 8., 5. / 9.], unit='s')
assert arrayclose(a / b, expected)
# expected = Array(values=[3., 3. / 2., 1., 3. / 4., 3. / 5.], unit='1/s')
# assert arrayclose(b / a, expected)
def test_division_quantity():
a = Array(values=[0., 2., 4., 6., 200.], unit='s')
b = 2.0 * units('s')
expected = Array(values=[0., 1., 2., 3., 100.], unit='dimensionless')
assert arrayclose(a / b, expected)
def test_division_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 8., 9., 10.], unit='m')
expected = Array(values=[6., 3.5, 8. / 3., 2.25, 2.], unit='m/s')
b /= a
assert arrayclose(b, expected)
def test_division_float_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = 3.0
expected = Array(values=[1. / 3., 2. / 3., 1., 4. / 3., 5. / 3.], unit='s')
a /= b
assert arrayclose(a, expected)
def test_division_ndarray_inplace():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = np.arange(5., 10.)
expected = Array(values=[1. / 5., 2. / 6., 3. / 7., 4. / 8., 5. / 9.], unit='s')
a /= b
assert arrayclose(a, expected)
# expected = Array(values=[3., 3. / 2., 1., 3. / 4., 3. / 5.], unit='1/s')
# assert arrayclose(b / a, expected)
def test_division_quantity_inplace():
a = Array(values=[0., 2., 4., 6., 200.], unit='s')
b = 2.0 * units('s')
expected = Array(values=[0., 1., 2., 3., 100.], unit='dimensionless')
a /= b
assert arrayclose(a, expected)
def test_power():
a = Array(values=[1., 2., 4., 6., 200.], unit='s')
expected = Array(values=[1., 8., 64., 216., 8.0e6], unit='s**3')
assert arrayclose(a**3, expected)
def test_negative():
a = Array(values=[1., 2., 4., 6., 200.], unit='s')
expected = Array(values=[-1., -2., -4., -6., -200.], unit='s')
assert arrayequal(-a, expected)
def test_equal():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[11., 2., 3., 4.1, 5.], unit='m')
expected = [False, True, True, False, True]
assert all((a == b).values == expected)
def test_equal_conversion():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[1100., 200., 300., 410., 500.], unit='cm')
expected = [False, True, True, False, True]
assert all((a == b).values == expected)
def test_equal_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[11., 2., 3., 4.1, 5.], unit='s')
with pytest.raises(DimensionalityError):
_ = a == b
def test_equal_ndarray():
a = Array(values=[1., 2., 3., 4., 5.])
b = np.array([11., 2., 3., 4.1, 5.])
expected = [False, True, True, False, True]
assert all((a == b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a == b
def test_equal_float():
a = Array(values=[1., 2., 3., 4., 5.])
b = 3.
expected = [False, False, True, False, False]
assert all((a == b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a == b
def test_equal_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3. * units('m')
expected = [False, False, True, False, False]
assert all((a == b).values == expected)
b = 3. * units('s')
with pytest.raises(DimensionalityError):
_ = a == b
def test_not_equal():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[11., 2., 3., 4.1, 5.], unit='m')
expected = [True, False, False, True, False]
assert all((a != b).values == expected)
def test_not_equal_conversion():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[1100., 200., 300., 410., 500.], unit='cm')
expected = [True, False, False, True, False]
assert all((a != b).values == expected)
def test_not_equal_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[11., 2., 3., 4.1, 5.], unit='s')
with pytest.raises(DimensionalityError):
_ = a != b
def test_not_equal_ndarray():
a = Array(values=[1., 2., 3., 4., 5.])
b = np.array([11., 2., 3., 4.1, 5.])
expected = [True, False, False, True, False]
assert all((a != b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a != b
def test_not_equal_float():
a = Array(values=[1., 2., 3., 4., 5.])
b = 3.
expected = [True, True, False, True, True]
assert all((a != b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a != b
def test_not_equal_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3. * units('m')
expected = [True, True, False, True, True]
assert all((a != b).values == expected)
b = 3. * units('s')
with pytest.raises(DimensionalityError):
_ = a != b
def test_less_than():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='s')
expected = [True, True, False, False, True]
assert all((a < b).values == expected)
def test_less_than_conversion():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[600., 700., 100., 400., 1000.], unit='cm')
expected = [True, True, False, False, True]
assert all((a < b).values == expected)
def test_less_than_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='m')
with pytest.raises(DimensionalityError):
_ = a < b
def test_less_than_ndarray():
a = Array(values=[1., 2., 3., 4., 5.])
b = np.array([6., 7., 1., 4., 10.])
expected = [True, True, False, False, True]
assert all((a < b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a < b
def test_less_than_float():
a = Array(values=[1., 2., 3., 4., 5.])
b = 3.
expected = [True, True, False, False, False]
assert all((a < b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a < b
def test_less_than_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3. * units('m')
expected = [True, True, False, False, False]
assert all((a < b).values == expected)
b = 3. * units('s')
with pytest.raises(DimensionalityError):
_ = a < b
def test_less_equal():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='s')
expected = [True, True, False, True, True]
assert all((a <= b).values == expected)
def test_less_equal_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='m')
with pytest.raises(DimensionalityError):
_ = a <= b
def test_less_equal_ndarray():
a = Array(values=[1., 2., 3., 4., 5.])
b = np.array([6., 7., 1., 4., 10.])
expected = [True, True, False, True, True]
assert all((a <= b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a < b
def test_less_equal_float():
a = Array(values=[1., 2., 3., 4., 5.])
b = 3.
expected = [True, True, True, False, False]
assert all((a <= b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a < b
def test_less_equal_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3. * units('m')
expected = [True, True, True, False, False]
assert all((a <= b).values == expected)
b = 3. * units('s')
with pytest.raises(DimensionalityError):
_ = a < b
def test_greater_than():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='s')
expected = [True, True, False, False, True]
assert all((b > a).values == expected)
def test_greater_than_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='K')
with pytest.raises(DimensionalityError):
_ = b > a
def test_greater_than_ndarray():
a = Array(values=[1., 2., 3., 4., 5.])
b = np.array([6., 7., 1., 4., 10.])
expected = [False, False, True, False, False]
assert all((a > b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a > b
def test_greater_than_float():
a = Array(values=[1., 2., 3., 4., 5.])
b = 3.
expected = [False, False, False, True, True]
assert all((a > b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a > b
def test_greater_than_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3. * units('m')
expected = [False, False, False, True, True]
assert all((a > b).values == expected)
b = 3. * units('s')
with pytest.raises(DimensionalityError):
_ = a > b
def test_greater_equal():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='s')
expected = [True, True, False, True, True]
assert all((b >= a).values == expected)
def test_greater_equal_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='s')
b = Array(values=[6., 7., 1., 4., 10.], unit='K')
with pytest.raises(DimensionalityError):
_ = b >= a
def test_greater_equal_ndarray():
a = Array(values=[1., 2., 3., 4., 5.])
b = np.array([6., 7., 1., 4., 10.])
expected = [False, False, True, True, False]
assert all((a >= b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a >= b
def test_greater_equal_float():
a = Array(values=[1., 2., 3., 4., 5.])
b = 3.
expected = [False, False, True, True, True]
assert all((a >= b).values == expected)
a.unit = 'm'
with pytest.raises(DimensionalityError):
_ = a >= b
def test_greater_equal_quantity():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = 3. * units('m')
expected = [False, False, True, True, True]
assert all((a >= b).values == expected)
b = 3. * units('s')
with pytest.raises(DimensionalityError):
_ = a >= b
def test_logical_and():
a = Array(values=[True, True, True, False, False, False])
b = Array(values=[True, False, True, False, True, False])
expected = [True, False, True, False, False, False]
assert all((b & a).values == expected)
def test_logical_or():
a = Array(values=[True, True, True, False, False, False])
b = Array(values=[True, False, True, False, True, False])
expected = [True, True, True, False, True, False]
assert all((b | a).values == expected)
def test_logical_xor():
a = Array(values=[True, True, True, False, False, False])
b = Array(values=[True, False, True, False, True, False])
expected = [False, True, False, False, True, False]
assert all((b ^ a).values == expected)
def test_logical_invert():
a = Array(values=[True, True, False, False, True, False])
expected = [False, False, True, True, False, True]
assert all((~a).values == expected)
def test_to():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
b = Array(values=[1.0e-3, 2.0e-3, 3.0e-3, 4.0e-3, 5.0e-3], unit='km')
assert arrayclose(a.to('km'), b)
assert a.unit == units('m')
def test_to_bad_units():
a = Array(values=[1., 2., 3., 4., 5.], unit='m')
with pytest.raises(DimensionalityError):
_ = a.to('s')
def test_min():
a = Array(values=[1., -2., 3., 0.4, 0.5, 0.6], unit='m')
assert (a.min() == Array(values=-2., unit='m')).values
b = Array(values=np.array([1., -2., 3., 0.4, 0.5, 0.6]).reshape(2, 3), unit='m')
assert (b.min() == Array(values=-2., unit='m')).values
def test_max():
a = Array(values=[1., 2., 3., -15., 5., 6.], unit='m')
assert (a.max() == Array(values=6.0, unit='m')).values
b = Array(values=np.array([1., 2., 3., -15., 5., 6.]).reshape(2, 3), unit='m')
assert (b.max() == Array(values=6.0, unit='m')).values
def test_reshape():
a = Array(values=[1., 2., 3., 4., 5., 6.], unit='m')
expected = Array(values=[[1., 2., 3.], [4., 5., 6.]], unit='m')
assert arraytrue(np.ravel(a.reshape(2, 3) == expected))
def test_slicing():
a = Array(values=[11., 12., 13., 14., 15.], unit='m')
assert a[2] == Array(values=[13.], unit='m')
assert arraytrue(a[:4] == Array(values=[11., 12., 13., 14.], unit='m'))
assert arraytrue(a[2:4] == Array(values=[13., 14.], unit='m'))
def test_slicing_vector():
a = Array(values=np.arange(12.).reshape(4, 3), unit='m')
assert arraytrue(np.ravel(a[2:3] == Array(values=[[6., 7., 8.]], unit='m')))
assert a[2:3].shape == (1, 3)
assert arraytrue(
np.ravel(a[:2] == Array(values=[[0., 1., 2.], [3., 4., 5.]], unit='m')))
def test_copy():
a = Array(values=[11., 12., 13., 14., 15.], unit='m')
b = a.copy()
a *= 10.
assert arraytrue(b == Array(values=[11., 12., 13., 14., 15.], unit='m'))
def test_copy_overload():
a = Array(values=[11., 12., 13., 14., 15.], unit='m')
b = copy(a)
a *= 10.
assert arraytrue(b == Array(values=[11., 12., 13., 14., 15.], unit='m'))
def test_deepcopy():
a = Array(values=[11., 12., 13., 14., 15.], unit='m')
b = deepcopy(a)
a *= 10.
assert arraytrue(b == Array(values=[11., 12., 13., 14., 15.], unit='m'))
def test_numpy_unary():
values = [1., 2., 3., 4., 5.]
a = Array(values=values, unit='m')
expected = np.log10(values)
result = np.log10(a)
assert np.allclose(result.values, expected)
assert result.unit == units('m')
def test_numpy_sqrt():
values = [1., 2., 3., 4., 5.]
a = Array(values=values, unit='m*m')
expected = np.sqrt(values)
result = np.sqrt(a)
assert np.allclose(result.values, expected)
assert result.unit == units('m')
def test_numpy_binary():
a_buf = [1., 2., 3., 4., 5.]
b_buf = [6., 7., 8., 9., 10.]
a = Array(values=a_buf, unit='m')
b = Array(values=b_buf, unit='m')
expected = np.dot(a_buf, b_buf)
result = np.dot(a, b)
assert result.values == expected
assert result.unit == units('m')
def test_numpy_iterable():
a_buf = [1., 2., 3., 4., 5.]
b_buf = [6., 7., 8., 9., 10.]
a = Array(values=a_buf, unit='m')
b = Array(values=b_buf, unit='m')
expected = np.concatenate([a_buf, b_buf])
result = np.concatenate([a, b])
assert np.array_equal(result.values, expected)
assert result.unit == units('m')
def test_numpy_multiply_with_ndarray():
a_buf = [1., 2., 3., 4., 5.]
a = Array(values=a_buf, unit='m')
b = np.array([6., 7., 8., 9., 10.])
expected = np.multiply(a_buf, b)
result = np.multiply(a, b)
assert np.array_equal(result.values, expected)
assert result.unit == units('m')
result = np.multiply(b, a)
assert np.array_equal(result.values, expected)
assert result.unit == units('m')
def test_numpy_multiply_with_quantity():
a_buf = [1., 2., 3., 4., 5.]
a = Array(values=a_buf, unit='m')
b = 3.5 * units('s')
expected = np.multiply(a_buf, b.magnitude)
result = np.multiply(a, b)
assert np.array_equal(result.values, expected)
assert result.unit == units('m*s')
def test_numpy_multiply_with_float():
a_buf = [1., 2., 3., 4., 5.]
a = Array(values=a_buf, unit='m')
b = 3.5
expected = np.multiply(a_buf, b)
result = np.multiply(a, b)
assert np.array_equal(result.values, expected)
assert result.unit == units('m')
result = np.multiply(b, a)
assert np.array_equal(result.values, expected)
assert result.unit == units('m')
def test_numpy_divide_with_ndarray():
a_buf = [1., 2., 3., 4., 5.]
a = Array(values=a_buf, unit='m')
b = np.array([6., 7., 8., 9., 10.])
expected = np.divide(a_buf, b)
result = np.divide(a, b)
assert np.array_equal(result.values, expected)
assert result.unit == units('m')
expected = np.divide(b, a_buf)
result = np.divide(b, a)
assert np.array_equal(result.values, expected)
assert result.unit == units('1/m')
def test_numpy_divide_with_quantity():
a_buf = [1., 2., 3., 4., 5.]
a = Array(values=a_buf, unit='m')
b = 3.5 * units('s')
expected = np.divide(a_buf, b.magnitude)
result = np.divide(a, b)
assert np.array_equal(result.values, expected)
assert result.unit == units('m/s')
def test_numpy_divide_with_float():
a_buf = [1., 2., 3., 4., 5.]
a = Array(values=a_buf, unit='m')
b = 3.5
expected = np.divide(a_buf, b)
result = np.divide(a, b)
assert np.array_equal(result.values, expected)
assert result.unit == units('m')
expected = np.divide(b, a_buf)
result = np.divide(b, a)
assert np.array_equal(result.values, expected)
assert result.unit == units('1/m')
|
StarcoderdataPython
|
9648453
|
""" Test for quest load/save handling system """
import pytest
from semver import VersionInfo # type: ignore
from tick import TickType
from quest import Quest, Difficulty, QuestDefinitionError, DEBUG_QUEST_NAME
from quest.stage import DebugStage
from quest.loader import all_quests
from quest.content.debug import DebugQuest
class BadQuest(Quest):
version = VersionInfo.parse("1.0.0")
description = "Bad quest for testing, it is missing stuff"
class BadStageCycle(Quest):
version = VersionInfo.parse("1.0.0")
difficulty = Difficulty.RESERVED
description = "Bad quest for testing, it has malformed stages"
class Start(DebugStage):
children = ["Loop"]
class Loop(DebugStage):
""" This should form a cycle, and get flagged by test """
children = ["Start"]
class BadStageNotExist(Quest):
version = VersionInfo.parse("1.0.0")
difficulty = Difficulty.RESERVED
description = "Bad quest for testing, it has malformed stages"
class Start(DebugStage):
""" This references a stage that doesn't exist, and get flagged """
children = ["Loop"]
def test_all_quest_instantiate(testing_quest_page):
"""Instantiate all quests to check abstract base class implementation
and stage loading
"""
for quest_class in all_quests.values():
# should succeed if correctly implemented
quest = quest_class(testing_quest_page)
def test_fail_instantiate(testing_quest_page):
""" Test bad quests that fail to instantiate """
with pytest.raises(TypeError):
BadQuest(testing_quest_page)
def test_fail_stage(testing_quest_page):
""" Test bad quests that fail due to stage problems """
with pytest.raises(QuestDefinitionError):
quest = BadStageCycle(testing_quest_page)
with pytest.raises(QuestDefinitionError):
quest = BadStageNotExist(testing_quest_page)
def test_quest_has_stages(testing_quest_page):
""" Tests if quest has stages """
quest = DebugQuest(testing_quest_page)
stages = quest.stages
assert len(stages)
# check repr
first_stage = next(iter(stages.values()))(quest)
assert str(first_stage)
assert repr(first_stage)
def test_execute(testing_quest_page):
""" Test quest execution """
# check we didn't start off with any completed stages
assert not testing_quest_page.data.completed_stages
# Debugquest is linear, so we expect to see only the start quest
testing_quest_page.execute(TickType.FULL)
assert len(testing_quest_page.data.completed_stages) == len(
testing_quest_page.quest.stages
)
def test_resume(testing_quest_page):
""" Test quest resume """
# manually set completed stages
testing_quest_page.data.completed_stages = ["Start"]
# resume
testing_quest_page.execute(TickType.FULL)
assert len(testing_quest_page.data.completed_stages) == len(
testing_quest_page.quest.stages
)
def test_done_skip(testing_quest_page):
""" Test quest execution skipping if done """
# check we have no completed stages
assert not testing_quest_page.data.completed_stages
testing_quest_page.mark_quest_complete()
# excecution should just skip because we marked quest as complete
testing_quest_page.execute(TickType.FULL)
assert not testing_quest_page.data.completed_stages
|
StarcoderdataPython
|
285184
|
import sys
import os
def main(argv):
print("")
infile = "..\\days\\day3.txt"
outfile = os.path.splitext(infile)[0] + '.dat'
output = "day3_input:" + chr(10)
with open(infile, 'r', encoding='utf-8-sig') as f:
for line in f:
line = line.strip()
output += " dc.b\t'" + line + "'\n"
output += "day3_input_end:" + chr(10)
# Write fixed file
fout = open(outfile, "wt")
fout.write(output)
fout.close()
print("Done!")
if __name__ == "__main__":
main(sys.argv)
|
StarcoderdataPython
|
4834327
|
import logging
import re
import shutil
from chibi.file import Chibi_path
from chibi_dl.site.base.site import Site
logger = logging.getLogger( "chibi_dl.sites.manga_plus.episode" )
class Episode( Site ):
def download( self, path ):
logger.info(
"iniciando la descarga de las {} imagenes del capitulo {}".format(
len( self.images_urls ), self.number,
) )
raise NotImplementedError
def compress( self, path_ouput, path_input, format="zip" ):
logger.info( "comprimiendo capitulo usando {}".format( format ) )
result = Chibi_path( shutil.make_archive(
path_ouput + self.number, format, path_input ) )
expected = result.replace_extensions( "cbz" )
result.move( expected )
return expected
@property
def file_name( self ):
return "{}.{}".format( self.number, "cbz" )
@property
def number( self ):
re_float = re.compile( r"[-+]?([0-9]*\.[0-9]+|[0-9]+)" )
return re_float.findall( self.title )[0]
@property
def images_urls( self ):
try:
return self._images_urls
except AttributeError:
self.load_soup()
return self._images_urls
def load_soup( self ):
page = self.get( self.url )
raise NotImplementedError
return page.text
def enumerate_images_urls( self ):
for i, url in enumerate( self.images_urls ):
ext = url.rsplit( '.', 1 )[-1]
yield "{}.{}".format( i, ext ), url
|
StarcoderdataPython
|
8002986
|
import os
from itertools import filterfalse
from typing import Iterable
from . import strings
from .utils import is_python_module
def validate_paths(paths: Iterable[str]) -> None:
non_existent_paths = list(filterfalse(os.path.exists, paths))
if not non_existent_paths:
return
non_existent_paths_str = strings.join(map(strings.wrap_with_quotes,
non_existent_paths))
err_msg = ('Next paths not found on file system:\n'
'{paths}.'
.format(paths=non_existent_paths_str))
raise FileNotFoundError(err_msg)
def validate_modules_paths(paths: Iterable[str]) -> None:
invalid_paths = list(filterfalse(is_python_module, paths))
if invalid_paths:
err_msg = ('Next paths are not valid Python modules:\n'
'{paths}'.format(paths=strings.join(invalid_paths)))
raise OSError(err_msg)
|
StarcoderdataPython
|
11318003
|
# -*- coding: utf-8 -*-
"""QCDF-1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/18Dia-C1cambzheRZ2fzIf1EAO0fte_0F
"""
!unzip "Captain Tsubasa.zip"
import pandas as pd
train = pd.read_csv('train.csv')
train
f = open("output.txt", "w")
f.write(str(len(set(train['playerId'].tolist()))) + '\n')
f.write('p_18\n')
f.write('p_18,p_147\n')
f.write('71')
f.close()
goals = [0] * 160
shoots = [0] * 160
for i, row in train.iterrows():
shoots[int(row['playerId'].split('_')[1]) - 1] += 1
    if row['outcome'] == 'گُل':  # Persian for "goal"
goals[int(row['playerId'].split('_')[1]) - 1] += 1
ratio = []
for i in range(len(goals)):
ratio.append(goals[i] / shoots[i])
import math
max_dist = 0  # renamed from `max` to avoid shadowing the builtin
for i, row in train.iterrows():
    tmp = math.sqrt(row['x'] ** 2 + row['y'] ** 2)
    if tmp > max_dist:
        max_dist = tmp
max_dist
|
StarcoderdataPython
|
3470429
|
from django.db import models
from django.conf import settings
from django.apps import apps
from django.contrib.auth.hashers import make_password, check_password
class UserPasswordHistory(models.Model):
DEFAULT_PASSWORD_COUNT = 5
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
password_1 = models.CharField(blank=True, null=True, max_length=128)
password_2 = models.CharField(blank=True, null=True, max_length=128)
password_3 = models.CharField(blank=True, null=True, max_length=128)
password_4 = models.CharField(blank=True, null=True, max_length=128)
password_5 = models.CharField(blank=True, null=True, max_length=128)
updated_at = models.DateTimeField(auto_now=True)
def password_is_used(self, password):
pass_count = getattr(
settings, "PREVIOUS_PASSWORD_COUNT", self.DEFAULT_PASSWORD_COUNT
)
for x in range(1, min(pass_count, 5) + 1):
f = getattr(self, f'password_{x}', None)
if f is not None and check_password(password, f):
return True
return False
def store_password(self):
        # Shift the stored hashes down one slot; the oldest entry is dropped.
        self.password_5 = self.password_4
        self.password_4 = self.password_3
        self.password_3 = self.password_2
        self.password_2 = self.password_1
        # Assumption: the original assignments were redacted; storing the
        # user's current (already hashed) password is the likely intent.
        self.password_1 = self.user.password
self.save()
def __str__(self):
return self.user.email
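# Hedged usage sketch (added for illustration; not part of the original model).
# A hypothetical password-change flow; the calling code and error handling are
# assumptions.
#
#   history, _ = UserPasswordHistory.objects.get_or_create(user=user)
#   if history.password_is_used(new_raw_password):
#       raise ValueError("This password was used recently; choose another one.")
#   user.set_password(new_raw_password)
#   user.save()
#   history.store_password()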
|
StarcoderdataPython
|
1745537
|
from ps4a import *
#
# Test code
# You don't need to understand how this test code works (but feel free to look it over!)
# To run these tests, simply run this file (open up in your IDE, then run the file as normal)
def test_getWordScore():
"""
Unit test for getWordScore
"""
failure = False
# dictionary of words and scores
words = {("", 7): 0, ("it", 7): 4, ("was", 7): 18, ("scored", 7): 54,
("waybill", 7): 155, ("outgnaw", 7): 127, ("fork", 7): 44, ("fork", 4): 94}
for (word, n) in words.keys():
score = getWordScore(word, n)
if score != words[(word, n)]:
print("FAILURE: test_getWordScore()")
print("\tExpected", words[(word, n)], "points but got '" +
str(score) + "' for word '" + word + "', n=" + str(n))
failure = True
if not failure:
print("SUCCESS: test_getWordScore()")
# end of test_getWordScore
def test_updateHand():
"""
Unit test for updateHand
"""
# test 1
handOrig = {'a': 1, 'q': 1, 'l': 2, 'm': 1, 'u': 1, 'i': 1}
handCopy = handOrig.copy()
word = "quail"
hand2 = updateHand(handCopy, word)
expectedHand1 = {'l': 1, 'm': 1}
expectedHand2 = {'a': 0, 'q': 0, 'l': 1, 'm': 1, 'u': 0, 'i': 0}
if hand2 != expectedHand1 and hand2 != expectedHand2:
print("FAILURE: test_updateHand('" + word + "', " + str(handOrig) + ")")
print("\tReturned: ", hand2, "\n\t-- but expected:",
expectedHand1, "or", expectedHand2)
return # exit function
if handCopy != handOrig:
print("FAILURE: test_updateHand('" + word + "', " + str(handOrig) + ")")
print("\tOriginal hand was", handOrig)
print("\tbut implementation of updateHand mutated the original hand!")
print("\tNow the hand looks like this:", handCopy)
return # exit function
# test 2
handOrig = {'e': 1, 'v': 2, 'n': 1, 'i': 1, 'l': 2}
handCopy = handOrig.copy()
word = "evil"
hand2 = updateHand(handCopy, word)
expectedHand1 = {'v': 1, 'n': 1, 'l': 1}
expectedHand2 = {'e': 0, 'v': 1, 'n': 1, 'i': 0, 'l': 1}
if hand2 != expectedHand1 and hand2 != expectedHand2:
print("FAILURE: test_updateHand('" + word + "', " + str(handOrig) + ")")
print("\tReturned: ", hand2, "\n\t-- but expected:",
expectedHand1, "or", expectedHand2)
return # exit function
if handCopy != handOrig:
print("FAILURE: test_updateHand('" + word + "', " + str(handOrig) + ")")
print("\tOriginal hand was", handOrig)
print("\tbut implementation of updateHand mutated the original hand!")
print("\tNow the hand looks like this:", handCopy)
return # exit function
# test 3
handOrig = {'h': 1, 'e': 1, 'l': 2, 'o': 1}
handCopy = handOrig.copy()
word = "hello"
hand2 = updateHand(handCopy, word)
expectedHand1 = {}
expectedHand2 = {'h': 0, 'e': 0, 'l': 0, 'o': 0}
if hand2 != expectedHand1 and hand2 != expectedHand2:
print("FAILURE: test_updateHand('" + word + "', " + str(handOrig) + ")")
print("\tReturned: ", hand2, "\n\t-- but expected:",
expectedHand1, "or", expectedHand2)
return # exit function
if handCopy != handOrig:
print("FAILURE: test_updateHand('" + word + "', " + str(handOrig) + ")")
print("\tOriginal hand was", handOrig)
print("\tbut implementation of updateHand mutated the original hand!")
print("\tNow the hand looks like this:", handCopy)
return # exit function
print("SUCCESS: test_updateHand()")
# end of test_updateHand
def test_isValidWord(wordList):
"""
Unit test for isValidWord
"""
failure = False
# test 1
word = "hello"
handOrig = getFrequencyDict(word)
handCopy = handOrig.copy()
if not isValidWord(word, handCopy, wordList):
print("FAILURE: test_isValidWord()")
print("\tExpected True, but got False for word: '" +
word + "' and hand:", handOrig)
failure = True
# Test a second time to see if wordList or hand has been modified
if not isValidWord(word, handCopy, wordList):
print("FAILURE: test_isValidWord()")
if handCopy != handOrig:
print("\tTesting word", word,
"for a second time - be sure you're not modifying hand.")
print("\tAt this point, hand ought to be",
handOrig, "but it is", handCopy)
else:
print("\tTesting word", word,
"for a second time - have you modified wordList?")
wordInWL = word in wordList
print("The word", word, "should be in wordList - is it?", wordInWL)
print("\tExpected True, but got False for word: '" +
word + "' and hand:", handCopy)
failure = True
# test 2
hand = {'r': 1, 'a': 3, 'p': 2, 'e': 1, 't': 1, 'u': 1}
word = "rapture"
if isValidWord(word, hand, wordList):
print("FAILURE: test_isValidWord()")
print("\tExpected False, but got True for word: '" +
word + "' and hand:", hand)
failure = True
# test 3
hand = {'n': 1, 'h': 1, 'o': 1, 'y': 1, 'd': 1, 'w': 1, 'e': 2}
word = "honey"
if not isValidWord(word, hand, wordList):
print("FAILURE: test_isValidWord()")
print("\tExpected True, but got False for word: '" +
word + "' and hand:", hand)
failure = True
# test 4
hand = {'r': 1, 'a': 3, 'p': 2, 't': 1, 'u': 2}
word = "honey"
if isValidWord(word, hand, wordList):
print("FAILURE: test_isValidWord()")
print("\tExpected False, but got True for word: '" +
word + "' and hand:", hand)
failure = True
# test 5
hand = {'e': 1, 'v': 2, 'n': 1, 'i': 1, 'l': 2}
word = "evil"
if not isValidWord(word, hand, wordList):
print("FAILURE: test_isValidWord()")
print("\tExpected True, but got False for word: '" +
word + "' and hand:", hand)
failure = True
# test 6
word = "even"
if isValidWord(word, hand, wordList):
print("FAILURE: test_isValidWord()")
print("\tExpected False, but got True for word: '" +
word + "' and hand:", hand)
print("\t(If this is the only failure, make sure isValidWord() isn't mutating its inputs)")
failure = True
if not failure:
print("SUCCESS: test_isValidWord()")
wordList = loadWords()
print("----------------------------------------------------------------------")
print("Testing getWordScore...")
test_getWordScore()
print("----------------------------------------------------------------------")
print("Testing updateHand...")
test_updateHand()
print("----------------------------------------------------------------------")
print("Testing isValidWord...")
test_isValidWord(wordList)
print("----------------------------------------------------------------------")
print("All done!")
|
StarcoderdataPython
|
11270940
|
<filename>nascd/ImprovedFishes/keras_baseline.py
import time
import tensorflow as tf
import numpy as np
from nascd.ImprovedFishes.load_data import load_data
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
(X_train, y_train), (X_valid, y_valid) = load_data()
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(10, activation=tf.nn.relu)
self.dense11 = tf.keras.layers.Dense(10, activation=tf.nn.relu)
self.dense12 = tf.keras.layers.Dense(10, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5)
# self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
x = self.dense11(x)
x = self.dense12(x)
# if training:
# x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
# code to bce loss: https://github.com/tensorflow/tensorflow/blob/v2.1.0/tensorflow/python/keras/backend.py#L4585-L4615
model.compile("adam", loss="mse", metrics=["mse"])
tstart = time.time()
hist = model.fit(X_train, y_train,
validation_data=(X_valid,y_valid),
epochs=1000,
batch_size=8).history
t = time.time() - tstart
pred = model.predict(X_valid)
print("R2 SCORE",r2_score(y_valid, pred))
print(f"Training time: {t}")
plt.plot(hist["loss"], 'b')
plt.plot(hist["val_loss"], 'r')
plt.xlabel("epochs")
plt.ylabel("Loss: MSE")
plt.show()
|
StarcoderdataPython
|
3470865
|
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
def init_browser():
executable_path = {"executable_path":"/usr/local/bin/chromedriver"}
return Browser("chrome", **executable_path, headless=False)
url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
browser = init_browser()
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
news_title = soup.find("div", class_="content_title").text
news_p = soup.find("div", class_="article_teaser_body").text
print(news_title)
print("--------------------------------------------------------")
print(news_p)
# In[9]:
#JPL Mars Space Images - Featured Image
# In[10]:
jpl_url = "https://www.jpl.nasa.gov"
image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(image_url)
html = browser.html
image_soup = BeautifulSoup(html, "html.parser")
image_path = image_soup.find_all("img")[3]["src"]
featured_image_url = jpl_url + image_path
print(featured_image_url)
# In[37]:
#Mars Weather
# In[31]:
mars_weather_url = "https://twitter.com/marswxreport?lang=en"
browser.visit(mars_weather_url)
html = browser.html
weather_soup = BeautifulSoup(html, "html.parser")
mars_weather = weather_soup.find('p', class_='TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text
print(mars_weather)
# In[13]:
#Mars Facts
# In[14]:
mars_facts_df = pd.read_html("https://space-facts.com/mars/")[1]
mars_facts_df
# In[15]:
mars_html = mars_facts_df.to_html()
print(mars_html)
# In[24]:
#Mars Hemispheres
# In[23]:
#Cerberus Hemisphere
url = "https://astrogeology.usgs.gov/search/map/Mars/Viking/cerberus_enhanced"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
cerberus_url = soup.find('div', class_='downloads')
link = cerberus_url.find('a')
href_cerberus = link['href']
print(href_cerberus)
# In[25]:
#Schiaparelli Hemisphere
url = "https://astrogeology.usgs.gov/search/map/Mars/Viking/schiaparelli_enhanced"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
schiaparelli_url = soup.find('div', class_='downloads')
link = schiaparelli_url.find('a')
href_schiaparelli = link['href']
print(href_schiaparelli)
# In[26]:
#Syrtis Hemisphere
url = "https://astrogeology.usgs.gov/search/map/Mars/Viking/syrtis_major_enhanced"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
syrtis_url = soup.find('div', class_='downloads')
link = syrtis_url.find('a')
href_syrtis = link['href']
print(href_syrtis)
# In[27]:
#Valles Hemisphere
url = "https://astrogeology.usgs.gov/search/map/Mars/Viking/valles_marineris_enhanced"
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, "html.parser")
valles_url = soup.find('div', class_='downloads')
link = valles_url.find('a')
href_valles = link['href']
print(href_valles)
# In[30]:
hemisphere_image_urls = [
{"title": "Cerberus Hemisphere", "img_url": href_cerberus},
{"title": "Schiaparelli Hemisphere", "img_url": href_schiaparelli},
{"title": "Syrtis Major Hemisphere", "img_url": href_syrtis},
{"title": "Valles Marineris Hemisphere", "img_url": href_valles}
]
print(hemisphere_image_urls)
# In[ ]:
|
StarcoderdataPython
|
8034612
|
<filename>torchwisdom/core/metrics/__init__.py
from .metrics import *
|
StarcoderdataPython
|
1700709
|
<filename>setup.py
from setuptools import setup, find_packages
setup(
name='currency-wallet',
version='0.1.0',
description="Track investment returns in multiple currencies through the National Bank of Poland's API.",
packages=find_packages(include=['currency_wallet']),
python_requires='>=3.6',
entry_points={
'console_scripts': ['currency-wallet=currency_wallet.cli:cli'],
},
)
|
StarcoderdataPython
|
1776163
|
from __future__ import print_function, absolute_import, division
from gi.repository import Gtk, cairo
from toga.cassowary.widget import Container as CassowaryContainer
class GtkContainer(Gtk.Fixed):
def __init__(self, layout_manager):
super(GtkContainer, self).__init__()
self.layout_manager = layout_manager
def do_get_preferred_width(self):
# Calculate the minimum and natural width of the container.
width = self.layout_manager.bounding_box.width.value
# print "PREFERRED WIDTH", width
return width, width
def do_get_preferred_height(self):
# Calculate the minimum and natural height of the container.
height = self.layout_manager.bounding_box.height.value
# print "PREFERRED HEIGHT", height
return height, height
def do_size_allocate(self, allocation):
# print "Size allocate", allocation.width, 'x', allocation.height, ' @ ', allocation.x, 'x', allocation.y
self.set_allocation(allocation)
# Temporarily enforce a size requirement based on the allocation
with self.layout_manager.layout(allocation.width, allocation.height):
for widget in self.layout_manager.children:
# print(widget, widget._bounding_box)
if not widget._impl.get_visible():
print("CHILD NOT VISIBLE")
else:
min_width, preferred_width = widget._width_hint
min_height, preferred_height = widget._height_hint
x_pos = widget._bounding_box.x.value
if widget._expand_horizontal:
width = widget._bounding_box.width.value
else:
x_pos = x_pos + ((widget._bounding_box.width.value - preferred_width) / 2.0)
width = preferred_width
y_pos = widget._bounding_box.y.value
if widget._expand_vertical:
height = widget._bounding_box.height.value
else:
y_pos = y_pos + ((widget._bounding_box.height.value - preferred_height) / 2.0)
height = preferred_height
child_allocation = cairo.RectangleInt()
child_allocation.x = x_pos
child_allocation.y = y_pos
child_allocation.width = width
child_allocation.height = height
widget._impl.size_allocate(child_allocation)
class Container(CassowaryContainer):
def __init__(self):
super(Container, self).__init__()
def _create_container(self):
return GtkContainer(self._layout_manager)
@property
def _width_hint(self):
return self._impl.get_preferred_width()
@property
def _height_hint(self):
return self._impl.get_preferred_height()
|
StarcoderdataPython
|
1855232
|
import numpy as np
import pytest
from qflow.hamiltonians import HarmonicOscillator
from qflow.samplers import ImportanceSampler
from qflow.wavefunctions import SimpleGaussian, Dnn
from qflow.wavefunctions.nn.layers import DenseLayer
from qflow.wavefunctions.nn.activations import (
sigmoid,
tanh,
relu,
identity,
exponential,
)
from qflow import DistanceCache
small_system = np.zeros((2, 2))
large_system = np.zeros((50, 3))
samples = 10000
H0 = HarmonicOscillator()
psi0 = SimpleGaussian(0.5)
layers = [
DenseLayer(50 * 3, 32, activation=tanh, scale_factor=0.001),
DenseLayer(32, 16, activation=tanh),
DenseLayer(16, 1, activation=exponential),
]
dnn = Dnn()
for l in layers:
dnn.add_layer(l)
def local_energy_gradient(H, psi, sampler, samples):
return H.local_energy_gradient(sampler, psi, samples)
@pytest.mark.benchmark(group="evaluation", warmup=True)
def test_dnn_eval(benchmark):
benchmark(dnn, large_system)
@pytest.mark.benchmark(group="gradient", warmup=True)
def test_dnn_gradient(benchmark):
benchmark(dnn.gradient, large_system)
@pytest.mark.benchmark(group="laplacian", warmup=True)
def test_dnn_laplacian(benchmark):
benchmark(dnn.laplacian, large_system)
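# A minimal sketch of how these benchmarks could be launched directly, assuming the
# pytest-benchmark plugin is installed; --benchmark-only is a standard pytest-benchmark
# flag, and running `pytest` against this file from the command line works just as well.
if __name__ == "__main__":
    pytest.main([__file__, "--benchmark-only"])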
|
StarcoderdataPython
|
5094894
|
<reponame>goerz-testing/pypkg_bintray_01
"""Tests for `pypkg_bintray_01` package."""
import pytest
from pkg_resources import parse_version
import pypkg_bintray_01
def test_valid_version():
"""Check that the package defines a valid ``__version__``."""
v_curr = parse_version(pypkg_bintray_01.__version__)
v_orig = parse_version("0.0.1-dev")
assert v_curr >= v_orig
|
StarcoderdataPython
|
227452
|
<reponame>FarsetLabs/farset-nadine
import os
import time
import urllib
from datetime import datetime, timedelta, date
import sys
import tempfile
import shutil
import traceback
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
from django.utils import timezone
class Command(BaseCommand):
help = "Shows the billing information for a given user."
args = "[username]"
requires_system_checks = False
def handle(self, *labels, **options):
from staff.billing import Run
if not labels or len(labels) != 1:
raise CommandError('Enter one argument, a username.')
        username = labels[0]
try:
user = User.objects.get(username=username)
start_date = timezone.localtime(timezone.now()) - timedelta(days=365)
end_date = timezone.localtime(timezone.now())
print('Run info for %s (%s - %s)' % (user, start_date, end_date))
run = Run(user, start_date, end_date, False)
run.print_info()
except:
traceback.print_exc()
print("Could not find user with username %s" % username)
# Copyright 2010 Office Nomads LLC (http://www.officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
StarcoderdataPython
|
9712352
|
"""
Tests for Year, Quarter, and Month-based DateOffset subclasses
"""
import pytest
import pandas as pd
from pandas.tseries.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
@pytest.mark.parametrize("n", [-2, 1])
@pytest.mark.parametrize(
"cls",
[
MonthBegin,
MonthEnd,
BMonthBegin,
BMonthEnd,
QuarterBegin,
QuarterEnd,
BQuarterBegin,
BQuarterEnd,
YearBegin,
YearEnd,
BYearBegin,
BYearEnd,
],
)
def test_apply_index(cls, n):
offset = cls(n=n)
rng = pd.date_range(start="1/1/2000", periods=100000, freq="T")
ser = pd.Series(rng)
res = rng + offset
assert res.freq is None # not retained
assert res[0] == rng[0] + offset
assert res[-1] == rng[-1] + offset
res2 = ser + offset
# apply_index is only for indexes, not series, so no res2_v2
assert res2.iloc[0] == ser.iloc[0] + offset
assert res2.iloc[-1] == ser.iloc[-1] + offset
|
StarcoderdataPython
|
5149479
|
<filename>npt/search/__init__.py
"""
Query USGS/ODE API for image data products
"""
from npt import log
def ode(dataset: str, bbox: dict, match: str = 'intersect', bbox_ref: str = 'C0'):
"""
Return GeoDataFrame with found data products as features
Input:
- dataset: name of the dataset (see `npt.datasets`)
- bbox: bounding-box to query overlapping products.
Dictionary keys: minlat, maxlat, westlon, eastlon;
Latitude/longitude values range: (-90:90, -180:180)
- match: how to consider overlapping matching footprints.
Options are: 'intersect', 'contain'
- filter: Not implemented yet.
"""
from npt.search._ode import ODE
prods = ODE(dataset).query(bbox=bbox, match=match, bbox_ref=bbox_ref).parse()
if not prods:
return None
return prods.to_dataframe()
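# A minimal usage sketch (the dataset name and bounding box below are illustrative
# assumptions; valid dataset identifiers come from `npt.datasets`):
#
#     bbox = {'minlat': -0.5, 'maxlat': 0.5, 'westlon': -0.5, 'eastlon': 0.5}
#     products = ode('some-dataset-name', bbox, match='intersect')
#     if products is not None:
#         print(products.head())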
|
StarcoderdataPython
|
4869612
|
import os
import uuid
from decouple import config
# Django
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, redirect
from django.template import Context
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.views.generic import ListView
# SendGrid
import sendgrid
from sendgrid.helpers.mail import *
# Permissions
from .permissions import IsOwnerOrReadOnly
# REST Framework
from rest_framework import views, permissions, status, authentication, generics, pagination
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.parsers import MultiPartParser, FormParser, JSONParser
# JWT REST
import rest_framework_jwt.authentication
# Models
from .models import User, JobPost, Membership, UserMembership, Subscription, Payment
# Serializers
from .api import (
JobPostSerializer,
JobPreviewSerializer,
UserIDSerializer,
UserRegistrationSerializer,
UserViewSerializer,
MembershipSerializer,
PaymentViewSerializer,
JWTSerializer
)
import stripe
################### NOTES ######################
# User creation is handled by Djoser, in settings.py / DJOSER / SERIALIZERS
# Django REST Framework Generics reference: https://www.django-rest-framework.org/api-guide/generic-views/
# Django REST Framework Views reference: https://www.django-rest-framework.org/api-guide/views/
################################################
def jwt_get_secret_key(user):
return user.jwt_secret
# determines extra field `user` added to returned JWT (payload itself is determined by payload_handler-> see jwt_config.py)
def jwt_response_handler(token, user=None, request=None):
return {
'token': token,
'user': JWTSerializer(user, context={'request': request}).data
}
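# A sketch of how these two handlers are typically wired up in settings.py for
# django-rest-framework-jwt (the dotted paths below are assumptions and depend on
# where this module actually lives in the project):
#
# JWT_AUTH = {
#     'JWT_GET_USER_SECRET_KEY': 'users.views.jwt_get_secret_key',
#     'JWT_RESPONSE_PAYLOAD_HANDLER': 'users.views.jwt_response_handler',
# }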
class UserCreateView(generics.CreateAPIView):
serializer_class = UserRegistrationSerializer
# Methods create, perform_create, get_success_headers
# all from Django REST Framework source-code mixins:
# https://github.com/encode/django-rest-framework/blob/master/rest_framework/mixins.py
# To customize, must overwrite but also add in default source-code.
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
serializer.save()
def get_success_headers(self, data):
try:
return {'Location': str(data[api_settings.URL_FIELD_NAME])}
except (TypeError, KeyError):
return {}
# Create custom view because auth is handled by Django REST framework JWT Auth (not Djoser)
class UserView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = UserViewSerializer
authentication_classes = (
rest_framework_jwt.authentication.JSONWebTokenAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication
)
permission_classes = (permissions.IsAuthenticated,)
parser_classes = (MultiPartParser,)
def get_queryset(self):
id = self.request.user.pk
return User.objects.filter(id=id)
    # Methods update, perform_update, partial_update, destroy, perform_destroy
# all from Django REST Framework source-code mixins:
# https://github.com/encode/django-rest-framework/blob/master/rest_framework/mixins.py
# To customize, must overwrite but also add in default source-code.
def update(self, request, *args, **kwargs):
user = self.get_object()
# Check job company id matches user id
        if user.pk != self.request.user.pk:
message = {'FORBIDDEN'}
return Response(message, status=status.HTTP_403_FORBIDDEN)
# Determines if PUT or PATCH request
partial = kwargs.pop('partial', False)
if partial is False:
message = { "detail": "Method \"PUT\" not allowed." }
return Response(message, status=status.HTTP_405_METHOD_NOT_ALLOWED)
serializer = self.get_serializer(user, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(user, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the user.
user._prefetched_objects_cache = {}
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_update(self, serializer):
serializer.save()
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
user = self.get_object()
# Checks user id on request == user id making delete request:
# (prevents company 1 deleting for company 2)
        if user.pk != self.request.user.pk:
message = {'FORBIDDEN'}
return Response(message, status=status.HTTP_403_FORBIDDEN)
self.perform_destroy(user)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance):
instance.delete()
# Resets the jwt_secret, invalidating all token issued
class UserLogoutAllView(views.APIView):
authentication_classes = (
rest_framework_jwt.authentication.JSONWebTokenAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication
)
permission_classes = (permissions.IsAuthenticated,)
def post(self, request, *args, **kwargs):
user = request.user
user.jwt_secret = uuid.uuid4()
user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class PostPageNumberPagination(pagination.PageNumberPagination):
page_size = 10
page_size_query_param = 'post'
max_page_size = 20
class ListJobPost(generics.ListCreateAPIView):
# returns first 10 most recently published jobs
queryset = JobPost.objects.exclude(published_date=None)[:10]
serializer_class = JobPreviewSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
pagination_class = PostPageNumberPagination
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class ViewJobPost(generics.ListAPIView):
serializer_class = JobPostSerializer
# Override queryset: returns object whose id matches int passed in url params (self.kwargs)
def get_queryset(self):
return JobPost.objects.filter(id=self.kwargs['pk'])
class ModifyJobPost(generics.RetrieveUpdateDestroyAPIView):
serializer_class = JobPostSerializer
authentication_classes = (
rest_framework_jwt.authentication.JSONWebTokenAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication
)
permission_classes = (permissions.IsAuthenticated,)
    # Methods update, perform_update, partial_update, destroy, perform_destroy
# all from Django REST Framework source-code mixins:
# https://github.com/encode/django-rest-framework/blob/master/rest_framework/mixins.py
# To customize, must overwrite but also add in default source-code.
# Override queryset: returns object whose id matches int passed in url params (self.kwargs)
def get_queryset(self):
return JobPost.objects.filter(id=self.kwargs['pk'])
def update(self, request, *args, **kwargs):
job = self.get_object()
# Check job company id matches user id
        if job.company.pk != self.request.user.pk:
message = {'FORBIDDEN'}
return Response(message, status=status.HTTP_403_FORBIDDEN)
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return Response(serializer.data)
def perform_update(self, serializer):
serializer.save()
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
job = self.get_object()
# Checks job id on request == user id making delete request:
# (prevents company 1 deleting for company 2)
        if job.company.pk != self.request.user.pk:
message = {'FORBIDDEN'}
return Response(message, status=status.HTTP_403_FORBIDDEN)
self.perform_destroy(job)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance):
instance.delete()
# Returns list of Jobs posted by a company account
class ListCompanyJobPosts(generics.ListCreateAPIView):
serializer_class = JobPostSerializer
authentication_classes = (
rest_framework_jwt.authentication.JSONWebTokenAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication
)
permission_classes = (permissions.IsAuthenticated,)
pagination_class = PostPageNumberPagination
# lookup_field = "company"
def get_queryset(self):
company = self.request.user
return JobPost.objects.filter(company=company)
# print(self.request.data)
# if self.request.data['is_active'] is True:
# return JobPost.objects.filter(published_date=True)
# else:
# return JobPost.objects.filter(company=company)
def post(self, request, *args, **kwargs):
# print('REQUEST>>>>', request.user.pk)
# Checks company id on request == user id making post request:
# (prevents company 1 posting for company 2)
        if request.data['company'] != self.request.user.pk:
message = {'FORBIDDEN'}
return Response(message, status=status.HTTP_403_FORBIDDEN)
# If job post is published, set published_date to current time
if request.data['is_active'] is True:
request.data['published_date'] = timezone.now()
# print('REQUEST>>>>', request.data)
serializer = self.get_serializer(data=request.data)
# print('SERIALIZER>>>>>>', serializer)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
# print('SERIALIZER.DATA>>>>>>', serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
########### Membership Views and Methods ###########
def get_user_membership(request):
user_membership_qs = UserMembership.objects.filter(user=request.user)
if user_membership_qs.exists():
return user_membership_qs.first()
return None
def get_user_subscription(request):
user_subscription_qs = Subscription.objects.filter(
user_membership=get_user_membership(request))
if user_subscription_qs.exists():
user_subscription = user_subscription_qs.first()
return user_subscription
return None
def send_email(request):
sg = sendgrid.SendGridAPIClient(
apikey=config('SENDGRID_API_KEY')
)
from_email = Email('<EMAIL>')
to_email = Email('<EMAIL>')
subject = 'Testing!'
msg_html = render_to_string('templates/email_confirm.html', {'email': sendgrid})
    content = Content("text/html", msg_html)
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
return HttpResponse('Email sent!')
def get_selected_membership(request):
membership_type = request.session['selected_membership_type']
selected_membership_qs = Membership.objects.filter(
membership_type=membership_type)
if selected_membership_qs.exists():
return selected_membership_qs.first()
return None
# for selecting a paid membership
class MembershipSelectView(generics.ListAPIView):
model = Membership
queryset = Membership.objects.all()
serializer_class = MembershipSerializer
authentication_classes = (
rest_framework_jwt.authentication.JSONWebTokenAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication
)
permission_classes = (permissions.IsAuthenticated,)
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(**kwargs)
current_membership = get_user_membership(self.request)
context['current_membership'] = str(current_membership.membership)
# print(context)
return context
def post(self, request, **kwargs):
selected_membership_type = request.POST.get('membership_type')
user_subscription = get_user_subscription(request)
user_membership = get_user_membership(request)
selected_membership_qs = Membership.objects.filter(
membership_type=selected_membership_type
)
if selected_membership_qs.exists():
selected_membership = selected_membership_qs.first()
'''
============
VALIDATION
============
'''
if user_membership.membership == selected_membership:
if user_subscription != None:
messages.info(request, "You already have this membership. Your next payment is due {}".format(
'get this value from Stripe'))
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
#assign any changes to membership type to the session
request.session['selected_membership_type'] = selected_membership.membership_type
return HttpResponseRedirect(reverse('memberships:payment'))
class PaymentView(generics.CreateAPIView):
queryset = Payment.objects.all()
serializer_class = PaymentViewSerializer
authentication_classes = (
rest_framework_jwt.authentication.JSONWebTokenAuthentication,
authentication.SessionAuthentication,
authentication.BasicAuthentication
)
permission_classes = (permissions.IsAuthenticated,)
# Tokenizes purchase
# class PaymentView(generics.CreateAPIView):
# user_membership = get_user_membership(request)
# selected_membership = get_selected_membership(request)
# publishKey = settings.STRIPE_PUBLISHABLE_KEY
# if request.method == "POST":
# try:
# token = request.POST['stripeToken']
# subscription = stripe.Subscription.create(
# customer=user_membership.stripe_customer_id,
# items=[
# {
# "plan": selected_membership.stripe_plan_id,
# },
# ],
# source=token # <PASSWORD>
# )
# return redirect(reverse('memberships:update-transactions',
# kwargs={
# 'subscription_id': subscription.id
# }))
# except stripe.CardError as e:
# messages.info(request, "Your card has been declined")
# context = {
# 'publishKey': publishKey,
# 'selected_membership': selected_membership
# }
# return render(request, "templates/membership_payment.html", context)
# def updateTransactionRecords(request, subscription_id):
# user_membership = get_user_membership(request)
# selected_membership = get_selected_membership(request)
# user_membership.membership = selected_membership
# user_membership.save()
# sub, created = Subscription.objects.get_or_create(user_membership=user_membership)
# sub.stripe_subscription_id = subscription_id
# sub.active = True
# sub.save()
# try:
# del request.session['selected_membership_type']
# except:
# pass
# messages.info(request, 'Successfully created {} membership'.format(selected_membership))
# return redirect('/memberships')
# def cancelSubscription(request):
# user_sub = get_user_subscription(request)
# if user_sub.active == False:
# messages.info(request, "You dont have an active membership")
# return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
# sub = stripe.Subscription.retrieve(user_sub.stripe_subscription_id)
# sub.delete()
# user_sub.active = False
# user_sub.save()
# free_membership = Membership.objects.filter(membership_type='Free').first()
# user_membership = get_user_membership(request)
# user_membership.membership = free_membership
# user_membership.save()
# messages.info(request, "Successfully cancelled membership. We have sent an email")
# # sending an email here
# return redirect('/memberships')
|
StarcoderdataPython
|
6608169
|
# Copyright (c) 2022 Food-X Technologies
#
# This file is part of foodx_backup_source.
#
# You should have received a copy of the MIT License along with
# foodx_backup_source. If not, see <https://opensource.org/licenses/MIT>.
import pathlib
import typing
import click
import pytest
from click.testing import CliRunner
from foodx_backup_source._file_io import BackupDefinitions
from foodx_backup_source._main import (
DEFAULT_OUTPUT_PATH,
_launch_packaging,
click_entry,
)
from foodx_backup_source.schema import ApplicationDefinition, DependencyFile
@pytest.fixture()
def mock_gather(mocker):
return mocker.patch("foodx_backup_source._main._launch_packaging")
@pytest.fixture()
def mock_runner():
return CliRunner()
@pytest.fixture()
def mock_path(mocker):
def mock_convert(
value: typing.Any,
param: typing.Optional["Parameter"],
ctx: typing.Optional["Context"],
) -> typing.Any:
# don't do any checking on the parameter
return pathlib.Path(value)
mocker.patch.object(click.Path, "convert", side_effect=mock_convert)
@pytest.fixture()
def mock_definitions(load_yaml_content, mocker):
content_text = """
---
context:
dependencies:
r1:
backup:
repo_url: "https://this.host/path"
branch_name: master
docker:
image_name: r1-image
tag_prefix: pp-
release:
ref: "3.1.4"
"""
data = DependencyFile.parse_obj(load_yaml_content(content_text))
definitions: BackupDefinitions = list()
for name, configuration in data.context.dependencies.items():
this_element = ApplicationDefinition(
name=name, configuration=configuration
)
definitions.append(this_element)
mocker.patch(
"foodx_backup_source._main.load_backup_definitions",
return_value=definitions,
)
class TestLaunchPackaging:
@pytest.mark.asyncio
async def test_clean(self, mock_definitions, mocker):
mock_snapshot = mocker.patch("foodx_backup_source._main.do_snapshot")
mocker.patch("foodx_backup_source._main.tarfile.open")
mocker.patch("foodx_backup_source._main.discover_backup_definitions")
mocker.patch(
"foodx_backup_source._main._isoformat_now", return_value="today"
)
mock_hash_file = mocker.patch(
"foodx_backup_source._main.create_hash_file"
)
arguments = {
"project_name": "this_project",
"project_directory": pathlib.Path("some/project"),
"output_directory": pathlib.Path("some/output"),
"git_refs": dict(),
"token": None,
}
await _launch_packaging(**arguments)
mock_hash_file.assert_called_once_with(
pathlib.Path("some/output/this_project-today.tar.gz")
)
mock_snapshot.assert_called_once()
@pytest.mark.asyncio
async def test_git_ref(self, mock_definitions, mocker):
mock_snapshot = mocker.patch("foodx_backup_source._main.do_snapshot")
mocker.patch("foodx_backup_source._main.tarfile.open")
mocker.patch("foodx_backup_source._main.discover_backup_definitions")
mocker.patch(
"foodx_backup_source._main._isoformat_now", return_value="today"
)
mock_hash_file = mocker.patch(
"foodx_backup_source._main.create_hash_file"
)
arguments = {
"project_name": "this_project",
"project_directory": pathlib.Path("some/project"),
"output_directory": pathlib.Path("some/output"),
"token": None,
"git_refs": {"r1": "abc123"},
}
await _launch_packaging(**arguments)
mock_hash_file.assert_called_once_with(
pathlib.Path("some/output/this_project-today.tar.gz")
)
mock_snapshot.assert_called_once()
assert (
mock_snapshot.call_args[0][0].configuration.release.ref == "abc123"
)
class TestMain:
def test_default(self, mock_gather, mock_runner, mock_path):
arguments = [
"this_project",
"some/path",
]
result = mock_runner.invoke(click_entry, arguments)
assert result.exit_code == 0
mock_gather.assert_awaited_once_with(
arguments[0],
pathlib.Path(arguments[1]),
DEFAULT_OUTPUT_PATH,
None,
dict(),
)
def test_token_file_stdin(
self, mock_gather, mock_runner, mock_path, mocker
):
arguments = [
"this_project",
"some/path",
"--token-file",
"-",
]
result = mock_runner.invoke(click_entry, arguments, input="deadb33f")
assert result.exit_code == 0
mock_gather.assert_awaited_once_with(
mocker.ANY,
mocker.ANY,
mocker.ANY,
"deadb33f",
mocker.ANY,
)
def test_token_file_whitespace(
self, mock_gather, mock_runner, mock_path, mocker
):
arguments = [
"this_project",
"some/path",
"--token-file",
"-",
]
result = mock_runner.invoke(click_entry, arguments, input=" deadb33f\n")
assert result.exit_code == 0
mock_gather.assert_awaited_once_with(
mocker.ANY,
mocker.ANY,
mocker.ANY,
"deadb33f",
mocker.ANY,
)
def test_output(self, mock_gather, mock_runner, mock_path, mocker):
arguments = [
"this_project",
"some/path",
"--output-dir",
"output/dir",
]
result = mock_runner.invoke(click_entry, arguments)
assert result.exit_code == 0
mock_gather.assert_awaited_once_with(
mocker.ANY,
mocker.ANY,
pathlib.Path("output/dir"),
mocker.ANY,
mocker.ANY,
)
def test_git_ref(self, mock_gather, mock_runner, mock_path, mocker):
arguments = [
"this_project",
"some/path",
"--git-ref",
"r1=abc123",
]
result = mock_runner.invoke(click_entry, arguments)
assert result.exit_code == 0
mock_gather.assert_awaited_once_with(
mocker.ANY,
mocker.ANY,
mocker.ANY,
mocker.ANY,
{"r1": "abc123"},
)
def test_multiple_git_ref(
self, mock_gather, mock_runner, mock_path, mocker
):
arguments = [
"this_project",
"some/path",
"--git-ref",
"r1=abc123",
"--git-ref",
"r3=123abc",
]
result = mock_runner.invoke(click_entry, arguments)
assert result.exit_code == 0
mock_gather.assert_awaited_once_with(
mocker.ANY,
mocker.ANY,
mocker.ANY,
mocker.ANY,
{"r1": "abc123", "r3": "123abc"},
)
|
StarcoderdataPython
|
213851
|
<gh_stars>1-10
# coding=utf-8
from tornado.web import authenticated
from handlers.base_handler import BaseHandler
from tornado.log import access_log as weblog
from handlers.Project.project_manage_handler import get_project_list, get_user_list
class TaskManageHandler(BaseHandler):
@authenticated
def get(self):
weblog.info("%s.", self._request_summary())
task = dict()
task['progress'] = 45
self.render("project/task.html", message="", projects=get_project_list(self),
users=get_user_list(self), task=task)
pass
@authenticated
def post(self):
pass
|
StarcoderdataPython
|
5198866
|
<gh_stars>1-10
"""
LLNotifyMixin
"""
from functools import partial
from appdaemon import adbase as ad
METHODS = ["success", "warning", "error", "alert", "confirm", "notify", "message"]
METHODS_NO_MSG = ["dismiss_all", "ping"]
class LLNotifyMixin(ad.ADBase):
"""
Helper function to make it easy to call add alerts to Lovelace.
class MyHass(LLNotifyMixin, appdaemon.plugins.hass.hassapi.Hass):
pass
class MyApp(MyHass):
def initialize(self):
self.ll_success("This will create a success notification in Lovelace!", wait=0)
Methods:
* ll_success
* ll_warning
* ll_error
* ll_alert
* ll_confirm
* ll_dismiss_all
* ll_notify
* ll_message
* ll_ping
"""
def __init__(self, *args, **kwargs):
super(ad.ADBase, self).__init__(*args, **kwargs)
# For static analysis
self.ll_success = self.__noop
self.ll_warning = self.__noop
self.ll_error = self.__noop
self.ll_alert = self.__noop
self.ll_confirm = self.__noop
self.ll_dismiss_all = self.__noop
self.ll_notify = self.__noop
self.ll_message = self.__noop
self.ll_ping = self.__noop
# def initialize(self):
# super().initialize()
adbase = self.get_ad_api()
if self.__ll_notify_component_installed():
self.__add_methods()
else:
adbase.log(
"ll_notify component not installed. Any calls to ll_notify will be noops.",
level="WARNING",
)
def __ll_notify_component_installed(self) -> bool:
adbase = self.get_ad_api()
for service in adbase.list_services():
if service.get("domain") == "ll_notify":
return True
return False
def __add_methods(self):
def call_ll_notify_service(method, message, *args, **kwargs):
"""Pass through directly via call_service"""
adbase = self.get_ad_api()
return adbase.call_service(f"ll_notify/{method}", message=message, **kwargs)
for method in METHODS:
setattr(self, "ll_" + method, partial(call_ll_notify_service, method))
for method in METHODS_NO_MSG:
setattr(self, "ll_" + method, partial(call_ll_notify_service, method, ""))
@staticmethod
def __noop(*args, **kwargs):
pass
|
StarcoderdataPython
|
3434024
|
resnext101_32_path = 'resnext_101_32x4d.pth'
|
StarcoderdataPython
|
186887
|
<filename>Desenv_Web/Desenv_Web/views.py
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return render(request, 'index.html')
def contato(request):
if request.method == 'GET':
return render(request, 'login.html')
else:
print('Acesso via POST')
def login(request):
if request.method == 'GET':
print('Acesso via GET')
else:
request.POST.get("loginUser")
request.POST.get("senhaUser")
print('Acesso via POST')
print("Acesso via POST com usuário", request.POST.get("loginUser"), "e senha", request.POST.get("senhaUser"))
return render(request, 'login.html')
|
StarcoderdataPython
|
6685098
|
<filename>ldt/tests/dicts/morphology/test_wordnet.py
# -*- coding: utf-8 -*-
import unittest
import os
os.environ["TESTING_LDT"] = "TRUE"
import ldt
from ldt.helpers.ignore import ignore_warnings
class Tests(unittest.TestCase):
"""
The tests in this block inspect the WordNet morphological functions:
lemmatization and retrieval of possible POS tags for a query word.
"""
@ignore_warnings
def test_dict_initialization(self):
test_dict = ldt.dicts.morphology.wordnet.en.MorphWordNet()
self.assertEqual(test_dict.language, "en")
@ignore_warnings
def test_pos_dict(self):
test_dict = ldt.dicts.morphology.wordnet.en.MorphWordNet()
res = test_dict.get_pos("cat")
self.assertGreaterEqual(res["noun"], 8)
@ignore_warnings
def test_pos_list(self):
test_dict = ldt.dicts.morphology.wordnet.en.MorphWordNet()
res = test_dict.get_pos("cat", formatting="list")
worked = len(res) >= 2 and "noun" in res
self.assertTrue(worked)
@ignore_warnings
def test_lemmatize(self):
test_dict = ldt.dicts.morphology.wordnet.en.MorphWordNet()
res = test_dict.lemmatize("cats")
worked = len(res) == 1 and "cat" in res
self.assertTrue(worked)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4984331
|
from django.contrib.auth.mixins import AccessMixin
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
class GroupRequiredMixin(AccessMixin):
group_required = None
def get_required_group(self):
if self.group_required is None:
raise ImproperlyConfigured(
'{0} is missing the group_required attribute. Define {0}.group_required, or override '
'{0}.get_required_group().'.format(self.__class__.__name__)
)
if isinstance(self.group_required, six.string_types):
return self.group_required
raise ImproperlyConfigured(
'{0}.group_required attribute must be a string. Define {0}.group_required, or override '
'{0}.get_required_group().'.format(self.__class__.__name__)
)
def in_group(self):
group = self.get_required_group()
return self.request.user.groups.filter(name=group).exists()
def dispatch(self, request, *args, **kwargs):
if not self.in_group():
return self.handle_no_permission()
return super(GroupRequiredMixin, self).dispatch(request, *args, **kwargs)
class QueryMixin(object):
def do_query(self, request, *args, **kwargs):
        raise NotImplementedError
def dispatch(self, request, *args, **kwargs):
self.do_query(request, *args, **kwargs)
return super(QueryMixin, self).dispatch(request, *args, **kwargs)
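# A minimal usage sketch for GroupRequiredMixin (the group name and view below are
# hypothetical; any class-based view works the same way):
#
# from django.views.generic import TemplateView
#
# class EditorDashboardView(GroupRequiredMixin, TemplateView):
#     group_required = 'editors'
#     template_name = 'dashboard.html'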
|
StarcoderdataPython
|
1681057
|
a = 1000
b = 1000
c = 30
if a >= b and a >= c:
    print(a)
elif b >= a and b >= c:
    print(b)
else:
    print(c)
|
StarcoderdataPython
|
5185919
|
#!/usr/bin/env python3
# =====================
# Hover above the marker
# =====================
import numpy as np
import rospy
import cv2
import cv2.aruco as aruco
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from aruco_calibration import Calibration as clb
from drone_api import *
from math import pi, sqrt, sin, cos, atan2
import argparse
import time
FONT = cv2.FONT_HERSHEY_PLAIN
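# Circle keeps a rolling buffer of the last 25 marker poses and averages them in
# mean() to smooth out per-frame detection noise; yaw is taken only from the most
# recent detection and is cleared via erase_yaw() when the marker is lost.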
class Circle:
def __init__(self):
self.__buffer = [None] * 25
self.__yaw = 0
self.__counter = 0
def write(self, pose):
self.__buffer[self.__counter] = pose
self.__yaw = pose[5]
self.__counter += 1
if self.__counter >= len(self.__buffer):
self.__counter = 0
def mean(self):
num = 0
mean_pose = [0] * 6
for i in self.__buffer:
if i is not None:
num += 1
for j in range(len(mean_pose)):
mean_pose[j] += i[j]
if num != 0:
for i in range(len(mean_pose) - 1):
mean_pose[i] = mean_pose[i] / num
mean_pose[5] = self.__yaw
return mean_pose
def erase_yaw(self):
self.__yaw = 0
def toFixed(numObj, digits=0):
return f'{numObj:.{digits}f}'
# ARGPARSER
parser = argparse.ArgumentParser()
parser.add_argument('--write', dest='write', action='store_true',
help='if set, video stream is written to a file')
parser.add_argument('--show', dest='show', action='store_true',
help='if set, video stream is displayed in the window')
parser.add_argument('--output', dest='output', action='store_true',
help='if set, ArUco recognition process is output to the terminal')
args = parser.parse_args()
parser.set_defaults(write=False)
parser.set_defaults(show=False)
parser.set_defaults(output=False)
# OPEN VIDEO
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print("Cannot open camera")
exit()
if args.write:
import time
time_now = time.gmtime(time.time())
video_file = f'{time.strftime("%Y.%m.%d %H:%M:%S", time.gmtime())}.avi'
while True:
ret, frame = cap.read()
if ret:
image_size = frame.shape[:2]
print(f'Resolution: {image_size[1]}x{image_size[0]}')
break
fps = 25.0
out = cv2.VideoWriter(video_file, cv2.VideoWriter_fourcc(*'MJPG'),
fps, (image_size[1], image_size[0]))
# calibration_save.yaml - calibration has already been performed
camera_matrix, dist_coef = clb.loadCoefficients('calibration_save.yaml')
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters_create()
# DRONE PREPARATION
drone = Drone_api()
drone.start()
rospy.loginfo('Drone armed')
drone.sleep(5)
drone.set_local_pose(0, 0, 4, 0)
while not drone.point_is_reached() and not drone.is_shutdown():
drone.sleep(0.5)
cir = Circle()
# CYCLE
while not drone.is_shutdown():
# Get ArUco pose
ret, frame = cap.read()
frame = cv2.flip(frame, -1)
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict,
parameters=parameters,
cameraMatrix=camera_matrix,
distCoeff=dist_coef)
if np.all(ids is not None):
rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners, 0.2, camera_matrix,
dist_coef)
aruco.drawDetectedMarkers(frame, corners)
aruco.drawAxis(frame, camera_matrix,
dist_coef, rvec[0], tvec[0], 0.2)
cv2.putText(frame, ' id' + str(ids[0])[1:-1], (20, 30), FONT,
1, (255, 255, 255), 3, cv2.LINE_AA)
cv2.putText(frame, ' id' + str(ids[0])[1:-1], (20, 30), FONT,
1, (0, 0, 0), 1, cv2.LINE_AA)
drone_pose = drone.get_local_pose()
x, y, z, roll, pitch, yaw = Camera_api.marker_local_pose(rvec[0][0], tvec[0][0],
drone_pose, (0, 0, 0, 0, pi/2, 0))
cir.write([x, y, z, roll, pitch, yaw])
if args.output:
rospy.loginfo(str(toFixed(x, 3) + ' ' +
toFixed(y, 3) + ' ' +
toFixed(z, 3) + ' ' +
toFixed(roll, 3) + ' ' +
toFixed(pitch, 3) + ' ' +
toFixed(yaw, 3)))
        # White outline and black text
cv2.putText(frame, str(toFixed(x, 3)+' ' +
toFixed(y, 3) + ' ' +
toFixed(z, 3) + ' '), (20, 70+20),
FONT, 1, (255, 255, 255), 3, cv2.LINE_AA)
cv2.putText(frame, str(toFixed(x, 3) + ' ' +
toFixed(y, 3) + ' ' +
toFixed(z, 3) + ' '), (20, 70+20),
FONT, 1, (0, 0, 0), 1, cv2.LINE_AA)
cv2.putText(frame, str(toFixed(roll, 3)+' ' +
toFixed(pitch, 3) + ' ' +
toFixed(yaw, 3)), (20, 100+20),
FONT, 1, (255, 255, 255), 3, cv2.LINE_AA)
cv2.putText(frame, str(toFixed(roll, 3) + ' ' +
toFixed(pitch, 3) + ' ' +
toFixed(yaw, 3)), (20, 100+20),
FONT, 1, (0, 0, 0), 1, cv2.LINE_AA)
else:
        # Reset the marker yaw so that, if the marker is lost, the drone does not keep turning
cir.erase_yaw()
if args.output:
rospy.loginfo('NOT FOUND')
cv2.putText(frame, 'NOT FOUND', (20, 30), FONT,
1, (255, 255, 255), 3, cv2.LINE_AA)
cv2.putText(frame, 'NOT FOUND', (20, 30), FONT,
1, (0, 0, 0), 1, cv2.LINE_AA)
if args.write:
out.write(frame)
if args.show:
cv2.imshow('frame', frame)
cv2.waitKey(1)
# Set drone pose
marker_pose = cir.mean()
drone_pose = drone.get_local_pose()
correct_drone_yaw = marker_pose[5] + drone_pose[5]
    # To avoid drone "jitter" caused by small errors in the marker position estimate:
drone.set_local_pose(marker_pose[0], marker_pose[1],
marker_pose[2] + 3, correct_drone_yaw)
drone.sleep(0.5)
rospy.loginfo('Drone disarmed')
cap.release()
if args.write:
out.release()
rospy.loginfo(f'Create {video_file}')
cv2.destroyAllWindows()
|
StarcoderdataPython
|
6648692
|
opt = {
"no_cuda": True,
"task": "internal:blended_skill_talk,wizard_of_wikipedia,convai2,empathetic_dialogues",
"multitask_weights": [
1.0,
3.0,
3.0,
3.0
],
"init_model": "./data/models/blender/blender_90M/model",
"dict_file":"./data/models/blender/blender_90M/model.dict",
"embedding_size": 512,
"n_layers": 8,
"ffn_size": 2048,
"dropout": 0.1,
"n_heads": 16,
"learn_positional_embeddings": True,
"n_positions": 512,
'variant': 'xlm',
'activation': 'gelu',
'fp16': True,
    'text_truncate': 512,
'label_truncate': 128,
'dict_tokenizer': 'bpe',
'dict_lower': True,
'lr': 1e-06,
'optimizer': 'adamax',
'lr_scheduler': 'reduceonplateau',
'gradient_clip': 0.1,
'veps': 0.25,
"betas": [
0.9,
0.999
],
"update_freq": 1,
"attention_dropout": 0.0,
"relu_dropout": 0.0,
"skip_generation": False,
'vp': 15,
'stim': 60,
'vme': 20000,
'bs': 16,
'vmt': 'ppl',
'vmm': 'min',
'save_after_valid': True,
'model_file': '/tmp/test_train_90M',
'datapath': './custom/data/',
'history_size': -1,
'truncate': -1,
'rank_candidates': False,
'embeddings_scale': True,
'output_scaling': 1.0,
'embedding_type': 'random',
'gpu': -1
}
|
StarcoderdataPython
|
6424244
|
#https://github.com/MerosCrypto/Meros/issues/106. Specifically tests elements in Blocks (except MeritRemovals).
#Types.
from typing import Dict, List, IO, Any
#Sketch class.
from PythonTests.Libs.Minisketch import Sketch
#Blockchain classes.
from PythonTests.Classes.Merit.Blockchain import Block
from PythonTests.Classes.Merit.Blockchain import Blockchain
#VerificationPacket class.
from PythonTests.Classes.Consensus.VerificationPacket import VerificationPacket
#Transactions class.
from PythonTests.Classes.Transactions.Transactions import Transactions
#Meros classes.
from PythonTests.Meros.RPC import RPC
from PythonTests.Meros.Meros import MessageType
#TestError Exception.
from PythonTests.Tests.Errors import TestError
#JSON standard lib.
import json
#pylint: disable=too-many-statements
def HundredSixBlockElementsTest(
rpc: RPC
) -> None:
#Load the vectors.
file: IO[Any] = open("PythonTests/Vectors/Consensus/HundredSix/BlockElements.json", "r")
vectors: Dict[str, Any] = json.loads(file.read())
file.close()
#Blockchain. Solely used to get the genesis Block hash.
blockchain: Blockchain = Blockchain()
#Transactions.
transactions: Transactions = Transactions.fromJSON(vectors["transactions"])
#Parse the Blocks from the vectors.
blocks: List[Block] = []
for block in vectors["blocks"]:
blocks.append(Block.fromJSON({}, block))
for block in blocks:
#Handshake with the node.
rpc.meros.connect(254, 254, blockchain.blocks[0].header.hash)
#Send the Block.
rpc.meros.blockHeader(block.header)
        #Flag for whether the Block's Body has synced.
blockBodySynced: bool = False
#Handle sync requests.
reqHash: bytes = bytes()
while True:
try:
msg: bytes = rpc.meros.recv()
except TestError:
if not blockBodySynced:
raise TestError("Node disconnected us before syncing the body.")
#Verify the node didn't crash.
try:
if rpc.call("merit", "getHeight") != 1:
raise Exception()
except Exception:
raise TestError("Node crashed after being sent a malformed Element.")
#Since the node didn't crash, break out of this loop to trigger the next test case.
break
if MessageType(msg[0]) == MessageType.Syncing:
rpc.meros.syncingAcknowledged()
elif MessageType(msg[0]) == MessageType.BlockBodyRequest:
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for a Block Body that didn't belong to the Block we just sent it.")
#Send the BlockBody.
blockBodySynced = True
rpc.meros.blockBody([], block)
elif MessageType(msg[0]) == MessageType.SketchHashesRequest:
if not block.body.packets:
raise TestError("Meros asked for Sketch Hashes from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Sketch Hashes that didn't belong to the Block we just sent it.")
                #Create the hashes.
hashes: List[int] = []
for packet in block.body.packets:
hashes.append(Sketch.hash(block.header.sketchSalt, packet))
#Send the Sketch Hashes.
rpc.meros.sketchHashes(hashes)
elif MessageType(msg[0]) == MessageType.SketchHashRequests:
if not block.body.packets:
raise TestError("Meros asked for Verification Packets from a Block without any.")
reqHash = msg[1 : 33]
if reqHash != block.header.hash:
raise TestError("Meros asked for Verification Packets that didn't belong to the Block we just sent it.")
#Create a lookup of hash to packets.
packets: Dict[int, VerificationPacket] = {}
for packet in block.body.packets:
packets[Sketch.hash(block.header.sketchSalt, packet)] = packet
#Look up each requested packet and respond accordingly.
for h in range(int.from_bytes(msg[33 : 37], byteorder="big")):
sketchHash: int = int.from_bytes(msg[37 + (h * 8) : 45 + (h * 8)], byteorder="big")
if sketchHash not in packets:
raise TestError("Meros asked for a non-existent Sketch Hash.")
rpc.meros.packet(packets[sketchHash])
elif MessageType(msg[0]) == MessageType.TransactionRequest:
reqHash = msg[1 : 33]
if reqHash not in transactions.txs:
raise TestError("Meros asked for a non-existent Transaction.")
rpc.meros.transaction(transactions.txs[reqHash])
elif MessageType(msg[0]) == MessageType.SyncingOver:
pass
elif MessageType(msg[0]) == MessageType.BlockHeader:
#Raise a TestError if the Block was added.
raise TestError("Meros synced a Block with an invalid holder.")
else:
raise TestError("Unexpected message sent: " + msg.hex().upper())
#Reset the node.
rpc.reset()
|
StarcoderdataPython
|
3339213
|
<reponame>khalili-itelligence/basic<gh_stars>0
# import argparse
# parse = argparse.ArgumentParser(description="ForTest!")
# parse.add_argument('integers', type=int)
# print(parse.parse_args())
import sys
print("Result:", sys.argv)
|
StarcoderdataPython
|
5047630
|
import matplotlib
''' To use the Agg backend '''
matplotlib.use('Agg')
from pandas_datareader.data import DataReader
import matplotlib.pyplot as plt
import urllib.request
import re
import datetime
'''gets historical prices from pandas datareader'''
def getdata(ticker):
data = DataReader(ticker, 'yahoo')
return data
''' creates the Open Close Graph and saves it as svg '''
def getOpenClosegraph(ticker):
fig = plt.figure(figsize=(20,10))
data = getdata(ticker)
plt.plot(list(data.index), data['Open'], 'b-')
plt.plot(list(data.index), data['Close'], 'r-')
plt.legend(["Open", "Close"])
plt.title(ticker)
plt.xticks(rotation=45)
fig.savefig('finance\\static\\finance\\images\\openclose.svg')
return "finance/images/openclose.svg"
''' creates the High Low Graph and saves it as svg '''
def getHighLowgraph(ticker):
fig = plt.figure(figsize=(20,10))
data = getdata(ticker)
plt.plot(list(data.index), data['High'], 'b-')
plt.plot(list(data.index), data['Low'], 'r-')
plt.legend(["High", "Low"])
plt.title(ticker)
plt.xticks(rotation=45)
fig.savefig('finance\\static\\finance\\images\\highlow.svg')
return "finance/images/highlow.svg"
'''get Industry Peers from MorningStar'''
def getIndustryPeers(ticker):
stringdata = ""
try:
        url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-data.action?type=com&t=XNAS:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
except IndexError:
try:
            url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-data.action?type=com&t=PINX:%s&region=usa&culture=en-US&ops=p&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
except IndexError:
try:
                url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-data.action?type=com&t=XNYS:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
except IndexError:
stringdata = "No Data"
return stringdata
'''gets the Price Quotes of Industry Peers from MorningStar'''
def getIndustyPeersQuote(ticker):
stringdata = ""
try:
        url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-quote.action?type=com&t=XNAS:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
except IndexError:
try:
            url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-quote.action?type=com&t=PINX:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
except IndexError:
try:
                url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-quote.action?type=com&t=XNYS:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
except IndexError:
stringdata = "No Data"
return(stringdata)
'''get Key Ratios data from MorningStar'''
def getKeyRatios(ticker):
try:
        url = urllib.request.urlopen("http://financials.morningstar.com/financials/getKeyStatPart.html?&callback=?&t=XNAS:%s&region=usa&culture=en-US&ops=clear&cur=&order=" % ticker)
html = url.read()
html = str(html, "utf-8").split('?({"componentData":"')
html = html[1].split('"})')
html = re.sub(r'\\', "", html[0])
html = html.split('</ul>')
html = html[1].split('style="display:none;"')
data = ""
for line in html:
data += line
data = data.split("</table>")
for i in range(len(data)-1):
splitdata = data[i].split("<table")
data[i] = "<table" + splitdata[1] + "</table>"
except IndexError:
try:
            url = urllib.request.urlopen("http://financials.morningstar.com/financials/getKeyStatPart.html?&callback=?&t=PINX:%s&region=usa&culture=en-US&ops=clear&cur=&order=" % ticker)
html = url.read()
html = str(html, "utf-8").split('?({"componentData":"')
html = html[1].split('"})')
html = re.sub(r'\\', "", html[0])
html = html.split('</ul>')
html = html[1].split('style="display:none;"')
data = ""
for line in html:
data += line
data = data.split("</table>")
for i in range(len(data)-1):
splitdata = data[i].split("<table")
data[i] = "<table" + splitdata[1] + "</table>"
except IndexError:
try:
                url = urllib.request.urlopen("http://financials.morningstar.com/financials/getKeyStatPart.html?&callback=?&t=XNYS:%s&region=usa&culture=en-US&ops=clear&cur=&order=" % ticker)
html = url.read()
html = str(html, "utf-8").split('?({"componentData":"')
html = html[1].split('"})')
html = re.sub(r'\\', "", html[0])
html = html.split('</ul>')
html = html[1].split('style="display:none;"')
data = ""
for line in html:
data += line
data = data.split("</table>")
for i in range(len(data)-1):
splitdata = data[i].split("<table")
data[i] = "<table" + splitdata[1] + "</table>"
except IndexError:
data = "No Data"
return data
'''gets financials from morningstar'''
def getFinancials(ticker):
try:
        url = urllib.request.urlopen("http://financials.morningstar.com/financials/getFinancePart.html?&callback=?&t=XNAS:%s&region=usa&culture=en-US&ops=clear&cur=&order=" % ticker)
html = url.read()
html = str(html, "utf-8").split('?({"componentData":"')
html = html[1].split('"})')
html = re.sub(r'\\', "", html[0])
html = html.split('<table')
html = '<table'+html[1]
except IndexError:
try:
            url = urllib.request.urlopen("http://financials.morningstar.com/financials/getFinancePart.html?&callback=?&t=PINX:%s&region=usa&culture=en-US&ops=clear&cur=&order=" % ticker)
html = url.read()
html = str(html, "utf-8").split('?({"componentData":"')
html = html[1].split('"})')
html = re.sub(r'\\', "", html[0])
html = html.split('<table')
html = '<table'+html[1]
except IndexError:
try:
                url = urllib.request.urlopen("http://financials.morningstar.com/financials/getFinancePart.html?&callback=?&t=XNYS:%s&region=usa&culture=en-US&ops=clear&cur=&order=" % ticker)
html = url.read()
html = str(html, "utf-8").split('?({"componentData":"')
html = html[1].split('"})')
html = re.sub(r'\\', "", html[0])
html = html.split('<table')
html = '<table'+html[1]
except IndexError:
html = "No Data"
return html
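'''a minimal sketch (assumption: the getKeyStatPart/getFinancePart endpoints wrap their
payload as ?({"componentData":"..."}) as the splits above suggest) showing how that JSONP
response could be unwrapped with the json module instead of manual string splitting'''
import json

def _unwrap_componentdata(body):
    # body is the decoded response text, e.g. '?({"componentData":"<table>...</table>"})'
    start = body.index('(') + 1
    end = body.rindex(')')
    payload = json.loads(body[start:end])
    # json.loads also resolves the backslash escapes that the code above strips with re.sub
    return payload.get('componentData', '')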
'''get quote summary from yahoo '''
def getSimpleSummary(ticker):
url = urllib.request.urlopen("http://finance.yahoo.com/quote/%s" % ticker)
html = url.read()
html = str(html, "utf8").split('<div id=\"quote-summary\"')
html[1] = '<div id=\"quote-summary\"' + html[1]
html = html[1].split("Trade prices are not sourced from all markets")
html[0] = html[0]+'</span></div>'
return html[0]
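'''an alternative sketch for getSimpleSummary using BeautifulSoup (an extra dependency,
not used elsewhere in this file) to pull the quote-summary div directly instead of
splitting on string markers, which breaks whenever Yahoo changes the surrounding markup'''
import urllib.request
from bs4 import BeautifulSoup

def getSimpleSummarySoup(ticker):
    url = urllib.request.urlopen("http://finance.yahoo.com/quote/%s" % ticker)
    soup = BeautifulSoup(url.read(), "html.parser")
    summary = soup.find("div", id="quote-summary")
    # find() returns None when the div is missing, e.g. for an unknown ticker
    return str(summary) if summary is not None else "No Data"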
''' gets the current price/quote header from MorningStar'''
def getCurrentPrice(ticker):
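    # MorningStar returns an empty body when the exchange prefix is wrong, so fall back from XNAS to PINX to XNYS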
    url = urllib.request.urlopen("http://quotes.morningstar.com/stock/c-header?&t=XNAS:%s&region=usa&culture=en-US&version=RET" % ticker)
html = url.read()
if len(html) != 0:
html = str(html, "utf8").split('<script type="text/javascript">')
else:
        url = urllib.request.urlopen("http://quotes.morningstar.com/stock/c-header?&t=PINX:%s&region=usa&culture=en-US&version=RET" % ticker)
html = url.read()
if len(html) != 0:
html = str(html, "utf8").split('<script type="text/javascript">')
else:
            url = urllib.request.urlopen("http://quotes.morningstar.com/stock/c-header?&t=XNYS:%s&region=usa&culture=en-US&version=RET" % ticker)
html = url.read()
if len(html) != 0:
html = str(html, "utf8").split('<script type="text/javascript">')
else:
return None
return html[0]
'''finds recommended stocks among a ticker's industry peers by checking whether,
over the past week, closing prices have risen consecutively since the last down day'''
def getRecommendation(ticker):
recommendlist = []
industrypeerlist = []
weekdatelist = []
stringdata = ""
try:
        url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-quote.action?type=com&t=XNAS:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
stringdata = stringdata.split('<td align="left" scope="row" >')
for i in range(2, len(stringdata)+1, 2):
inddata = stringdata[i].split('</td>')
industrypeerlist.append(inddata[0])
startdate = datetime.date.today()- datetime.timedelta(1)
for i in range(7):
nextdate = startdate -datetime.timedelta(i)
if nextdate.isoweekday() < 6:
weekdatelist.append(nextdate)
for peers in industrypeerlist:
comp = peers.strip()
try:
count = 0
data = DataReader(comp, 'yahoo', weekdatelist[len(weekdatelist)-1], weekdatelist[0])
for i in range(len(weekdatelist)-1, 0, -1):
try:
if data.ix[str(weekdatelist[i-1])]['Close']-data.ix[str(weekdatelist[i])]['Close'] > 0:
count += 1
elif data.ix[str(weekdatelist[i-1])]['Close']-data.ix[str(weekdatelist[i])]['Close'] < 0:
count = 0
except KeyError:
continue
if count >= 2:
recommendlist.append(comp)
except OSError:
continue
except IndexError:
try:
            url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-quote.action?type=com&t=PINX:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
stringdata = stringdata.split('<td align="left" scope="row" >')
for i in range(2, len(stringdata)+1, 2):
inddata = stringdata[i].split('</td>')
industrypeerlist.append(inddata[0])
startdate = datetime.date.today()- datetime.timedelta(1)
for i in range(7):
nextdate = startdate -datetime.timedelta(i)
if nextdate.isoweekday() < 6:
weekdatelist.append(nextdate)
for peers in industrypeerlist:
comp = peers.strip()
try:
count = 0
data = DataReader(comp, 'yahoo', weekdatelist[len(weekdatelist)-1], weekdatelist[0])
                    for i in range(len(weekdatelist)-1, 0, -1):
try:
if data.ix[str(weekdatelist[i-1])]['Close']-data.ix[str(weekdatelist[i])]['Close'] > 0:
count += 1
elif data.ix[str(weekdatelist[i-1])]['Close']-data.ix[str(weekdatelist[i])]['Close'] < 0:
count = 0
except KeyError:
continue
if count >= 2:
recommendlist.append(comp)
except OSError:
continue
except IndexError:
try:
                url = urllib.request.urlopen("http://financials.morningstar.com/competitors/industry-peer-quote.action?type=com&t=XNYS:%s&region=usa&culture=en-US&cur=" % ticker)
html = url.read()
html = str(html, "utf-8").split('<tbody>')
data = html[2].split('</tbody>')
data = data[0].split('</a>')
for line in data:
stuff = re.sub('[\<]{1}[aA]{1}[0-9a-zA-Z\D]*[\"]{1}[\>]{1}', "", line)
stringdata += stuff
stringdata = stringdata.split('<td align="left" scope="row" >')
for i in range(2, len(stringdata)+1, 2):
inddata = stringdata[i].split('</td>')
industrypeerlist.append(inddata[0])
startdate = datetime.date.today()- datetime.timedelta(1)
for i in range(7):
nextdate = startdate -datetime.timedelta(i)
if nextdate.isoweekday() < 6:
weekdatelist.append(nextdate)
for peers in industrypeerlist:
comp = peers.strip()
try:
count = 0
data = DataReader(comp, 'yahoo', weekdatelist[len(weekdatelist)-1], weekdatelist[0])
                        for i in range(len(weekdatelist)-1, 0, -1):
try:
if data.ix[str(weekdatelist[i-1])]['Close']-data.ix[str(weekdatelist[i])]['Close'] > 0:
count += 1
elif data.ix[str(weekdatelist[i-1])]['Close']-data.ix[str(weekdatelist[i])]['Close'] < 0:
count = 0
except KeyError:
continue
if count >= 2:
recommendlist.append(comp)
except OSError:
continue
except IndexError:
recommendlist = None
return recommendlist
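'''a minimal sketch (not called by getRecommendation) of the rule the loops above implement:
a peer is recommended when it has had at least two up closes since its most recent down close
in the past week; flat days neither extend nor reset the streak, matching the counting above'''
def _has_consecutive_gains(closes, required=2):
    # closes is a list of closing prices ordered oldest -> newest
    streak = 0
    for prev, curr in zip(closes, closes[1:]):
        if curr > prev:
            streak += 1
        elif curr < prev:
            streak = 0
    return streak >= required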
|
StarcoderdataPython
|