column                dtype            min .. max / classes
blob_id               stringlengths    40 .. 40
directory_id          stringlengths    40 .. 40
path                  stringlengths    5 .. 283
content_id            stringlengths    40 .. 40
detected_licenses     sequencelengths  0 .. 41
license_type          stringclasses    2 values
repo_name             stringlengths    7 .. 96
snapshot_id           stringlengths    40 .. 40
revision_id           stringlengths    40 .. 40
branch_name           stringclasses    58 values
visit_date            timestamp[us]
revision_date         timestamp[us]
committer_date        timestamp[us]
github_id             int64            12.7k .. 662M
star_events_count     int64            0 .. 35.5k
fork_events_count     int64            0 .. 20.6k
gha_license_id        stringclasses    11 values
gha_event_created_at  timestamp[us]
gha_created_at        timestamp[us]
gha_language          stringclasses    43 values
src_encoding          stringclasses    9 values
language              stringclasses    1 value
is_vendor             bool             2 classes
is_generated          bool             2 classes
length_bytes          int64            7 .. 5.88M
extension             stringclasses    30 values
content               stringlengths    7 .. 5.88M
authors               sequencelengths  1 .. 1
author                stringlengths    0 .. 73
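
The records below list one value per line, in the column order given above. As a point of reference, here is a minimal sketch of how a local Parquet export of this table could be filtered with pandas; the file name "data.parquet" is a placeholder assumption, not something the dataset itself provides:

    import pandas as pd

    # Load a local Parquet export of this table ("data.parquet" is hypothetical).
    df = pd.read_parquet("data.parquet")

    # Keep small, hand-written Python sources: drop vendored and
    # auto-generated files, then rank by star count.
    py = df[(df["language"] == "Python")
            & ~df["is_vendor"]
            & ~df["is_generated"]
            & (df["length_bytes"] < 20_000)]
    py = py.sort_values("star_events_count", ascending=False)

    for _, row in py.head(3).iterrows():
        print(row["repo_name"], row["path"], row["length_bytes"])
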
5ec4adef5b21520747e106af5d1de30527e70bf7
a84ebe7120fa09fa70b5f8b99619d9e50bfad4e3
/tp4/ex2/boutabout.py
de3a32092ff8034b7da7dd889a9fdb10df813366
[]
no_license
Kevin-Grgd/TP_Outils_Info
e8317fd132914ed736d3ee881fd4f28ca858136b
9591dcaedb6a9c72efe35c06aee9558ee6023361
refs/heads/main
2023-01-19T01:54:23.078363
2020-11-27T10:59:27
2020-11-27T10:59:27
316,474,447
0
0
null
null
null
null
UTF-8
Python
false
false
281
py
def bout_a_bout(ch1, ch2):
    print("L1 :", len(ch1))
    print("L2 :", len(ch2))
    l = len(ch1) + len(ch2)
    print("Total :", l)
    return ch1 + ch2
    print("Jamais")  # never reached: placed after the return

a = "Toto"
b = bout_a_bout(a, "Titi")
c = bout_a_bout(a, b)
print()
print(b)
print(c)
31d2e9346836276bd48ea300ebbfd77901710115
623d737eae96c7f5081767fb3eec5f796a4746ca
/ToDo_App/ToDos/views.py
d2d7206f8bf6c0414f9f5990d53868524f4b0dac
[]
no_license
Daurigu/ToDo-App-Django
037e14b14e77296b73c39d67bfa0141fdcc5baf7
000e944dbbf18d0743c9c637ca76c91843d8f5f2
refs/heads/master
2022-07-15T02:28:49.364088
2020-05-19T02:25:21
2020-05-19T02:25:21
264,742,408
0
0
null
null
null
null
UTF-8
Python
false
false
1,246
py
from django.shortcuts import render, get_object_or_404, redirect
from ToDo_App.form import todo_form
from .models import Todo

# Create your views here.
def view_main_todo(request, *args, **kwargs):
    form = todo_form()
    if request.method == 'POST':
        print(request.POST)
        form = todo_form(request.POST)
        if form.is_valid():
            Todo.objects.create(**form.cleaned_data)
            form = todo_form()
    context = {
        "form": form,
        "items": Todo.objects.all().order_by("-date"),
    }
    return render(request, "index.html", context)

def view_delete_item(request, id):
    obj = get_object_or_404(Todo, id=id)
    if request.method == 'POST':
        obj.delete()
        return redirect('../../')
    return render(request, "index.html", {})

def view_update_item(request, id):
    form = todo_form()  # was commented out; without it a GET request hits an undefined 'form' below
    updateForm = Todo.objects.get(id=id)
    if request.method == 'POST':
        print(request.POST)
        form = todo_form(request.POST)
        if form.is_valid():
            updateForm.text = request.POST.get('text')
            updateForm.save()
            return redirect('../../')
    context = {
        "form": form,
    }
    return render(request, "edit.html", context)
d103b3f0053ec9921f4c7434b0e933b2e279f4a1
9023153e17338fde4be547340b975b70a75ff8d7
/secondquestion/preprocess.py
fff628575dc11b6f5c31154a35a9be4fa4839da4
[]
no_license
tiaotiaosong/CMB_bankrace
92975b09e6deb4d64ba9867710d32ebe5985bae5
ed892862e34409787187db75fc0cdf7a2e0ab757
refs/heads/master
2020-06-04T06:25:34.429357
2019-06-14T08:14:13
2019-06-14T08:14:13
191,903,718
1
0
null
null
null
null
UTF-8
Python
false
false
702
py
import pandas as pd
import matplotlib.pyplot as plt

f = open('b6bce3abb838406daea9af48bf059c633.txt', encoding='UTF-8')
QRTA_NUM = dict()
next(f)
for eachline in f:
    Record = eachline.split()
    Record[0] = pd.to_datetime(Record[0])
    if Record[1] == 'QRTA' and Record[3] == '32':
        if Record[0] not in QRTA_NUM:
            QRTA_NUM[Record[0]] = abs(int(Record[4].split('.')[0]))
        else:
            QRTA_NUM[Record[0]] += abs(int(Record[4].split('.')[0]))

df1 = pd.DataFrame.from_dict(QRTA_NUM, orient='index')
df1.columns = ['y']
df1 = df1.reset_index()
df1 = df1.reset_index(drop=True)
df1.columns = ['ds', 'y']
print(df1)
df1.to_csv("perdayvolume.csv", index=True, header=True, encoding="utf-8")
30c2f27da31b131a2f06e09ba7e7ee586629ad89
33213395f9b7606da83003d9f89966af16a47ed7
/proyecto/aplicaion/views.py
8cc778e4ba513cb76491635765690c539a913e97
[]
no_license
sofiamanana/proyectoAnalisis
e75866d306424e37296c018da9cb7ee34a6450b4
3d7914dc2f6ef7813bd4672ada1cd57e01e24e26
refs/heads/main
2023-02-10T22:14:28.913022
2021-01-09T00:59:37
2021-01-09T00:59:37
311,785,039
0
0
null
null
null
null
UTF-8
Python
false
false
1,454
py
from django.shortcuts import render
from .models import File, Reportador
from .forms import FileForm
from django.http import HttpResponse
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth import login as do_login
from django.contrib import auth
from rest_framework import viewsets
from django.contrib.auth.models import User
from .serializers import UserSerializer

# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
    queryset = User.objects.all()
    serializer_class = UserSerializer

def showfile(request):
    lastfile = File.objects.last()
    filepath = lastfile.filepath
    filename = lastfile.name
    form = FileForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
    context = {'filepath': filepath,
               'form': form,
               'filename': filename}
    return render(request, 'aplicaion/subir_reporte.html', context)

def login(request):
    form = UserCreationForm()
    '''
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            user = form.cleaned_data.get('username')
            messages.success(request, 'Account was created for ' + user)
            return redirect('/')
    '''
    context = {'form': form}
    return render(request, 'aplicaion/index.html', context)
5e5af9b505acb2bba1a018c991379d63e548cd60
411a36e480bae8d7a5f3522be6bd8455d10ad256
/771. Jewels and Stones/s2.py
63e4d7926892027160f8fda73aa2373c40590e07
[]
no_license
rayony/LeetCode
8f48cf17e2073fe0b2b0e92a4a090a1c937cd128
c3489131917ae3ef04047d887405bea3967122da
refs/heads/master
2020-04-15T09:39:01.464909
2019-01-15T06:42:19
2019-01-15T06:42:19
164,559,733
0
0
null
null
null
null
UTF-8
Python
false
false
555
py
class Solution:
    def numJewelsInStones(self, J, S):
        """
        :type J: str
        :type S: str
        :rtype: int
        """
        # init dict using J
        dict = {}
        for i in range(len(J)):
            dict[J[i]] = 0
        # update count by comparing dict and S
        for i in range(len(S)):
            if S[i] in dict:
                dict[S[i]] += 1
        # update count by summing up the dict
        count = sum(dict.values())
        return count
3136f20917b2a34d79434bb479a0d2de152e14b5
f19de30c28ff2e962dca6bc74fd8f725b29104a7
/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py
9b673bdcd1b958f75508c0457d1874cca6c4c52a
[ "Apache-2.0" ]
permissive
iclementine/Paddle
9d56d614731dc4042aaffff7c7ad9c0f30f6ae01
1cb6d68fd3f32b53eb3ebd8f8a4fa27d502f8a38
refs/heads/develop
2023-05-02T09:30:09.964347
2020-04-03T04:57:36
2020-04-03T04:57:36
158,829,753
0
0
Apache-2.0
2023-03-29T10:40:58
2018-11-23T12:26:32
C++
UTF-8
Python
false
false
6,144
py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import gast
import inspect
import numpy as np
import paddle.fluid as fluid
import unittest

from paddle.fluid.dygraph.jit import dygraph_to_static_func
from paddle.fluid.dygraph.dygraph_to_static.loop_transformer import NameVisitor

SEED = 2020
np.random.seed(SEED)

def while_loop_dyfunc(x):
    i = fluid.dygraph.to_variable(x)
    while x < 10:
        i = i + x
        x = x + 1
    return i

def while_loop_dyfun_with_conflict_var(x):
    i = fluid.dygraph.to_variable(x)

    def relu(y):
        # 'y' is not visible outside the scope.
        return fluid.layers.relu(y)

    while x < 10:
        # If a tmp variable is created which has same name
        # with a argument in function, it should not be
        # included in the loop_vars.
        add_fn = lambda x, y: x + y
        i = add_fn(i, x)
        x = x + 1
    return i

def while_loop_dyfunc_with_none(x):
    i = fluid.dygraph.to_variable(x) \
        if x is not None \
        else fluid.dygraph.to_variable(x + 1)
    flag = 1
    while x < 10:
        i = i + x if flag is not None else x + i
        x = x + 1
    return i

def for_loop_dyfunc(max_len):
    for i in range(max_len):
        ret = fluid.layers.zeros(shape=[1], dtype='float32')
        fluid.layers.increment(ret, value=2.0, in_place=True)
    return ret

def while_loop_bool_op(x):
    i = fluid.dygraph.to_variable(x)
    while (x >= 0 and x < 10) or x <= -1 or x < -3 or (x < -7 or x < -5):
        i = i + x
        x = x + 1
    return i

def var_create_in_for_loop(max_len):
    for i in range(max_len):
        ret = fluid.layers.zeros(shape=[3, 4, 5], dtype='float64')
    return ret

class TestNameVisitor(unittest.TestCase):
    def setUp(self):
        self.loop_funcs = [
            while_loop_dyfunc, for_loop_dyfunc, while_loop_dyfunc_with_none
        ]
        self.loop_var_names = [
            set(["i", "x"]), set(["i", "ret", "max_len"]), set(["i", "x"])
        ]
        self.create_var_names = [set(), set(["ret"]), set()]

    def test_loop_vars(self):
        for i in range(len(self.loop_funcs)):
            func = self.loop_funcs[i]
            test_func = inspect.getsource(func)
            gast_root = gast.parse(test_func)
            name_visitor = NameVisitor(gast_root)
            for node in gast.walk(gast_root):
                if isinstance(node, (gast.While, gast.For)):
                    loop_var_names, create_var_names = name_visitor.get_loop_var_names(
                        node)
                    self.assertEqual(loop_var_names, self.loop_var_names[i])
                    self.assertEqual(create_var_names, self.create_var_names[i])

class TestTransformWhileLoop(unittest.TestCase):
    def setUp(self):
        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        self.x = np.zeros(shape=(1), dtype=np.int32)
        self._init_dyfunc()

    def _init_dyfunc(self):
        self.dyfunc = while_loop_dyfunc

    def _run_static(self):
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            x_var = fluid.layers.assign(self.x)
            static_func = dygraph_to_static_func(self.dyfunc)
            out = static_func(x_var)
        exe = fluid.Executor(self.place)
        ret = exe.run(main_program, fetch_list=out)
        return ret

    def _run_dygraph(self):
        with fluid.dygraph.guard(self.place):
            ret = self.dyfunc(fluid.dygraph.to_variable(self.x))
            return ret.numpy()

    def test_ast_to_func(self):
        static_numpy = self._run_static()
        self.assertTrue(
            np.allclose(
                np.full(
                    shape=(1), fill_value=45, dtype=np.int32), static_numpy))
        # Enable next lines after Paddle dygraph supports while x < 10
        #
        # self._run_dygraph()
        # self.assertTrue(np.allclose(self._run_dygraph(), self._run_static()))

class TestTransformWhileLoopWithConflicVar(TestTransformWhileLoop):
    def _init_dyfunc(self):
        self.dyfunc = while_loop_dyfun_with_conflict_var

class TestTransformWhileLoopWithNone(TestTransformWhileLoop):
    def _init_dyfunc(self):
        self.dyfunc = while_loop_dyfunc_with_none

class TestWhileLoopBoolOp(TestTransformWhileLoop):
    def _init_dyfunc(self):
        self.dyfunc = while_loop_bool_op

class TestTransformForLoop(unittest.TestCase):
    def setUp(self):
        self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        self.len = 100
        self._init_dyfunc()

    def _init_dyfunc(self):
        self.dyfunc = for_loop_dyfunc

    def _run_static(self):
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            static_func = dygraph_to_static_func(self.dyfunc)
            out = static_func(self.len)
        exe = fluid.Executor(self.place)
        ret = exe.run(main_program, fetch_list=out)
        return ret

    def _run_dygraph(self):
        with fluid.dygraph.guard(self.place):
            ret = self.dyfunc(self.len)
            return ret.numpy()

    def test_ast_to_func(self):
        static_numpy = self._run_static()
        self._run_dygraph()
        self.assertTrue(np.allclose(self._run_dygraph(), self._run_static()))

class TestVarCreateInForLoop(TestTransformForLoop):
    def _init_dyfunc(self):
        self.dyfunc = var_create_in_for_loop

if __name__ == '__main__':
    unittest.main()
4c7b6db17e6ab694ca109668ca14e6cdd9f31deb
e43488a799f03823d2eb9c0c6e9ee2727ca8ee5f
/agetoclassIndia.py
7b2653e39dd3e796f72bb54a2b84d177c8371981
[]
no_license
lekuid/Practice
efcaa8125782c582f162b3a01c455c775b8cc74b
dd28f2aeeb9bf7dc17a467088fa705c618920bdc
refs/heads/main
2023-02-25T11:36:05.041165
2021-01-19T12:25:01
2021-01-19T12:25:01
319,907,882
0
0
null
null
null
null
UTF-8
Python
false
false
992
py
# considering pre school and nursery, this specific code
# is to help me write a blog because I have terrible memory.
import tkinter as tk

def age(n):
    if n in range(0, 4):
        returned['text'] = 'preschool'
    elif n in range(5, 17):
        returned['text'] = 'class {}'.format(n - 4)
    elif n in range(17, 21):
        returned['text'] = 'college {}'.format(n - 16) + 'year'

win = tk.Tk()
win.title('Which Education?')
base = tk.Canvas(win, height=100, width=400, bg='#181818')
base.pack()
main_frame = tk.Frame(base, bg='#181818')
main_frame.place(relwidth=0.5, relheight=1)
n = tk.Entry(main_frame, bg='#222222', fg='#999999', font=40)
n.place(relx=0.2, rely=0.3, relwidth=0.6, relheight=0.25)
submit = tk.Button(main_frame, text='Enlighten Me', bg='#555555', fg='white',
                   command=lambda: age(int(n.get())))
submit.place(relx=0.4, rely=0.6, relheight=0.2, relwidth=0.4)
returned = tk.Label(base, bg='#555555')
returned.place(relx=0.5, relheight=1, relwidth=0.5)
win.mainloop()
82462b3b84eb3dd1a80c3d879d724b7466866733
2e559f86d68b67b1602de4b1f1e358fbc7e688a3
/django_learn01/django_learn01/urls.py
2a0aebc699d9b77eac27a9ea94df02a99e291478
[]
no_license
xinyifuyun/django_learn
96e0f80c3d85b85846bbde6cedc326f164b32349
e56528de652ca5f40b8993ea19c3ff6dc52d6ef6
refs/heads/master
2021-09-10T08:01:24.943357
2018-03-22T14:25:11
2018-03-22T14:25:11
126,127,264
0
0
null
null
null
null
UTF-8
Python
false
false
771
py
"""django_learn01 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), ]
25f5973f8ab2a851097482e039611a07073c7937
576f7b951191d6095df8bc2691c8ad7045d55447
/Basics/ws1.py
2c093682f5dd9b60db3a8e6edd46257ee45a4641
[]
no_license
golam-saroar/Python_Learning
f555368420ef65ceef9a80349b9c3bae2c6842b9
c077a8c2e5738b47cb301f07806af5a4c6714dff
refs/heads/master
2021-09-22T09:31:35.907800
2018-09-07T09:14:25
2018-09-07T09:14:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
565
py
import time

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('http://www.google.com')
search = browser.find_element_by_name('q')
search.send_keys("google search through python")
search.send_keys(Keys.RETURN)  # hit return after you enter search text
time.sleep(60)  # sleep for 60 seconds so you can see the results
browser.quit()
595a03475ccae6898dc86f756ac94ef70b4626a8
82b946da326148a3c1c1f687f96c0da165bb2c15
/sdk/python/pulumi_azure_native/resources/get_tag_at_scope.py
b09a7ed0b50a69f536be91e3e72e2d84977b5b39
[ "BSD-3-Clause", "Apache-2.0" ]
permissive
morrell/pulumi-azure-native
3916e978382366607f3df0a669f24cb16293ff5e
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
refs/heads/master
2023-06-20T19:37:05.414924
2021-07-19T20:57:53
2021-07-19T20:57:53
387,815,163
0
0
Apache-2.0
2021-07-20T14:18:29
2021-07-20T14:18:28
null
UTF-8
Python
false
false
3,078
py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs

__all__ = [
    'GetTagAtScopeResult',
    'AwaitableGetTagAtScopeResult',
    'get_tag_at_scope',
]

@pulumi.output_type
class GetTagAtScopeResult:
    """
    Wrapper resource for tags API requests and responses.
    """
    def __init__(__self__, id=None, name=None, properties=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the tags wrapper resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the tags wrapper resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.TagsResponse':
        """
        The set of tags.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the tags wrapper resource.
        """
        return pulumi.get(self, "type")

class AwaitableGetTagAtScopeResult(GetTagAtScopeResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetTagAtScopeResult(
            id=self.id,
            name=self.name,
            properties=self.properties,
            type=self.type)

def get_tag_at_scope(scope: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTagAtScopeResult:
    """
    Wrapper resource for tags API requests and responses.
    API Version: 2019-10-01.

    :param str scope: The resource scope.
    """
    __args__ = dict()
    __args__['scope'] = scope
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:resources:getTagAtScope', __args__, opts=opts, typ=GetTagAtScopeResult).value

    return AwaitableGetTagAtScopeResult(
        id=__ret__.id,
        name=__ret__.name,
        properties=__ret__.properties,
        type=__ret__.type)
bad61a3f30fe9c35194f7b6e7b9709ce2ead722e
e3ff9e938e07be5b8d853d85dc8eccca09de380e
/hw_02-equiprobability/B2-observable-x-position-markov.py
3683d15a4e0e24fbcbc0f1044bbda277dd569bcb
[]
no_license
pallavsen007/statistical_mechanics
ce0039928fbd050a1e8db0a4c22c0f9360a2c6f6
bd0377b85628c064ba5dc9cf52ac200864d745e4
refs/heads/master
2021-12-12T00:58:53.472651
2016-11-30T17:04:27
2016-11-30T17:04:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,163
py
# Problem B1: equiprobability for observable = x-position
import random, pylab

# update disk configuration L using markov chain algorithm
# L = current disk configuration
# delta = step for markov chain algorithm
# sigma = disk radius
def markov_disks_box_update(L, delta, sigma):
    a = random.choice(L)
    b = [a[0] + random.uniform(-delta, delta), a[1] + random.uniform(-delta, delta)]
    min_dist = min((b[0] - c[0]) ** 2 + (b[1] - c[1]) ** 2 for c in L if c != a)
    box_cond = min(b[0], b[1]) < sigma or max(b[0], b[1]) > 1.0 - sigma
    if not (box_cond or min_dist < 4.0 * sigma ** 2):
        a[:] = b
    return L

N = 4
sigma = 0.1197
delta = 0.1
n_runs = 2000000
histo_data = []
L = [[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]]
for run in range(n_runs):
    L = markov_disks_box_update(L, delta, sigma)
    for k in range(N):
        histo_data.append(L[k][0])

pylab.hist(histo_data, bins=100, normed=True)
pylab.xlabel('x')
pylab.ylabel('frequency')
pylab.title('Markov Chain algorithm: x coordinate histogram (density eta=0.18)')
pylab.grid()
pylab.savefig('markov_disks_histo.png')
pylab.show()
ab61f660d0a067e4735543d8f22852731a7d8493
604ae72a87d4cd5774fb717f60e7ebc6b3466aad
/DecisionTrees/prepareData.py
fd4c1a8bde71ee58f36ca0d2472c4b2f92f54209
[]
no_license
remalcodex/MachineLearning
ed2e495057b9fdd732eee591c86048cb06667232
f3f1ea8660bfc3046877c425c3916b266c277778
refs/heads/master
2021-09-11T20:38:00.830789
2018-04-12T02:56:12
2018-04-12T02:56:12
105,211,815
0
0
null
null
null
null
UTF-8
Python
false
false
4,859
py
import numpy as np
import os.path

# Returns the data for features from 1-6 only.
def getData(inFileName):
    my_path = os.path.abspath(os.path.dirname(__file__))
    path = os.path.join(my_path, "./Updated_Dataset/" + inFileName)
    trainingFile = open(path, "r", encoding="utf8")

    Y = np.empty((0, 1), int)
    X = np.empty((0, 6), int)
    for line in trainingFile:
        newLine = line.strip()
        if len(newLine) == 0:
            continue
        if newLine[0] == '+':
            Y = np.append(Y, np.array([1]))
        elif newLine[0] == '-':
            Y = np.append(Y, np.array([0]))
        else:
            print("Problem with the data")
            continue

        name = newLine[2:]
        nameList = name.split(" ")
        if len(nameList) < 2:
            print("Data problematic with name")
            continue

        # Feature 1.
        x1 = 0
        if len(nameList[0]) >= len(nameList[-1]):
            x1 = 1
        # Feature 2.
        x2 = 0
        if len(nameList) >= 3:
            x2 = 1
        # Feature 3
        x3 = 0
        firstName = nameList[0]
        if firstName[0].lower() == firstName[-1].lower():
            x3 = 1
        # Feature 4
        x4 = 0
        if nameList[0] < nameList[-1]:
            x4 = 1
        # Feature 5
        x5 = 0
        firstName = nameList[0]
        if len(firstName) >= 2:
            if (firstName[1].lower() == "a" or firstName[1].lower() == "e" or
                    firstName[1].lower() == "i" or firstName[1].lower() == "o" or
                    firstName[1].lower() == "u"):
                x5 = 1
        # Feature 6
        x6 = 0
        lastName = nameList[-1]
        if len(lastName) % 2 == 0:
            x6 = 1

        X = np.vstack((X, np.array([x1, x2, x3, x4, x5, x6])))
    # print(X)
    return X, Y

# Returns data with 20 features.
def getMoreData(inFileName):
    my_path = os.path.abspath(os.path.dirname(__file__))
    path = os.path.join(my_path, "./Updated_Dataset/" + inFileName)
    trainingFile = open(path, "r", encoding="utf8")

    Y = np.empty((0, 1), int)
    X = np.empty((0, 20), int)
    for line in trainingFile:
        newLine = line.strip()
        if len(newLine) == 0:
            continue
        if newLine[0] == '+':
            Y = np.append(Y, np.array([1]))
        elif newLine[0] == '-':
            Y = np.append(Y, np.array([0]))
        else:
            print("Problem with the data")
            continue

        name = newLine[2:]
        nameList = name.split(" ")
        if len(nameList) < 2:
            print("Data problematic with name")
            continue

        firstName = nameList[0]
        lastName = nameList[-1]
        # Feature 1.
        x1 = 0
        if len(firstName) >= len(lastName):
            x1 = 1
        # Feature 2.
        x2 = 0
        if len(nameList) >= 3:
            x2 = 1
        # Feature 3
        x3 = 0
        if firstName[0].lower() == firstName[-1].lower():
            x3 = 1
        # Feature 4
        x4 = 0
        if nameList[0] < nameList[-1]:
            x4 = 1
        # Feature 5
        x5 = 0
        if len(firstName) >= 2:
            if (firstName[1].lower() == "a" or firstName[1].lower() == "e" or
                    firstName[1].lower() == "i" or firstName[1].lower() == "o" or
                    firstName[1].lower() == "u"):
                x5 = 1
        # Feature 6
        x6 = 0
        if len(lastName) % 2 == 0:
            x6 = 1
        # Feature 7
        x7 = 0
        if len(firstName) % 2 == 0:
            x7 = 1
        # Feature 8
        x8 = 0
        if len(name) > 10:
            x8 = 1
        # Feature 9
        x9 = 0
        if firstName[0].lower() == "r":
            x9 = 1
        # Feature 10
        x10 = 0
        if firstName[0].lower() == "a":
            x10 = 1
        # Feature 11
        x11 = 1
        if len(nameList) > 3:
            x11 = 0
        # Feature 12
        x12 = 0
        if firstName[0].lower() == "m":
            x12 = 1
        # Feature 13
        x13 = 1
        if len(nameList) >= 3:
            if "." in nameList[1]:
                x13 = 0
        # Feature 14
        x14 = 1
        if lastName[0].lower() == "r":
            x14 = 0
        # Feature 15
        x15 = 0
        if firstName[0].lower() == "d":
            x15 = 1
        # Feature 16
        x16 = 0
        if lastName[0].lower() == "t":
            x16 = 1
        # Feature 17
        x17 = 0
        if firstName[0].lower() == "p":
            x17 = 1
        # Feature 18
        x18 = 0
        if firstName[0].lower() == "j":
            x18 = 1
        # Feature 19
        x19 = 1
        if len(nameList) == 3:
            x19 = 0
        # Feature 20
        x20 = 0
        if lastName[0].lower() == "w":  # was "W": a lowercased char can never equal an uppercase letter
            x20 = 1

        X = np.vstack((X, np.array([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10,
                                    x11, x12, x13, x14, x15, x16, x17, x18, x19, x20])))
    # print(X)
    return X, Y
0b7d0fdaa4a71396493f6b461f3dbe60336569d7
03e424616ef4783c28f1ea57a7886aa76aa55edd
/NanoGardener/python/data/VBSjjlnu_vars.py
5321b2ece5c960492c3afcca12b0d7c32e7d932c
[]
no_license
dbrambilla13/LatinoAnalysis
8dd7751a6793012f12bbe3db4bfd0ba93bdb2716
f3a17a095450d200af7d278137f2f089e8f0d8b8
refs/heads/master
2021-03-19T03:11:43.998028
2020-03-12T14:47:55
2020-03-12T14:47:55
247,127,553
0
0
null
2020-03-13T17:44:49
2020-03-13T17:33:04
null
UTF-8
Python
false
false
12,050
py
from itertools import chain
from math import cosh, sqrt, cos
from ROOT import TLorentzVector

VBSjjlnu_branches = {
    "F": [
        "vbs_0_pt", "vbs_0_eta", "vbs_0_phi", "vbs_0_E",
        "vbs_1_pt", "vbs_1_eta", "vbs_1_phi", "vbs_1_E",
        "vjet_0_pt", "vjet_0_eta", "vjet_0_phi", "vjet_0_E",
        "vjet_1_pt", "vjet_1_eta", "vjet_1_phi", "vjet_1_E",
        "mjj_vbs", "mjj_vjet",
        "deltaeta_vbs", "deltaphi_vbs",
        "deltaeta_vjet", "deltaphi_vjet",
        "deltaphi_lep_vbs_0", "deltaphi_lep_vbs_1",
        "deltaeta_lep_vbs_0", "deltaeta_lep_vbs_1",
        "deltaphi_lep_vjet_0", "deltaphi_lep_vjet_1",
        "deltaeta_lep_vjet_0", "deltaeta_lep_vjet_1",
        "deltaR_lep_vbs", "deltaR_lep_vjet",
        "deltaphi_lep_nu", "deltaeta_lep_nu", "deltaR_lep_nu",
        "deltaR_vbs", "deltaR_vjet",
        "Rvjets_0", "Rvjets_1",
        "Zvjets_0", "Zvjets_1", "Zlep",
        "Asym_vbs", "Asym_vjet",
        "Mw_lep", "Mtw_lep", "w_lep_pt",
        "Mww", "R_ww", "R_mw", "A_ww",
        "Centr_vbs", "Centr_ww",
        "Lep_proj", "Lep_projw",
        "recoMET", "recoMET_pz",
    ],
    "I": ["N_jets", "N_jets_forward", "N_jets_central"]
}

VBSjjlnu_vector_branches = [
    {"type": "I", "len": "N_jets", "name": "other_jets_index"}
]

def getDefault():
    output = {}
    for br in VBSjjlnu_branches["F"]:
        output[br] = -999.
    for br in VBSjjlnu_branches["I"]:
        output[br] = -999
    for vec_br in VBSjjlnu_vector_branches:
        output[vec_br["name"]] = []
    return output

def getVBSkin_resolved(vbsjets, vjets, lepton, met, reco_neutrino,
                       other_jets, other_jets_ind, debug=False):
    output = getDefault()
    # variables extraction
    total_vbs = TLorentzVector(0, 0, 0, 0)
    vbs_etas = []
    vbs_phis = []
    vbs_pts = []
    vbs_Es = []
    for i, j in enumerate(vbsjets):
        total_vbs += j
        vbs_etas.append(j.Eta())
        vbs_phis.append(j.Phi())
        vbs_pts.append(j.Pt())
        vbs_Es.append(j.E())
    if debug:
        print "VBS pts", vbs_pts
        print "VBS etas", vbs_etas

    deltaeta_vbs = abs(vbs_etas[0] - vbs_etas[1])
    mean_eta_vbs = sum(vbs_etas) / 2

    output["vbs_0_pt"] = vbs_pts[0]
    output["vbs_1_pt"] = vbs_pts[1]
    output["vbs_0_eta"] = abs(vbs_etas[0])
    output["vbs_1_eta"] = abs(vbs_etas[1])
    output["vbs_0_phi"] = abs(vbs_phis[0])
    output["vbs_1_phi"] = abs(vbs_phis[1])
    output["vbs_0_E"] = abs(vbs_Es[0])
    output["vbs_1_E"] = abs(vbs_Es[1])
    output["mjj_vbs"] = total_vbs.M()
    output["deltaeta_vbs"] = deltaeta_vbs
    output["deltaphi_vbs"] = abs(vbsjets[0].DeltaPhi(vbsjets[1]))
    output["deltaR_vbs"] = vbsjets[0].DrEtaPhi(vbsjets[1])

    total_vjet = TLorentzVector(0, 0, 0, 0)
    vjet_etas = []
    vjet_phis = []
    vjet_pts = []
    vjet_Es = []
    for i, j in enumerate(vjets):
        total_vjet += j
        vjet_etas.append(j.Eta())
        vjet_phis.append(j.Phi())
        vjet_pts.append(j.Pt())
        vjet_Es.append(j.E())
    if debug:
        print "Vjet pts", vjet_pts
        print "Vjet etas", vjet_etas

    output["vjet_0_pt"] = vjet_pts[0]
    output["vjet_1_pt"] = vjet_pts[1]
    output["vjet_0_eta"] = abs(vjet_etas[0])
    output["vjet_1_eta"] = abs(vjet_etas[1])
    output["vjet_0_phi"] = abs(vjet_phis[0])
    output["vjet_1_phi"] = abs(vjet_phis[1])
    output["vjet_0_E"] = abs(vjet_Es[0])
    output["vjet_1_E"] = abs(vjet_Es[1])
    output["mjj_vjet"] = total_vjet.M()
    output["deltaphi_vjet"] = abs(vjets[0].DeltaPhi(vjets[1]))
    output["deltaeta_vjet"] = abs(vjet_etas[0] - vjet_etas[1])
    output["deltaR_vjet"] = vjets[0].DrEtaPhi(vjets[1])

    output["recoMET"] = reco_neutrino.Pt()
    output["recoMET_pz"] = reco_neutrino.Pz()
    output["deltaphi_lep_nu"] = abs(lepton.DeltaPhi(reco_neutrino))
    output["deltaeta_lep_nu"] = abs(lepton.Eta() - reco_neutrino.Eta())
    output["deltaR_lep_nu"] = lepton.DrEtaPhi(reco_neutrino)

    # Delta Phi with lepton
    output["deltaphi_lep_vbs_0"] = abs(lepton.DeltaPhi(vbsjets[0]))
    output["deltaphi_lep_vbs_1"] = abs(lepton.DeltaPhi(vbsjets[1]))
    output["deltaphi_lep_vjet_0"] = abs(lepton.DeltaPhi(vjets[0]))
    output["deltaphi_lep_vjet_1"] = abs(lepton.DeltaPhi(vjets[1]))

    # Delta Eta with lepton
    output["deltaeta_lep_vbs_0"] = abs(lepton.Eta() - vbs_etas[0])
    output["deltaeta_lep_vbs_1"] = abs(lepton.Eta() - vbs_etas[1])
    output["deltaeta_lep_vjet_0"] = abs(lepton.Eta() - vjet_etas[0])
    output["deltaeta_lep_vjet_1"] = abs(lepton.Eta() - vjet_etas[1])

    # Look for nearest vbs jet from lepton
    output["deltaR_lep_vbs"] = min([lepton.DrEtaPhi(vbsjets[0]), lepton.DrEtaPhi(vbsjets[1])])
    output["deltaR_lep_vjet"] = min([lepton.DrEtaPhi(vjets[0]), lepton.DrEtaPhi(vjets[1])])

    # Zeppenfeld variables
    if deltaeta_vbs != 0:
        output["Zvjets_0"] = (vjet_etas[0] - mean_eta_vbs) / deltaeta_vbs
        output["Zvjets_1"] = (vjet_etas[1] - mean_eta_vbs) / deltaeta_vbs
        output["Zlep"] = (lepton.Eta() - mean_eta_vbs) / deltaeta_vbs

    # R variables
    ptvbs01 = vbsjets[0].Pt() * vbsjets[1].Pt()
    output["Rvjets_0"] = (lepton.Pt() * vjets[0].Pt()) / ptvbs01
    output["Rvjets_1"] = (lepton.Pt() * vjets[1].Pt()) / ptvbs01

    # Asymmetry
    output["Asym_vbs"] = (vbs_pts[0] - vbs_pts[1]) / sum(vbs_pts)
    output["Asym_vjet"] = (vjet_pts[0] - vjet_pts[1]) / sum(vjet_pts)

    # WW variables
    w_lep = lepton + reco_neutrino
    w_had = vjets[0] + vjets[1]
    w_lep_t = w_lep.Vect()
    w_lep_t.SetZ(0)
    w_had_t = w_had.Vect()
    w_had_t.SetZ(0)
    ww_vec = w_lep + w_had
    output["w_lep_pt"] = w_lep.Pt()
    output["Mw_lep"] = w_lep.M()
    #output["Mtw_lep"] = w_lep_t.M()
    output["Mtw_lep"] = sqrt(2 * lepton.Pt() * met.Pt() * (1 - cos(lepton.DeltaPhi(met))))
    output["Mww"] = ww_vec.M()
    output["R_ww"] = (w_lep.Pt() * w_lep.Pt()) / ptvbs01
    output["R_mw"] = ww_vec.M() / ptvbs01
    output["A_ww"] = (w_lep_t + w_had_t).Pt() / (w_lep.Pt() + w_had.Pt())

    # Centrality
    eta_ww = (w_lep.Eta() + w_had.Eta()) / 2
    if deltaeta_vbs != 0.:
        output["Centr_vbs"] = abs(vbs_etas[0] - eta_ww - vbs_etas[1]) / deltaeta_vbs
    deltaeta_plus = max(vbs_etas) - max([w_lep.Eta(), w_had.Eta()])
    deltaeta_minus = min([w_lep.Eta(), w_had.Eta()]) - min(vbs_etas)
    output["Centr_ww"] = min([deltaeta_plus, deltaeta_minus])

    # Lepton projection
    lep_vec_t = lepton.Vect()
    lep_vec_t.SetZ(0)
    output["Lep_proj"] = (w_lep_t * lep_vec_t) / w_lep.Pt()
    output["Lep_projw"] = (w_lep_t * lep_vec_t) / (lepton.Pt() * w_lep.Pt())

    # Ht and number of jets with Pt > 20
    # using uncut jets
    Njets = len(other_jets)
    N_jets_forward = 0
    N_jets_central = 0
    for oj in other_jets:
        j_eta, j_pt = oj.Eta(), oj.Pt()
        # Looking only to jets != vbs & vjets
        if deltaeta_vbs != 0.:
            Z = abs((j_eta - mean_eta_vbs) / deltaeta_vbs)
            if Z > 0.5:
                N_jets_forward += 1
            else:
                N_jets_central += 1

    output["N_jets"] = Njets
    output["N_jets_central"] = N_jets_central
    output["N_jets_forward"] = N_jets_forward
    output["other_jets_index"] = other_jets_ind
    return output

def getVBSkin_boosted(vbsjets, fatjet, lepton, met, reco_neutrino,
                      other_jets, other_jets_ind, debug=False):
    output = getDefault()
    # variables extraction
    total_vbs = TLorentzVector(0, 0, 0, 0)
    vbs_etas = []
    vbs_phis = []
    vbs_pts = []
    vbs_Es = []
    for i, j in enumerate(vbsjets):
        total_vbs += j
        vbs_etas.append(j.Eta())
        vbs_phis.append(j.Phi())
        vbs_pts.append(j.Pt())
        vbs_Es.append(j.E())
    if debug:
        print "VBS pts", vbs_pts
        print "VBS etas", vbs_etas

    deltaeta_vbs = abs(vbs_etas[0] - vbs_etas[1])
    mean_eta_vbs = sum(vbs_etas) / 2

    output["vbs_0_pt"] = vbs_pts[0]
    output["vbs_1_pt"] = vbs_pts[1]
    output["vbs_0_eta"] = abs(vbs_etas[0])
    output["vbs_1_eta"] = abs(vbs_etas[1])
    output["vbs_0_phi"] = abs(vbs_phis[0])
    output["vbs_1_phi"] = abs(vbs_phis[1])
    output["vbs_0_E"] = abs(vbs_Es[0])
    output["vbs_1_E"] = abs(vbs_Es[1])
    output["mjj_vbs"] = total_vbs.M()
    output["deltaeta_vbs"] = deltaeta_vbs
    output["deltaphi_vbs"] = abs(vbsjets[0].DeltaPhi(vbsjets[1]))
    output["deltaR_vbs"] = vbsjets[0].DrEtaPhi(vbsjets[1])

    total_vjet = fatjet
    vjet_eta = fatjet.Eta()
    vjet_pt = fatjet.Pt()
    if debug:
        # fixed: the boosted version has a single fat jet, the original
        # debug print referenced the undefined lists vjet_pts / vjet_etas
        print "Vjet pt", vjet_pt
        print "Vjet eta", vjet_eta

    output["vjet_0_pt"] = vjet_pt
    output["vjet_0_eta"] = vjet_eta
    output["vjet_0_phi"] = fatjet.Phi()
    output["vjet_0_E"] = fatjet.E()
    output["mjj_vjet"] = total_vjet.M()

    output["recoMET"] = reco_neutrino.Pt()
    output["recoMET_pz"] = reco_neutrino.Pz()
    output["deltaphi_lep_nu"] = abs(lepton.DeltaPhi(reco_neutrino))
    output["deltaeta_lep_nu"] = abs(lepton.Eta() - reco_neutrino.Eta())
    output["deltaR_lep_nu"] = lepton.DrEtaPhi(reco_neutrino)

    # Delta Phi with lepton
    output["deltaphi_lep_vbs_0"] = abs(lepton.DeltaPhi(vbsjets[0]))
    output["deltaphi_lep_vbs_1"] = abs(lepton.DeltaPhi(vbsjets[1]))
    output["deltaphi_lep_vjet_0"] = abs(lepton.DeltaPhi(fatjet))

    # Delta Eta with lepton
    output["deltaeta_lep_vbs_0"] = abs(lepton.Eta() - vbs_etas[0])
    output["deltaeta_lep_vbs_1"] = abs(lepton.Eta() - vbs_etas[1])
    output["deltaeta_lep_vjet_0"] = abs(lepton.Eta() - vjet_eta)

    # Look for nearest vbs jet from lepton
    output["deltaR_lep_vbs"] = min([lepton.DrEtaPhi(vbsjets[0]), lepton.DrEtaPhi(vbsjets[1])])
    output["deltaR_lep_vjet"] = lepton.DrEtaPhi(fatjet)

    # Zeppenfeld variables
    if deltaeta_vbs != 0.:
        output["Zvjets_0"] = (vjet_eta - mean_eta_vbs) / deltaeta_vbs
        output["Zlep"] = (lepton.Eta() - mean_eta_vbs) / deltaeta_vbs

    # R variables
    ptvbs01 = vbsjets[0].Pt() * vbsjets[1].Pt()
    output["Rvjets_0"] = (lepton.Pt() * vjet_pt) / ptvbs01

    # Asymmetry
    output["Asym_vbs"] = (vbs_pts[0] - vbs_pts[1]) / sum(vbs_pts)

    # WW variables
    w_lep = lepton + reco_neutrino
    w_had = fatjet
    w_lep_t = w_lep.Vect()
    w_lep_t.SetZ(0)
    w_had_t = w_had.Vect()
    w_had_t.SetZ(0)
    ww_vec = w_lep + w_had
    output["w_lep_pt"] = w_lep.Pt()
    output["Mw_lep"] = w_lep.M()
    #output["Mtw_lep"] = w_lep_t.M()
    output["Mtw_lep"] = sqrt(2 * lepton.Pt() * met.Pt() * (1 - cos(lepton.DeltaPhi(met))))
    output["Mww"] = ww_vec.M()
    output["R_ww"] = (w_lep.Pt() * w_lep.Pt()) / ptvbs01
    output["R_mw"] = ww_vec.M() / ptvbs01
    output["A_ww"] = (w_lep_t + w_had_t).Pt() / (w_lep.Pt() + w_had.Pt())

    # Centrality
    eta_ww = (w_lep.Eta() + w_had.Eta()) / 2
    if deltaeta_vbs != 0.:
        output["Centr_vbs"] = abs(vbs_etas[0] - eta_ww - vbs_etas[1]) / deltaeta_vbs
    deltaeta_plus = max(vbs_etas) - max([w_lep.Eta(), w_had.Eta()])
    deltaeta_minus = min([w_lep.Eta(), w_had.Eta()]) - min(vbs_etas)
    output["Centr_ww"] = min([deltaeta_plus, deltaeta_minus])

    # Lepton projection
    lep_vec_t = lepton.Vect()
    lep_vec_t.SetZ(0)
    output["Lep_proj"] = (w_lep_t * lep_vec_t) / w_lep.Pt()
    output["Lep_projw"] = (w_lep_t * lep_vec_t) / (lepton.Pt() * w_lep.Pt())

    # Ht and number of jets with Pt > 20
    # using uncut jets
    Njets = len(other_jets)
    N_jets_forward = 0
    N_jets_central = 0
    for oj in other_jets:
        j_eta, j_pt = oj.Eta(), oj.Pt()
        # Looking only to jets != vbs & vjets
        if deltaeta_vbs != 0.:
            Z = abs((j_eta - mean_eta_vbs) / deltaeta_vbs)
            if Z > 0.5:
                N_jets_forward += 1
            else:
                N_jets_central += 1

    output["N_jets"] = Njets
    output["N_jets_central"] = N_jets_central
    output["N_jets_forward"] = N_jets_forward
    output["other_jets_index"] = other_jets_ind
    return output
0437af2fc9abfade8c541e3cbbf7a3bb016088c4
7e1257fd3a05089fdf80fd67e8ec75eed955e711
/prdnn/tests/test_ddnn.py
0b926f85d28e063ea1cbc2844139b9507b9f34a7
[ "MIT" ]
permissive
terminiter/PRDNN
f1ea156f01957bda3d8205c4ca4bcd26882e0c09
b6ca37ba8fd617c7cf9620faac88484603e5d2fe
refs/heads/master
2023-06-05T09:18:08.448607
2021-06-30T04:01:56
2021-06-30T04:01:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,966
py
"""Tests the methods in ddnn.py.""" # pylint: disable=import-error import numpy as np import torch from pysyrenn import ReluLayer, FullyConnectedLayer, ArgMaxLayer from pysyrenn import HardTanhLayer, MaxPoolLayer, StridedWindowData try: from external.bazel_python.pytest_helper import main IN_BAZEL = True except ImportError: IN_BAZEL = False from prdnn.ddnn import DDNN def test_compute(): """Tests that it works for a simple example.""" activation_layers = [ FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))), ReluLayer(), FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))), ReluLayer(), ] value_layers = activation_layers[:2] + [ FullyConnectedLayer(3.0 * np.eye(2), np.zeros(shape=(2,))), ReluLayer(), ] network = DDNN(activation_layers, value_layers) assert network.differ_index == 2 output = network.compute([[-2.0, 1.0]]) assert np.allclose(output, [[0.0, 6.0]]) output = network.compute(torch.tensor([[-2.0, 1.0]])).numpy() assert np.allclose(output, [[0.0, 6.0]]) activation_layers = [ FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))), HardTanhLayer(), ] value_layers = [ FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))), HardTanhLayer(), ] network = DDNN(activation_layers, value_layers) output = network.compute([[0.5, -0.9]]) assert np.allclose(output, [[1.0, -1.8]]) # Test HardTanh activation_layers = [ FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))), HardTanhLayer(), ] value_layers = [ FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))), HardTanhLayer(), ] network = DDNN(activation_layers, value_layers) output = network.compute([[0.5, -0.9]]) assert np.allclose(output, [[1.0, -1.8]]) # Test MaxPool width, height, channels = 2, 2, 2 window_data = StridedWindowData((height, width, channels), (2, 2), (2, 2), (0, 0), channels) maxpool_layer = MaxPoolLayer(window_data) activation_layers = [ FullyConnectedLayer(np.eye(8), np.ones(shape=(8,))), maxpool_layer, ] value_layers = [ FullyConnectedLayer(-1. * np.eye(8), np.zeros(shape=(8,))), maxpool_layer, ] network = DDNN(activation_layers, value_layers) output = network.compute([[1.0, 2.0, -1.0, -2.5, 0.0, 0.5, 1.5, -3.5]]) # NHWC, so the two channels are: [1, -1, 0, 1.5] and [2, -2.5, 0.5, -3.5] # So the maxes are 1.5 and 2.0, so the value layer outputs -1.5, -2.0 assert np.allclose(output, [[-1.5, -2.0]]) def test_compute_representatives(): """Tests that the linear-region endpoints work.""" activation_layers = [ FullyConnectedLayer(np.eye(1), np.zeros(shape=(1,))), ReluLayer(), ] value_layers = [ FullyConnectedLayer(np.eye(1), np.ones(shape=(1,))), ReluLayer(), ] network = DDNN(activation_layers, value_layers) assert network.differ_index == 0 points = np.array([[0.0], [0.0]]) representatives = np.array([[1.0], [-1.0]]) output = network.compute(points, representatives=representatives) assert np.array_equal(output, [[1.], [0.]]) def test_nodiffer(): """Tests the it works if activation and value layers are identical.""" activation_layers = [ FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))), ReluLayer(), FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))), ReluLayer(), ] value_layers = activation_layers network = DDNN(activation_layers, value_layers) assert network.differ_index == 4 output = network.compute([[-2.0, 1.0]]) assert np.allclose(output, [[0.0, 4.0]]) def test_bad_layer(): """Tests that unspported layers after differ_index fail.""" # It should work if it's before the differ_index. 
activation_layers = [ FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))), ReluLayer(), FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))), ArgMaxLayer(), ] value_layers = activation_layers network = DDNN(activation_layers, value_layers) assert network.differ_index == 4 output = network.compute([[-2.0, 1.0]]) assert np.allclose(output, [[1.0]]) # But not after the differ_index. activation_layers = [ FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))), ReluLayer(), FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))), ArgMaxLayer(), ] value_layers = activation_layers[:2] + [ FullyConnectedLayer(3.0 * np.eye(2), np.zeros(shape=(2,))), ReluLayer(), ] network = DDNN(activation_layers, value_layers) assert network.differ_index == 2 try: output = network.compute([[-2.0, 1.0]]) assert False except NotImplementedError: pass def test_serialization(): """Tests that it correctly (de)serializes.""" activation_layers = [ FullyConnectedLayer(np.eye(2), np.ones(shape=(2,))), ReluLayer(), FullyConnectedLayer(2.0 * np.eye(2), np.zeros(shape=(2,))), ReluLayer(), ] value_layers = activation_layers[:2] + [ FullyConnectedLayer(3.0 * np.eye(2), np.zeros(shape=(2,))), ReluLayer(), ] network = DDNN(activation_layers, value_layers) serialized = network.serialize() assert all(serialized == layer.serialize() for serialized, layer in zip(serialized.activation_layers, activation_layers)) assert all(serialized == layer.serialize() for serialized, layer in zip(serialized.value_layers, value_layers[2:])) assert serialized.differ_index == 2 assert DDNN.deserialize(serialized).serialize() == serialized if IN_BAZEL: main(__name__, __file__)
0408eac5351346b0ff7ff4b4a372638c9d42f71c
142696a656d98f2028f6fadd7af2e88ac9627f6e
/setup.py
08abbc801c6e16e22857dcfa2a8e0bc67156b9b0
[ "MIT" ]
permissive
akeshavan/mindlogger-build-applet
2806b32032361dcfa8d0afc41ec41e286ecc6f13
02cc39a0f1dad57dae096ef9897c8e2daba90aee
refs/heads/master
2020-06-10T04:58:19.316646
2019-07-01T23:05:21
2019-07-01T23:05:21
193,588,997
1
5
MIT
2019-08-08T21:06:04
2019-06-24T22:22:59
Python
UTF-8
Python
false
false
1,260
py
import io
import os
import re

from setuptools import find_packages
from setuptools import setup

def read(filename):
    filename = os.path.join(os.path.dirname(__file__), filename)
    text_type = type(u"")
    with io.open(filename, mode="r", encoding='utf-8') as fd:
        return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())

setup(
    name="mindlogger_build_applet",
    version="0.1.0",
    url="https://github.com/akeshavan/mindlogger-build-applet",
    license='MIT',
    author="akeshavan",
    author_email="[email protected]",
    description="build your mindlogger survey in python",
    long_description=read("README.rst"),
    packages=find_packages(exclude=('tests',)),
    install_requires=[],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
c61dd8153595d6524f1c0e3b80656d527633d6db
7e11caaad1281f2310da9ad4f2cd9f4993ffb011
/shop/wsgi.py
a0c369bbcf815d8088a3478fa65651207e94945f
[]
no_license
pabloparejo/djangoShop
12c4384868ce17f5f8d4c88e9b9ab01d384b3ea0
958cf6203948a5098f0823eec487fae6dbda2765
refs/heads/master
2016-09-08T00:23:48.798809
2014-09-15T18:21:23
2014-09-15T18:21:23
18,875,015
1
2
null
null
null
null
UTF-8
Python
false
false
417
py
""" WSGI config for shop project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shop.settings") from django.core.wsgi import get_wsgi_application from dj_static import Cling application = Cling(get_wsgi_application())
19cede5124e49deb4814d49a47c9a1de937157cd
5cfa25aec2161d40df7fb850ed8d405738aaed35
/ya_disk.py
d13ca54e921bf14f63a64ccd879ca0f4d0aa9b1b
[]
no_license
fatrunner-39/netology-course-work
08673ef28b8bf18656ee7a34958208480227f967
9df34efbb1a3d62bc6d1ca9975918edd60996115
refs/heads/master
2023-08-15T15:43:25.373906
2021-10-09T09:05:58
2021-10-09T09:05:58
408,377,198
0
0
null
null
null
null
UTF-8
Python
false
false
1,769
py
import requests
import os
from tqdm import tqdm
# folder = os.chdir(r"C:\Users\alexa_000\PycharmProjects\course_project_python_first\avatars")
# files_list = os.listdir(path=folder)
# print(files_list)
from pprint import pprint

TOKEN = ''

class YaUploader:
    def __init__(self, token: str):
        self.token = token

    def get_headers(self):
        return {
            'Content-Type': 'application/json',
            'Authorization': 'OAuth {}'.format(self.token)
        }

    def create_folder(self, name):
        folder_url = "https://cloud-api.yandex.net/v1/disk/resources"
        headers = self.get_headers()
        params = {"path": name}
        folder = requests.put(folder_url, headers=headers, params=params)
        return name

    def _get_upload_link(self, disk_file_path):
        upload_url = "https://cloud-api.yandex.net/v1/disk/resources/upload"
        headers = self.get_headers()
        params = {"path": disk_file_path, "overwrite": "true"}
        response = requests.get(upload_url, headers=headers, params=params)
        pprint(response.json())
        return response.json()

    def upload_file_to_disk(self, disk_file_path, filename):
        href = self._get_upload_link(disk_file_path=disk_file_path).get("href", "")
        response = requests.put(href, data=open(filename, 'rb'))
        response.raise_for_status()
        if response.status_code == 201:
            print("Success")

if __name__ == '__main__':
    ya = YaUploader(token=TOKEN)
    folder = os.chdir(os.getcwd() + "\\" + "None")
    files_list = os.listdir(path=folder)
    print(files_list)
    ya.create_folder('None')
    for file in tqdm(files_list[-1:-6:-1]):
        ya.upload_file_to_disk(f"None/{file}", file)
de0138bc9e16846f031b9be1b5ebb4076f803c7f
a7bd4d4592ce0f6bf7603476f7a401507d8d0b0f
/Recommender/test.py
8d198beff932e6a99593321c0f87d72c0ec1032b
[]
no_license
shsheep/Data_Science_Study
2ee1dd9e96924cf8284fc82d6b4b47ba03f22e7c
4fea310be4e3959654d5515a47036d13cda5e84d
refs/heads/master
2020-06-11T01:56:37.033674
2019-06-26T03:16:55
2019-06-26T03:16:55
193,820,942
0
0
null
null
null
null
UTF-8
Python
false
false
177
py
import numpy as np

normal_array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
obj_array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(normal_array)
print(obj_array)
print(obj_array.shape)  # fixed: ndarray.shape is an attribute, not a method; calling it raises TypeError
69c51de4be08b8d11da57eac0f5f0acc6f14e3b9
ef64586d1ffda27abd7b4b1b41ac3264d611b6c5
/src/panda-3.py
b91d4168afc7ab7b66e4039788f8de81f254f666
[]
no_license
diegoami/Udacity-data-analisis
bccdcb6e4c913fd61f89a4ebfce444f48ffaccdf
8547d2c0ea474d637bd09396648f58e66df0d3e4
refs/heads/master
2021-01-21T11:30:33.791834
2017-06-15T17:36:51
2017-06-15T17:36:51
91,745,196
0
0
null
null
null
null
UTF-8
Python
false
false
454
py
import pandas as pd

s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
s2 = pd.Series([10, 20, 30, 40], index=['c', 'd', 'e', 'f'])

# Try to write code that will add the 2 previous series together,
# but treating missing values from either series as 0. The result
# when printed out should be similar to the following line:
print pd.Series([1, 2, 13, 24, 30, 40], index=['a', 'b', 'c', 'd', 'e', 'f'])

print s1.add(s2, fill_value=0).astype(int)
6be799833292d2cc19957234d12a5cb7b6e2778b
681f7a4c9d83a02ae5663898649070820d84a2cd
/dream.py
02852dd8bd37e556db80219b07fc30a95ce03328
[]
no_license
romanbelaire/DeepDream
9b04e3fcbaa52c4657ef1235946474bb327f3ffd
2b334758a84b83fdd06314013a7019bfd64a5221
refs/heads/master
2020-04-25T01:02:42.957515
2019-03-10T21:06:16
2019-03-10T21:06:16
172,397,152
0
0
null
null
null
null
UTF-8
Python
false
false
17,150
py
#DEEP DREAM PROJECT
#ROMAN BELAIRE
import numpy as np
import scipy
import PIL.Image
import os
import h5py
import argparse
import tensorflow as tf
from tensorflow import keras
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications import inception_v3
from keras import backend as K

#argument parser
#parser = argparse.ArgumentParser()
#parser.add_argument("-r", "--retrain", help="Retrain the model.", action="store_true")
#parser.add_argument("-d", "--dataset_directory", help="Directory containing dataset. Should be sorted into [directory]/train, [directory]/validate, [directory]/test.",
#                    default="resources/dataset/")

# To fix FailedPreconditionError:
sess = tf.InteractiveSession()
#show connected devices:
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
print("GPUs: ")
K.tensorflow_backend._get_available_gpus()

######## The following code is based on the keras documentation site
#aimed at making my life easier and creating my retrained inception model without raw tensorflow
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras import Sequential
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model

#constant vars
NUM_CLASSES = 6
batchsize = 32
EPOCHS = 30

#from keras import backend as K

def retrain_model():
    # create the base pre-trained model
    base_model = InceptionV3(weights=None, include_top=False)  #weights should be None for new model, 'imagenet' for pre-trained

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer -- let's say we have 200 classes
    predictions = Dense(NUM_CLASSES, activation='softmax')(x)  #had to make sure the number of classes matched up. fuckin keras doc hard-coded 200 classes

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')

    data_aug = ImageDataGenerator(rotation_range=20,
                                  zoom_range=0.15,
                                  width_shift_range=0.2,
                                  height_shift_range=0.2,
                                  shear_range=0.15,
                                  horizontal_flip=True,
                                  fill_mode="nearest")

    # train the model on the new data for a few epochs
    #data generators
    train_gen = data_aug.flow_from_directory(directory='resources/dataset/train',
                                             target_size=(255, 255),
                                             color_mode='rgb',
                                             batch_size=batchsize,
                                             class_mode='categorical',
                                             shuffle='True',
                                             seed=420)  #its important that the seed is an int and not a string lol
    val_gen = data_aug.flow_from_directory(directory='resources/dataset/validate',
                                           target_size=(255, 255),
                                           color_mode='rgb',
                                           batch_size=batchsize,
                                           class_mode='categorical',
                                           shuffle='True',
                                           seed=69)
    print("data generators loaded.")

    STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
    STEP_SIZE_VALID = val_gen.n // val_gen.batch_size
    model.fit_generator(generator=train_gen,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=val_gen,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=EPOCHS)

    # at this point, the top layers are well trained and we can start fine-tuning
    # convolutional layers from inception V3. We will freeze the bottom N layers
    # and train the remaining top layers.

    # let's visualize layer names and layer indices to see how many layers
    # we should freeze:
    for i, layer in enumerate(base_model.layers):
        print(i, layer.name)

    # we chose to train the top 2 inception blocks, i.e. we will freeze
    # the first 249 layers and unfreeze the rest:
    for layer in model.layers[:249]:
        layer.trainable = False
    for layer in model.layers[249:]:
        layer.trainable = True

    # we need to recompile the model for these modifications to take effect
    # we use SGD with a low learning rate
    from keras.optimizers import SGD
    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')

    # we train our model again (this time fine-tuning the top 2 inception blocks
    # alongside the top Dense layers
    model.fit_generator(generator=train_gen,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=val_gen,
                        validation_steps=STEP_SIZE_VALID,
                        epochs=EPOCHS)
    print("finished generator successfully")

    #save our stuff
    model_json = model.to_json()
    with open("resources/saved_models/sea1/sea_model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("resources/saved_models/sea1/sea_model_weights.h5")
    print("weights saved.")
    model.save("resources/saved_models/sea1/sea_full_model.h5")
    print("full model saved")
    return model
########end

def load_full_model(path):
    return load_model(path)

# Disable all training specific operations
K.set_learning_phase(0)

# The model will be loaded with pre-trained inceptionv3 weights.
#JK WE USIN MY BRAND NEW SHARK TRAINED MODEL
#model = inception_v3.InceptionV3(weights='resources/output_model.h5', include_top=False)
model = load_full_model("resources/saved_models/model1/full_model.h5")
#model = retrain_model()
dream = model.input
print('Model loaded.')

# You can tweak these setting to obtain new visual effects.
settings = {
    'features': {
        'mixed2': 3,    #wavy layers
        'mixed3': 6,    #smooth circles
        'mixed4': 2,    #kind of jagged
        'mixed5': 1.5,  #wrinkle/fur texture
    },
}

# Set a function to load, resize and convert the image.
def preprocess_image(image_path):
    # Util function to open, resize and format pictures
    # into appropriate tensors.
    img = load_img(image_path)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img

def preprocess_array(arr):
    #version of preprocess_image, except for a numpy img_to_array
    img = np.expand_dims(arr, axis=0)
    img = inception_v3.preprocess_input(img)
    return img

# And a function to do the opposite: convert a tensor into an image.
def deprocess_image(x):
    # Util function to convert a tensor into a valid image.
    if K.image_data_format() == 'channels_first':
        print("Deprocess color first")
        x = x.reshape((3, x.shape[2], x.shape[3]))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((x.shape[1], x.shape[2], 3))
    np.true_divide(x, 2., x, casting='unsafe')   #x /= 2. #swap for line 191?
    np.add(x, 0.5, x, casting='unsafe')          #x += 0.5
    np.multiply(x, 255., x, casting='unsafe')    #x *= 255.
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# Set a dictionary that maps the layer name to the layer instance.
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss. The way this works is first the scalar variable *loss* is set.
# Then the loss will be defined by adding layer contributions to this variable.
loss = K.variable(0.)
for layer_name in settings['features']:
    # Add the L2 norm of the features of a layer to the loss.
    # fixed: was "assert (cond, msg)", which asserts an always-true tuple
    assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
    coeff = settings['features'][layer_name]
    x = layer_dict[layer_name].output
    # We avoid border artifacts by only involving non-border pixels in the loss.
    scaling = K.prod(K.cast(K.shape(x), 'float32'))
    if K.image_data_format() == 'channels_first':
        loss += coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling
    else:
        loss += coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling

# Compute the gradients of the dream wrt the loss.
grads = K.gradients(loss, dream)[0]
# Normalize gradients.
grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())

# Set up function to retrieve the value of the loss and gradients given an input image.
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)

def eval_loss_and_grads(x):
    outs = fetch_loss_and_grads([x])
    loss_value = outs[0]
    grad_values = outs[1]
    return loss_value, grad_values

# Helper funtion to resize
def resize_img(img, size):
    img = np.copy(img)
    if K.image_data_format() == 'channels_first':
        factors = (1, 1,
                   float(size[0]) / img.shape[2],
                   float(size[1]) / img.shape[3])
    else:
        factors = (1,
                   float(size[0]) / img.shape[1],
                   float(size[1]) / img.shape[2],
                   1)
    return scipy.ndimage.zoom(img, factors, order=1)

# Define the gradient ascent function over a number of iterations.
def gradient_ascent(x, iterations, step, max_loss=None):
    for i in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        if max_loss is not None and loss_value > max_loss:
            break
        print('..Loss value at', i, ':', loss_value)
        x += step * grad_values
    return x

def rgb2gray(rgb):
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])

def gray2rgb(gray):
    gray = gray.transpose((1, 2, 0))
    w, h, c = gray.shape
    rgb = np.empty((w, h, 3), dtype=np.float32)
    rgb[:, :, 2] = rgb[:, :, 1] = rgb[:, :, 0] = gray[:, :, 0]
    return rgb

def transfer_color(dream_img, original_img):
    original_image = np.clip(original_img, 0, 255)
    styled_image = np.clip(dream_img, 0, 255)
    original_image = original_image[0]
    # Luminosity transfer steps:
    # 1. Convert stylized RGB->grayscale accoriding to Rec.601 luma (0.299, 0.587, 0.114)
    # 2. Convert stylized grayscale into YUV (YCbCr)
    # 3. Convert original image into YUV (YCbCr)
    # 4. Recombine (stylizedYUV.Y, originalYUV.U, originalYUV.V)
    # 5. Convert recombined image from YUV back to RGB
    # 1
    styled_grayscale = rgb2gray(styled_image)
    styled_grayscale_rgb = gray2rgb(styled_grayscale)
    # 2
    styled_grayscale_yuv = np.array(PIL.Image.fromarray(styled_grayscale_rgb.astype(np.uint8)).convert('YCbCr'))
    # 3
    original_yuv = np.array(PIL.Image.fromarray(original_image.astype(np.uint8)).convert('YCbCr'))
    # 4
    w, h, _ = original_yuv.shape
    combined_yuv = np.empty((1, w, h, 3), dtype=np.uint8)
    #print([styled_grayscale_yuv].shape)
    combined_yuv[0, ..., 0] = styled_grayscale_yuv[..., 0]
    combined_yuv[0, ..., 1] = original_yuv[..., 1]
    combined_yuv[0, ..., 2] = original_yuv[..., 2]
    # 5
    print("cy" + str(combined_yuv.shape))
    img_out = np.array(PIL.Image.fromarray(combined_yuv[0], 'YCbCr').convert('RGB'))
    return [img_out]

# Set hyperparameters. The ocatave_scale is the ratio between each successive scale (remember the upscaling mentioned before?).
# Playing with these hyperparameters will also allow you to achieve new effects
step = 0.008        # Gradient ascent step size
num_octave = 5      # Number of scales at which to run gradient ascent
octave_scale = 1.4  # Size ratio between scales
iterations = 20     # Number of ascent steps per scale
max_loss = 10.

base_image_path = "resources/images/orion.jpg"
print('opening ' + base_image_path)
img = PIL.Image.open(base_image_path)
#img

def dream_image(img, save):
    img = preprocess_image(base_image_path)
    if K.image_data_format() == 'channels_first':
        original_shape = img.shape[2:]
    else:
        original_shape = img.shape[1:3]
    successive_shapes = [original_shape]
    for i in range(1, num_octave):
        shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
        successive_shapes.append(shape)
    successive_shapes = successive_shapes[::-1]
    original_img = np.copy(img)
    shrunk_original_img = resize_img(img, successive_shapes[0])

    for shape in successive_shapes[:4]:  #remove the indexing to have full resolution
        print('Processing image shape', shape)
        img = resize_img(img, shape)
        img = gradient_ascent(img, iterations=iterations, step=step, max_loss=max_loss)
        upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
        same_size_original = resize_img(original_img, shape)
        lost_detail = same_size_original - upscaled_shrunk_original_img
        img += lost_detail
        shrunk_original_img = resize_img(original_img, shape)

    if save:
        save_img('dream.jpg', deprocess_image(np.copy(img)))
        print('saved dream')
        dreamout = PIL.Image.open('dream.jpg')
    return deprocess_image(np.copy(img))
    #dreamout

#dream_image(img, True)

def dream_video(frames):
    downsampling = 1  #how many resolution ratios down to go
    preserve_color = True
    new_frames = []
    for i, frame in enumerate(frames):
        original_frame = frame
        print('PROCESSING FRAME ' + str(i) + " of " + str(len(frames)))
        if i > 0:
            print("averaging two frames")
            frame = (np.array(frames[i - 1]) + np.array(frame)) / 2.0
        frame = preprocess_array(frame)
        if K.image_data_format() == 'channels_first':
            original_shape = frame.shape[2:]
        else:
            original_shape = frame.shape[1:3]
        successive_shapes = [original_shape]
        for i in range(1, num_octave):
            shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
            successive_shapes.append(shape)
        successive_shapes = successive_shapes[::-1]
        max_size = len(successive_shapes) - downsampling
        original_img = np.copy(frame)
        shrunk_original_img = resize_img(frame, successive_shapes[0])
        for shape in successive_shapes[:max_size]:
            print('Processing image shape', shape)
            print(frame.shape)
            frame = resize_img(frame, shape)
            frame = gradient_ascent(frame, iterations=iterations, step=step, max_loss=max_loss)
            upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
            same_size_original = resize_img(original_img, shape)
            lost_detail = same_size_original - upscaled_shrunk_original_img
            frame += lost_detail
            shrunk_original_img = resize_img(original_img, shape)
        if preserve_color:
            #following two lines for weird colors
            frame = transfer_color(frame, resize_img([original_frame], frame[0, ..., 0].shape))
            #frame = frame.transpose(2, 0, 1)
            #frame = [frame]
        print(frame.shape)
        new_frames.append(deprocess_image(np.copy(frame)))
    return new_frames

def dream_video_from_image(img, num_frames):
    new_frames = []
    new_frames.append(img)
    for i in range(0, num_frames):
        print('frame ' + str(i))
        frame = new_frames[i]
        frame = preprocess_array(frame)
        if K.image_data_format() == 'channels_first':
            original_shape = frame.shape[2:]
        else:
            original_shape = frame.shape[1:3]
        successive_shapes = [original_shape]
        for i in range(1, num_octave):
            shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
            successive_shapes.append(shape)
        successive_shapes = successive_shapes[::-1]
        original_img = np.copy(frame)
        shrunk_original_img = resize_img(frame, successive_shapes[0])
        for shape in successive_shapes[:4]:
            print('Processing image shape', shape)
            frame = resize_img(frame, shape)
            frame = gradient_ascent(frame, iterations=iterations, step=step, max_loss=max_loss)
            upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
            same_size_original = resize_img(original_img, shape)
            lost_detail = same_size_original - upscaled_shrunk_original_img
            frame += lost_detail
            shrunk_original_img = resize_img(original_img, shape)
        new_frames.append(deprocess_image(np.copy(frame)))
    return np.delete(new_frames, 0)
ebd1f22351359902b00f6777b74785355bb7f50f
b867e7996b27f2ba23139baa4f98e3ac87379936
/Moon.py
55d5a37af20a742cef8d91f2177b24484a6d07e8
[]
no_license
msawhney97/Space-Game
509f855678072e04197f98a4f802393a3eb65001
c05292e412c0f18be2d5023dadf65ffac202c4d1
refs/heads/master
2021-01-25T07:44:31.185069
2017-06-07T17:10:12
2017-06-07T17:10:12
93,659,493
0
0
null
null
null
null
UTF-8
Python
false
false
505
py
import pygame from math import sin, cos from GameObject import GameObject class Moon(GameObject): @staticmethod def init(angle=90): Moon.moonImage = pygame.transform.rotate( pygame.transform.scale( pygame.image.load('images/cartoon-moon.png').convert_alpha(), (100, 100)),angle) def __init__(self,x,y): super(Moon,self). __init__(x,y, Moon.moonImage, 20) def update(self,x,y, r,angle): super(Moon, self). __init__(x, y, Moon.moonImage,30) Moon.init(angle)
9ba654fc94ab5bd647ac7e9bf67182c4e3b39850
bb37574bc39e1e90b43b8fe874c96955dc88a814
/mats/__init__.py
2401b9e1a7c1c81ebec4afaebfeeec1e75c8fef6
[ "MIT" ]
permissive
martinbra/mats
a7975cf4323be55b94087d51ba746b247a83eaed
5fae5cdd405be586bfad821c2335ee980a851f4c
refs/heads/master
2022-11-21T16:01:33.235316
2020-07-29T19:13:37
2020-07-29T19:13:37
258,060,134
0
0
MIT
2020-07-29T19:13:39
2020-04-23T01:14:40
null
UTF-8
Python
false
false
379
py
import coloredlogs import logging from mats.archiving import ArchiveManager from mats.test import Test from mats.test_sequence import TestSequence from mats.tkwidgets import MatsFrame from mats.version import __version__ __all__ = ['Test', 'TestSequence', 'ArchiveManager', 'MatsFrame', '__version__'] coloredlogs.install(level='DEBUG') logger = logging.getLogger(__name__)
22e72933ceb95fe59a488becf877035854228656
594349a97cf47ef1e70bac053b1be5762e16e8ac
/project/__init__.py
ccd0818cb24ae4a50b47d105e3eb7155ca14274f
[]
no_license
ctma/flask_tutorial
9b9b17fde629f79f3b511255450409372754b5a5
df3e395ba00053886751c1948acc7abd1e4af654
refs/heads/master
2021-07-15T22:27:01.004567
2017-10-22T20:45:52
2017-10-22T20:45:52
107,901,257
1
1
null
null
null
null
UTF-8
Python
false
false
946
py
import os import datetime from flask import Flask, jsonify from flask_sqlalchemy import SQLAlchemy # instantiate the app app = Flask(__name__) # set config app_settings = os.getenv('APP_SETTINGS') app.config.from_object(app_settings) # instantiate the db db = SQLAlchemy(app) # model class User(db.Model): __tablename__ = "users" id = db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(128), nullable=False) email = db.Column(db.String(128), nullable=False) active = db.Column(db.Boolean(), default=False, nullable=False) created_at = db.Column(db.DateTime, nullable=False) def __init__(self, username, email): self.username = username self.email = email self.created_at = datetime.datetime.utcnow() # routes @app.route('/ping', methods=['GET']) def ping_pong(): return jsonify({ 'status': 'success', 'message': 'pong!' })
55e1835554328c64f71bbf8f81aa177437af3cea
95a4eede263cd24d8a31ff5e478cb86d79877c3c
/Zgony.py
28bcdb6518121a0560b2d5ebbdc2157cb4bf35f8
[]
no_license
lewiis252/baza_covid19_Polska
a91cae0822a8595c8eb12490df4348dc7d554d3f
875967368e8a3563379e83a34588a3f0359ecccb
refs/heads/main
2023-08-04T18:18:33.346238
2021-09-14T06:22:00
2021-09-14T06:22:00
406,246,154
0
0
null
null
null
null
UTF-8
Python
false
false
1,721
py
import matplotlib.pyplot as plt import numpy as np import pandas as pd from datetime import date import Dane import seaborn as sb sb.set() print("...\n") start_date = date(2020,3,3) end_date = date.today() end_date = date(2021,6,3) dzien = pd.date_range(start_date, end_date) zgony = Dane.zgony sr_7_dni = [0,0,0,0,0] for i in range(5, len(zgony)): nowa_dana = np.mean(zgony[i-6:i+1]) sr_7_dni.append(nowa_dana) srednia_7_dniowa_zgonow = np.array(sr_7_dni) zgony_lacznie = [0] for i in range(1, len(zgony)): nowa_suma = zgony[i] + zgony_lacznie[i-1] zgony_lacznie.append(nowa_suma) zgony_lacznie = np.array(zgony_lacznie) print('RAPORT ZGONÓW Z OSTATNICH 8 DNI:') d = {'data':dzien, 'nowe zgony':zgony, 'średnia tygodniowa zgonów':np.round(srednia_7_dniowa_zgonow,0), 'zgony łącznie':zgony_lacznie} raport_zgonow = pd.DataFrame(data=d) raport_zgonow = raport_zgonow.set_index(raport_zgonow.columns[0]) print(raport_zgonow[-8:]) fig, ax = plt.subplots(figsize=(20,8)) plt.bar(dzien, zgony, color='c',label='Liczba zgonów') plt.ylabel('Zgony', size=15) ax2 = ax.twinx() plt.plot(dzien, zgony_lacznie, color='r', label='Zgony łącznie') plt.ylabel('Zgony łącznie', size=15) plt.title('Zgony', size=20) fig.legend(loc='lower center', ncol=3) plt.savefig("raport\Zgony.svg") fig, ax = plt.subplots(figsize=(20,8)) plt.bar(dzien, srednia_7_dniowa_zgonow, color='c',label='Liczba zgonów średnia 7-dniowa') plt.ylabel('Zgony', size=15) ax2 = ax.twinx() plt.plot(dzien, zgony_lacznie, color='r', label='Zgony łącznie') plt.ylabel('Zgony łącznie', size=15) plt.title('Zgony', size=20) fig.legend(loc='lower center', ncol=3) plt.savefig("raport\Zgony średnia 7-dniowa.svg")
c14444582fc73b4d58d39e69413552ec5593874a
88994e2e840a70ec702cee09e1a13813aa6f800c
/cg/models/orders/excel_sample.py
b7230139a95a916dd518cb68a5db3854ed5d761e
[]
no_license
Clinical-Genomics/cg
1e9eb0852f742d555a48e8696914ebe177f7d436
d2ec6d25b577dd6938bbf92317aeff1d6b3c5b08
refs/heads/master
2023-09-01T02:04:04.229120
2023-08-31T13:50:31
2023-08-31T13:50:31
82,567,026
19
8
null
2023-09-14T15:24:13
2017-02-20T14:29:43
Python
UTF-8
Python
false
false
4,657
py
from typing import List, Optional from cg.models.orders.sample_base import OrderSample from cg.models.orders.validators.excel_sample_validators import ( convert_sex, convert_to_date, convert_to_lower, convert_to_priority, numeric_value, parse_panels, validate_data_analysis, validate_parent, validate_source, ) from pydantic import AfterValidator, BeforeValidator, Field from typing_extensions import Annotated class ExcelSample(OrderSample): age_at_sampling: str = Field(None, alias="UDF/age_at_sampling") application: str = Field(..., alias="UDF/Sequencing Analysis") capture_kit: str = Field(None, alias="UDF/Capture Library version") cohorts: List[str] = Field(None, alias="UDF/cohorts") collection_date: Annotated[str, AfterValidator(convert_to_date)] = Field( None, alias="UDF/Collection Date" ) comment: str = Field(None, alias="UDF/Comment") concentration: Annotated[str, AfterValidator(numeric_value)] = Field( None, alias="UDF/Concentration (nM)" ) concentration_sample: Annotated[str, AfterValidator(numeric_value)] = Field( None, alias="UDF/Sample Conc." ) container: str = Field(None, alias="Container/Type") container_name: str = Field(None, alias="Container/Name") control: str = Field(None, alias="UDF/Control") custom_index: str = Field(None, alias="UDF/Custom index") customer: str = Field(..., alias="UDF/customer") data_analysis: Annotated[str, AfterValidator(validate_data_analysis)] = Field( "MIP DNA", alias="UDF/Data Analysis" ) data_delivery: Annotated[str, AfterValidator(convert_to_lower)] = Field( None, alias="UDF/Data Delivery" ) elution_buffer: str = Field(None, alias="UDF/Sample Buffer") extraction_method: str = Field(None, alias="UDF/Extraction method") family_name: str = Field(None, alias="UDF/familyID") father: Annotated[str, AfterValidator(validate_parent)] = Field(None, alias="UDF/fatherID") formalin_fixation_time: str = Field(None, alias="UDF/Formalin Fixation Time") index: str = Field(None, alias="UDF/Index type") index_number: Annotated[str, AfterValidator(numeric_value)] = Field( None, alias="UDF/Index number" ) lab_code: str = Field(None, alias="UDF/Lab Code") mother: Annotated[str, AfterValidator(validate_parent)] = Field(None, alias="UDF/motherID") name: str = Field(..., alias="Sample/Name") organism: str = Field(None, alias="UDF/Strain") organism_other: str = Field(None, alias="UDF/Other species") original_lab: str = Field(None, alias="UDF/Original Lab") original_lab_address: str = Field(None, alias="UDF/Original Lab Address") panels: Annotated[Optional[List[str]], BeforeValidator(parse_panels)] = Field( None, alias="UDF/Gene List" ) pool: str = Field(None, alias="UDF/pool name") post_formalin_fixation_time: str = Field(None, alias="UDF/Post Formalin Fixation Time") pre_processing_method: str = Field(None, alias="UDF/Pre Processing Method") primer: str = Field(None, alias="UDF/Primer") priority: Annotated[ str, AfterValidator(convert_to_lower), AfterValidator(convert_to_priority), ] = Field(None, alias="UDF/priority") quantity: Annotated[str, AfterValidator(numeric_value)] = Field(None, alias="UDF/Quantity") reagent_label: str = Field(None, alias="Sample/Reagent Label") reference_genome: str = Field(None, alias="UDF/Reference Genome Microbial") region: str = Field(None, alias="UDF/Region") region_code: str = Field(None, alias="UDF/Region Code") require_qc_ok: bool = Field(None, alias="UDF/Process only if QC OK") rml_plate_name: str = Field(None, alias="UDF/RML plate name") selection_criteria: str = Field(None, alias="UDF/Selection Criteria") sex: Annotated[str, 
AfterValidator(convert_sex)] = Field(None, alias="UDF/Gender") source: Annotated[str, AfterValidator(validate_source)] = Field(None, alias="UDF/Source") status: Annotated[str, AfterValidator(convert_to_lower)] = Field(None, alias="UDF/Status") subject_id: str = Field(None, alias="UDF/subjectID") synopsis: str = Field(None, alias="UDF/synopsis") tissue_block_size: str = Field(None, alias="UDF/Tissue Block Size") tumour: bool = Field(None, alias="UDF/tumor") tumour_purity: str = Field(None, alias="UDF/tumour purity") volume: Annotated[str, AfterValidator(numeric_value)] = Field(None, alias="UDF/Volume (uL)") well_position: str = Field(None, alias="Sample/Well Location") well_position_rml: str = Field(None, alias="UDF/RML well position")
68079a55bb86f3c2389cad7e87cb8b8aeaf5d183
ead3bb5d63ce9106bc34b1f2a883fdcd21c99839
/blackbox/vae/loss.py
e71b695711f5831b25382751a9f2d154d8978f7f
[]
no_license
AkashGanesan/generic-blackbox
91fed27d7aa5b21b2f6a99a770c135ebdaa30613
62d5840635f4e1a1a5252091bc7f334853acb67f
refs/heads/master
2020-04-26T05:44:13.408760
2019-03-04T05:44:55
2019-03-04T05:44:55
173,343,235
0
0
null
null
null
null
UTF-8
Python
false
false
1,606
py
import torch import torch.nn.functional as F import torch.nn as nn def bce_loss(input, target): """ Numerically stable version of the binary cross-entropy loss function. As per https://github.com/pytorch/pytorch/issues/751 See the TensorFlow docs for a derivation of this formula: https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits Input: - input: PyTorch Tensor of shape (N, ) giving scores. - target: PyTorch Tensor of shape (N,) containing 0 and 1 giving targets. Output: - A PyTorch Tensor containing the mean BCE loss over the minibatch of input data. """ neg_abs = -input.abs() loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() return loss.mean() def kld_loss(mean, log_var): ''' KLD loss ''' KLD = - 0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp(), dim=1) return KLD def l2_loss(y_pred, y_true, mode='sum'): """ Input: - y_pred: Tensor of shape (seq_len, batch, 2). Predicted trajectory. - y_true: Tensor of shape (seq_len, batch, 2). Groud truth predictions. - loss_mask: Tensor of shape (batch, seq_len) - mode: Can be one of sum, average, raw Output: - loss: l2 loss depending on mode """ batch, _, seq_len = y_pred.size() loss = (y_true - y_pred).norm(dim=1) # if mode == 'sum': return torch.sum(loss, dim=1) / seq_len # elif mode == 'average': # return torch.sum(loss) / torch.numel(loss_mask.data) # elif mode == 'raw': # return loss.sum(dim=2).sum(dim=1)
f27f76e7fef07ec8bb0ae3de9b75b49542d1e03c
4a2990a954e9158d09ac8985bec18289fb684a39
/DigitClassifier.py
b3f6f8bdcafbdb47abccb7d1ecc7da52b06b7781
[]
no_license
aslakey/DataScience
cb1715fdcafa0afe82c99f389a114aa08315e586
881b55489e25751b46ec6e08c218f55ebb16ea03
refs/heads/master
2020-04-17T02:25:16.185685
2016-08-17T23:11:13
2016-08-17T23:11:13
45,635,744
1
1
null
2015-11-05T20:27:55
2015-11-05T19:56:41
null
UTF-8
Python
false
false
948
py
from sklearn import svm from sklearn import datasets ''' ->importing SVM and datasets from sklearn ->classify digits using support vector classification ->manually chose gamma, but could have used grid search ->train data using clf.fit(data,target) method ''' #load iris = datasets.load_iris() digits = datasets.load_digits() #print(digits.data) #classify clf = svm.SVC(gamma=0.001, C=100.) #classifier using support vector classification #train clf.fit(digits.data[:-1],digits.target[:-1]) ''' OUT: SVC(C=100.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.001, kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) **Can store using: >>> from sklearn.externals import joblib >>> joblib.dump(clf, 'filename.pkl') **then later retrive that file: >>> clf = joblib.load('filename.pkl') ''' #now classify some digs clf.predict(digits.data[-1]) #last image looks like an 8!
7457a9afae19893e2d1e10d12355c26f4a7818df
8e3ca9617020be18b9922757486aca85e22a6b44
/Tareas/PYTHON_2021-[9] Regiones ricas en AT-2984/Zara Paulina Martínez Sánchez_10047_assignsubmission_file_/regiones_at.py
e4fae38123730a03b1271bdcbb090512098fdaff
[]
no_license
AnaBVA/pythonCCG_2021
04609078fdd40bd68684ce5514a78e959d02ff3c
47677549eec0e1a3460941ef97ace8d7d0bac185
refs/heads/main
2023-05-29T02:39:24.780609
2021-06-17T06:24:44
2021-06-17T06:24:44
344,536,557
0
1
null
null
null
null
UTF-8
Python
false
false
2,170
py
""" ## NAME regiones_at.py ## VERSION [1.0] ## AUTHOR Zara Paulina Martinez Sanchez < zaram042001 @ gmail.com > ## DATE [08/06/2021] ## DESCRIPTION Programa que analiza una secuencia de DNA para buscar regiones ricas en AT las cuales contengan 5 o mas As y/o Ts. En caso de contener en la secuencia caracteres diferentes a ATGC se le notifica al usuario ## CATEGORY Sequence analysis ## USAGE regiones_at.py no requiere argumentos ## FUNCTIONS def analizar_sec(seq): no_bases = re.findall(r"[^ATGC]", seq) region_at = re.findall(r"[AT]{5,}", seq) try: if no_bases: raise ValueError except ValueError: print(f'La secuencia que ingresó cuenta con caracteres no validos: {no_bases}') else: if region_at: print(f'Las regiones ricas en AT son: {region_at}') else: print("No existen regiones ricas en AT en su secuencia") ## EXAMPLES Input: CTGCATTATATCGTACGAAATTATACGCGCG Output: Las regiones ricas en AT son: ['ATTATAT', 'AAATTATA'] ## GITHUB LINK https://github.com/zara-ms/python_class/tree/master/src """ # Libreria a usar import re def analizar_sec(seq): no_bases = re.findall(r"[^ATGC]", seq) region_at = re.findall(r"[AT]{5,}", seq) # Reconocer caracteres invalidos y marcar error try: if no_bases: raise ValueError except ValueError: print(f'La secuencia que ingresó cuenta con caracteres no validos: {no_bases}') # Buscar secuencias ricas en AT si la secuencia es correcta else: if region_at: print(f'Las regiones ricas en AT son: {region_at}') else: print("No existen regiones ricas en AT en su secuencia") print("Ingrese la secuencia a analizar") secuencia = input() secuencia = secuencia.upper() # Llamar a la funcion analizar_sec(secuencia)
fd015340592d6a9d46508ab0c1abb1b030137c5d
a90b05c59c119102bb93aa2162585a38b5ae9c84
/testCaffe.py
3122805f3561abe06095bf477ec6fa0d1073840d
[]
no_license
quanweikikai/deconv-net
2fd07b4c7178109eeef21e4258ddb2820bfe3cb9
625fce6d34356014ed5da3c74cd7acddb1eac7c1
refs/heads/master
2021-01-12T12:25:03.115828
2016-11-01T23:03:58
2016-11-01T23:03:58
72,485,146
0
0
null
null
null
null
UTF-8
Python
false
false
1,691
py
import caffe import matplotlib.pyplot as plt import numpy as np import sys def plotAll(inputArr, plotShape): for ii in xrange(plotShape[0]): startImg = inputArr[0,ii*plotShape[1]+1,...] for jj in xrange(1,plotShape[1]): startImg = np.append(startImg,inputArr[0,ii*plotShape[1]+jj,...],axis=0) if (ii == 0): lineImg = startImg else: lineImg = np.append(lineImg,startImg,axis=1) return lineImg net = caffe.Net('../deconvTest/lenet.prototxt','../caffe/examples/mnist/lenet_iter_10000.caffemodel',caffe.TEST) invNet = caffe.Net('../deconvTest/inverseLenet.prototxt',caffe.TEST) for layer in invNet.params: invNet.params[layer][0].data[...] = net.params[layer[2:]][0].data f, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2) c=np.array( [0.299, 0.58, 0.114] ) a = net.params['conv1'][0].data img = caffe.io.load_image(sys.argv[1]) img = np.dot(img,c) img = caffe.io.resize(img,(28,28)) * 255 #forward net.blobs['test1'].data[0] = img output = net.forward(['conv1','conv2','SoftmaxOut','ip2']) #inverse forward invNet.blobs['input1'].data[0] = output['conv2'] inverseOutput = invNet.forward(['deconv2']) input2 = inverseOutput['deconv2'][0,...] input2 = input2/float(np.max(input2)) input2 = caffe.io.resize(input2,(20,24,24)) tmp = np.zeros((1,20,24,24)) tmp[0] = input2 invNet.blobs['input2'].data[0] = tmp invResult = invNet.forward(['result']) plotImg1 = plotAll(output['conv2'],(10,5)) plotImg2 = plotAll(output['conv1'],(5,4)) plotImg3 = invResult['result'] ax1.imshow(plotImg1) ax2.imshow(plotImg2) ax4.imshow(plotImg3[0,0,...]*255) ax3.imshow(img) print output['SoftmaxOut'] print output['ip2'] plt.show()
968c71ff6d547560d6976d04238b5e4ca27acfc8
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
/Cabergoline_CT_2_mg_tablet_SmPC.py
1fc6492eca70783a261c02b5d7847e96b5dc2672
[]
no_license
urudaro/data-ue
2d840fdce8ba7e759b5551cb3ee277d046464fe0
176c57533b66754ee05a96a7429c3e610188e4aa
refs/heads/master
2021-01-22T12:02:16.931087
2013-07-16T14:05:41
2013-07-16T14:05:41
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,671
py
{'_data': [['Unknown', [['GI', u'administration site conditions Patients on Adjunct Levodopa Therapy MedDRA Frequency Undesirable Effects System Organ Class Psychiatric disorders Common Confusion, hallucinations Nervous system disorders Common Dizziness, dyskinesia Uncommon Hyperkinesia Cardiac disorders Common Angina Vascular disorders Common Postural hypotension Uncommon Erythromelalgia Respiratory, thoracic and Uncommon Pleural effusion, pulmonary fibrosis mediastinal disorders Gastrointestinal disorders Very common Nausea Common Dyspepsia, gastritis, vomiting General disorders and Common Peripheral edema administration site conditions Investigations Common Decreased hemoglobin, hematocrit, and/or red blood cell (>15% vs baseline) Post-marketing Surveillance MedDRA Frequency Undesirable Effects System Organ Class Immune system disorders Uncommon Hypersensitivity reaction Psychiatric disorders Common Increased libido Uncommon Delusions, psychotic disorder Not Known Aggression, hypersexuality, pathological gambling Nervous system disorders Common Headache, somnolence Not Known Sudden sleep onset, syncope Cardiac disorders Very common Valvulopathy (including regurgitation) and related disorders (pericarditis and pericardial effusion) Vascular disorders Not Known Digital vasospasm Respiratory, thoracic and Common Dyspnea mediastinal disorders Very rare Fibrosis Not Known Respiratory disorder, respiratory failure Hepato-biliary disorders Uncommon Hepatic function abnormal Skin and subcutaneous Uncommon Rash tissue disorders Not Known Alopecia Musculoskeletal and Not Known Leg cramps connective tissue disorders General disorders and Common Asthenia administration site Uncommon Edema, fatigue conditions Investigations Common Liver function tests abnormal Not Known Blood creatinine phosphokinase increased There have been reports of fibrotic and serosal inflammatory conditions, such as pleuritis, pleural effusion, pleural fibrosis, pulmonary fibrosis, pericarditis, pericardial effusion, cardiac valvulopathy and retroperitoneal fibrosis, in patients taking cabergoline (see Secion 4.4). There is limited information available on the reversibility of these reactions. Gastric upset was more frequent in female than in male patients, while CNS events were more frequent in the elderly. A blood pressure decrease of clinical relevance was observed mainly on standing in a minority of patients. The effect was mainly evident in the first weeks of therapy. Neither modification of heart rate nor consistent changes of ECG tracing were observed during cabergoline treatment. Alterations in standard laboratory tests are uncommon during long term therapy with cabergoline. In clinical studies, increases of triglycerides greater than 30% above the upper limit of the laboratory reference range were observed in 6.8% of the cabergoline-treated patients who had values within the normal range at baseline. In most cases the increases were transient. No clear indications of increases over time or significant shifts from normal to abnormal values were observed in the overall group of patients treated with cabergoline. Other: Adverse events have been reported with lower doses of cabergoline (0.25 \u2013 2 mg per week) that are not listed above including: Common (>1/100 to <1/10) Nervous system disorders: Depression, paresthesia. 
Cardiac disorders: Palpitations Skin and subcutaneous tissue disorders: Facial redness Uncommon (>1/1000 to <1/100) Eye disorders: Hemianopsia Vascular disorders: Nose bleeding Rare (>1/10000 to <1/1000) Vascular disorders: Fainting']]]], '_pages': [6, 8], u'_rank': 1, u'_type': u'LSFU'}
44ff9007fe06e2f8d446711a3c996a7d34bb494a
ca5a08c91d070b649be6236b23261e3dbe3d9742
/Chapter08/08_03_MagicIndex_BruteForce.py
fd9db96cd27eb0be035d53fcddd359ba1b3b5391
[]
no_license
tdesfont/CtCI-6th-Edition-Python
3751c9d5137c696661492568fc7fd4e09a64652d
0464574fdf5591b5c5d25b5777fee0f401c06955
refs/heads/master
2020-08-06T12:50:54.586891
2019-12-04T16:29:24
2019-12-04T16:29:24
212,981,529
0
0
null
2019-10-05T10:30:28
2019-10-05T10:30:28
null
UTF-8
Python
false
false
202
py
def magicIndex(A): for i in range(0, len(A)): if A[i] == i: print(i, A[i]) return True return False if __name__ == "__main__": magicIndex([2, 4, 5, 5, 5, 5])
9ce90a6b93e13fbc8d927da0f14756a67b83c503
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
/indices/nntriton.py
c9e4c9e38a2d8a648637823e5cb28fec8650247e
[]
no_license
psdh/WhatsintheVector
e8aabacc054a88b4cb25303548980af9a10c12a8
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
refs/heads/master
2021-01-25T10:34:22.651619
2015-09-23T11:54:06
2015-09-23T11:54:06
42,749,205
2
3
null
2015-09-23T11:54:07
2015-09-18T22:06:38
Python
UTF-8
Python
false
false
493
py
ii = [('CookGHP3.py', 1), ('RogePAV2.py', 4), ('RogePAV.py', 2), ('RennJIT.py', 1), ('LeakWTI2.py', 6), ('AubePRP.py', 2), ('FitzRNS3.py', 1), ('ClarGE2.py', 2), ('GellWPT2.py', 1), ('WilkJMC2.py', 3), ('RoscTTI2.py', 3), ('BuckWGM.py', 1), ('LyelCPG.py', 1), ('KirbWPW2.py', 1), ('BachARE.py', 1), ('BuckWGM2.py', 1), ('MereHHB3.py', 1), ('HogaGMM.py', 1), ('MartHRW.py', 1), ('FitzRNS.py', 2), ('RoscTTI.py', 1), ('RogeSIP.py', 1), ('FitzRNS2.py', 1), ('HogaGMM2.py', 1), ('LyelCPG3.py', 6)]
f049797e8ab64f9c8a15f7f1a6ffd77072370038
133b2fb99be0d75fcd3543118bf323f927a7624b
/django-for-development/base/tests/test_middleware.py
201608718c7d0c75659464e6606d49413cfc173d
[ "MIT" ]
permissive
xfenix/django-hmin
78139035321a94ddfff0767dd66f4fa2b0c42306
cfea5ebb2c3382ba05fd2af860d2e2d2e421f0f1
refs/heads/master
2022-12-13T23:31:29.822759
2020-11-15T01:14:17
2020-11-15T01:14:17
25,469,854
12
3
MIT
2022-12-08T11:02:30
2014-10-20T14:22:01
Python
UTF-8
Python
false
false
457
py
"""Test django integration.""" from django.test import Client from django.http import HttpResponse def test_middleware_indexpage(): """Test.""" view_response: HttpResponse = Client().get("/") assert ( view_response.content == b'<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width, initial-scale=1.0"><title>Document</title></head><body>This is test</body></html>' )
57aa130bd1f08c4c19e526d12f189f65810e10e8
852b57a1a2a0fa6b0d23bef16c4a989d369936e9
/playwright/_impl/_local_utils.py
af0683ed2898a6543419183742002991b600fa47
[ "Apache-2.0" ]
permissive
microsoft/playwright-python
e28badf23e20f948b4063a314e906006dcdff7fa
42c0bf19d7ae415552172d7c04cdb7afd9dad7fb
refs/heads/main
2023-08-22T17:49:04.645213
2023-08-14T12:52:46
2023-08-14T12:52:46
276,414,382
9,615
870
Apache-2.0
2023-09-05T17:07:48
2020-07-01T15:28:13
Python
UTF-8
Python
false
false
2,781
py
# Copyright (c) Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 from typing import Dict, List, Optional, cast from playwright._impl._api_structures import HeadersArray from playwright._impl._connection import ChannelOwner, StackFrame from playwright._impl._helper import HarLookupResult, locals_to_params class LocalUtils(ChannelOwner): def __init__( self, parent: ChannelOwner, type: str, guid: str, initializer: Dict ) -> None: super().__init__(parent, type, guid, initializer) async def zip(self, params: Dict) -> None: await self._channel.send("zip", params) async def har_open(self, file: str) -> None: params = locals_to_params(locals()) await self._channel.send("harOpen", params) async def har_lookup( self, harId: str, url: str, method: str, headers: HeadersArray, isNavigationRequest: bool, postData: Optional[bytes] = None, ) -> HarLookupResult: params = locals_to_params(locals()) if "postData" in params: params["postData"] = base64.b64encode(params["postData"]).decode() return cast( HarLookupResult, await self._channel.send_return_as_dict("harLookup", params), ) async def har_close(self, harId: str) -> None: params = locals_to_params(locals()) await self._channel.send("harClose", params) async def har_unzip(self, zipFile: str, harFile: str) -> None: params = locals_to_params(locals()) await self._channel.send("harUnzip", params) async def tracing_started(self, tracesDir: Optional[str], traceName: str) -> str: params = locals_to_params(locals()) return await self._channel.send("tracingStarted", params) async def trace_discarded(self, stacks_id: str) -> None: return await self._channel.send("traceDiscarded", {"stacksId": stacks_id}) def add_stack_to_tracing_no_reply(self, id: int, frames: List[StackFrame]) -> None: self._channel.send_no_reply( "addStackToTracingNoReply", { "callData": { "stack": frames, "id": id, } }, )
e2d5a3327596d21c570daa59ee7d6e2157b0c335
f3693916a8b118bf139364604dac3f51235ed613
/functional/Components/Groups/Groups_PATCH_ID/test_TC_44521_Groups_PATCH_Group_Valid_Provisioning_Policy.py
d6a8a3e05363440a478bb96584c8f3358c700adf
[]
no_license
muktabehera/QE
e7d62284889d8241d22506f6ee20547f1cfe6db1
3fedde591568e35f7b80c5bf6cd6732f8eeab4f8
refs/heads/master
2021-03-31T02:19:15.369562
2018-03-13T02:45:10
2018-03-13T02:45:10
124,984,177
0
0
null
null
null
null
UTF-8
Python
false
false
4,418
py
# -*- coding: UTF-8 -*- """PFE Component Tests - Groups. * TC-44521 - Groups PATCH: Verify that user is able to modify group on providing valid values in parameter 'provisioningPolicy' using request PATCH '/groups'. Equivalent test CURL command: curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>" -X PATCH -d @<JSON_data_file> -H "Content-Type: application/json" "<PF_host>://<client_host>/groups/updateGroup" Same, with test data: curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>" -X PATCH -d @<JSON_data_file> -H "Content-Type: application/json" "<PF_host>://<client_host>/groups/updateGroup" JSON data sent to PathFinder in this test: {'configAdminCanEdit': True, 'configurations': [], 'deliveryLoadBalancePolicy': 'DNS_NAME', 'dnsName': 'autoQEDVCC1', 'edgeDeviceRoles': ['EDGE', 'ORIGIN', 'DISTRIBUTION'], 'members': [{'id': 'Device_Test_API'}], 'name': 'Updated Group valid Provisioning Policy', 'originLoadBalancePolicy': 'ALL_MEMBERS', 'provisioningPolicy': 'ONE_OR_MORE', 'visibleInAllConfigurations': True} """ import pytest from qe_common import * logger = init_logger() @pytest.mark.components @pytest.allure.story('Groups') @pytest.allure.feature('PATCH') class Test_PFE_Components(object): """PFE Groups test cases.""" @pytest.allure.link('https://jira.qumu.com/browse/TC-44521') @pytest.mark.Groups @pytest.mark.PATCH def test_TC_44521_PATCH_Groups_Group_Valid_Provisioning_Policy(self, context): """TC-44521 - Groups-PATCH Verify that user is able to modify group on providing valid values in parameter 'provisioningPolicy' using request PATCH '/groups'.""" # Define a test step with pytest.allure.step("""Test1: Verify that user is able to modify group on providing valid values in parameter 'provisioningPolicy' using request PATCH '/groups'."""): # Test case configuration edgeDeviceGroupDetails = context.sc.EdgeDeviceGroupDetails( configAdminCanEdit=True, configurations=[], deliveryLoadBalancePolicy='DNS_NAME', dnsName='autoQEDVCC1', edgeDeviceRoles=['EDGE', 'ORIGIN', 'DISTRIBUTION'], id=None, members=[{ 'id': 'POST_veDevices_AllConfigAdminMulticastTrue' }], name='Updated Group valid Provisioning Policy', originLoadBalancePolicy='ALL_MEMBERS', provisioningPolicy='ONE_OR_MORE', proximityDetails=None, visibleInAllConfigurations=True) # updateEntity the Groups. # The `check` call validates return code # and some of the swagger schema. # Most schema checks are disabled. response = check( context.cl.Groups.updateEntity( id='GroupforPatch2', body=edgeDeviceGroupDetails ) ) # Define a test step with pytest.allure.step("""Test2: Verify that user is able to modify group on providing valid values in parameter 'provisioningPolicy' using request PATCH '/groups'."""): # Test case configuration edgeDeviceGroupDetails = context.sc.EdgeDeviceGroupDetails( configAdminCanEdit=True, configurations=[], deliveryLoadBalancePolicy='DNS_NAME', dnsName='autoQEDVCC1', edgeDeviceRoles=['EDGE', 'ORIGIN', 'DISTRIBUTION'], id=None, members=[{ 'id': 'POST_veDevices_AllConfigAdminMulticastTrue' }], name='Updated Group valid Provisioning Policy', originLoadBalancePolicy='ALL_MEMBERS', provisioningPolicy='ALL_MEMBERS', proximityDetails=None, visibleInAllConfigurations=True) # updateEntity the Groups. # The `check` call validates return code # and some of the swagger schema. # Most schema checks are disabled. response = check( context.cl.Groups.updateEntity( id='GroupforPatch2', body=edgeDeviceGroupDetails ) )
048956f2abf5397292a390d9c48f8da1e6ffdd7a
6e9ce707772643f1c3c0a6cd35de4d94e78b8a8e
/ann.py
b86b2364fee92e29ffa36e47511f1ca3bff0e41b
[]
no_license
Campos1098/scenario-responses
764f8e20a97166bb3134822a4913449c5b3e0867
cb4e306f400f4a72e744c848f422687ac5b5fa35
refs/heads/main
2023-09-04T23:12:59.660091
2021-11-12T12:35:31
2021-11-12T12:35:31
427,221,721
0
0
null
null
null
null
UTF-8
Python
false
false
12,550
py
import numpy as np import train_parser import test_parser import math import warnings import torch import torch.nn.functional as F from torch import nn from torch import optim from torch.utils.data import DataLoader, TensorDataset warnings.filterwarnings ("ignore") class Tennis_NN(nn.Module): def __init__(self, lr): super().__init__() self.lr = lr self.lin_stack = nn.Sequential( nn.Linear(111, lr, True), nn.BatchNorm1d(lr), nn.ReLU(), nn.Linear(lr, 2, True), nn.Sigmoid() ) def forward(self, xb): return self.lin_stack(xb) # Returns the model and its associated optimiser def get_model(lr, num): model = Tennis_NN(num) return model, optim.SGD(model.parameters(), lr = lr, momentum = 0.9) # Returns the training and validation data def get_data(train_ds, bs): return ( DataLoader(train_ds, batch_size = bs, shuffle = False) ) # Computes the accuracy of the model def accuracy(out, yb): preds = torch.argmax(out, dim=1) return (preds == yb).float().mean() def loss_batch(model, loss_func, xb, yb, opt = None): loss = loss_func(model(xb), yb) if opt is not None: loss.backward() opt.step() opt.zero_grad() return loss.item(), len(xb) def fit(epochs, model, loss_func, opt, train_dl, x_train, y_train, x_valid, y_valid): train = np.zeros(epochs) train_loss = np.zeros(epochs) valid = np.zeros(epochs) valid_loss = np.zeros(epochs) inc_count = 0 max_acc = 0 for epoch in range(epochs): model.train() for xb, yb in train_dl: loss_batch(model, loss_func, xb, yb, opt) model.eval() train[epoch] = accuracy(model(x_train), y_train) train_loss[epoch] = loss_func(model(x_train), y_train) valid[epoch] = accuracy(model(x_valid), y_valid) valid_loss[epoch] = loss_func(model(x_valid), y_valid) if valid[epoch] > max_acc: max_acc = valid[epoch] inc_count = 0 torch.save(model, "test") else: inc_count += 1 if inc_count == 300: print("EARLY STOPPING: " + str(epoch - 300) + " " + str(valid[epoch - 300])) inc_count = 0 break model = torch.load("test") model.eval() print("SAVED: " + str(accuracy(model(x_valid), y_valid))) def train_test(player, opponent, train_offset, valid_offset, test_offset, indices_file): # Retrieve rally data parser = train_parser.Parser(player, opponent, "charting-m-points.csv", "charting-m-matches.csv") x, y = parser.parse() # Splitting the data and transofmring them into the appropriate types x = np.array(x) y = np.array(y) x_train = x[train_offset:valid_offset] y_train = y[train_offset:valid_offset] with open(indices_file, "r") as f: indices = f.readline() indices = indices[1: -1] indices = indices.split(", ") indices = [int(index) for index in indices] f.close() valid_indices = indices[: math.ceil(len(indices) / 2)] test_indices = indices[math.ceil(len(indices) / 2) :] x_valid = x[valid_indices] x_test = x[test_indices] y_valid = y[valid_indices] y_test = y[test_indices] x_train, x_valid, x_test, y_train, y_valid, y_test = map(torch.tensor, (x_train, x_valid, x_test, y_train, y_valid, y_test)) x_train = x_train.float() x_valid = x_valid.float() x_test = x_test.float() y_train = y_train.long() y_valid = y_valid.long() y_test = y_test.long() # Setting some model training parameters lr = 0.0097 # learning rate epochs = 3000 # how many epochs to train for bs = 64 # batch size loss_func = F.cross_entropy n = len(x_train) model, opt = get_model(lr, 66) # Loading the data train_ds = TensorDataset(x_train, y_train) train_dl = get_data(train_ds, bs) # Train the model model.train() fit(epochs, model, loss_func, opt, train_dl, x_train, y_train, x_valid, y_valid) # Reporting post-training model performance model = 
torch.load("test") model.eval() return accuracy(model(x_test), y_test).item() def opt(num, x_train, y_train, x_valid, y_valid): # Setting some model training parameters lr = num # learning rate epochs = 3000 # how many epochs to train for bs = 64 # batch size loss_func = F.cross_entropy n = len(x_train) model, opt = get_model(lr, 217) # Loading the data train_ds = TensorDataset(x_train, y_train) train_dl = get_data(train_ds, bs) # Train the model model.train() fit(epochs, model, loss_func, opt, train_dl, x_train, y_train, x_valid, y_valid) # Reporting post-training model performance model = torch.load("test") model.eval() out = model(x_valid) print(accuracy(model(x_valid), y_valid).item()) return accuracy(model(x_valid), y_valid).item() def self_test(model_path, player, opponent, train_offset, valid_offset, test_offset, indices_file): model = torch.load(model_path) # Retrieve rally data parser = train_parser.Parser(player, opponent, "charting-m-points.csv", "charting-m-matches.csv") x, y = parser.parse() # Splitting the data and transofmring them into the appropriate types x = np.array(x) y = np.array(y) with open(indices_file, "r") as f: indices = f.readline() indices = indices[1: -1] indices = indices.split(", ") indices = [int(index) for index in indices] f.close() test_indices = indices[math.ceil(len(indices) / 2) :] x_test = x[test_indices] y_test = y[test_indices] x_test, y_test = map(torch.tensor, (x_test, y_test)) x_test = x_test.float() y_test = y_test.long() # Reporting post-training model performance model.eval() acc = accuracy(model(x_test), y_test) result = " ".join(player.split("_")) + " vs. " + " ".join(opponent.split("_")) + ": " + str(np.round_(acc.item() * 100, 1)) + "%" return result def test(model_path, player, opponent, cutoff): model = torch.load(model_path) # Retrieve rally data parser = test_parser.Parser(player, opponent, "charting-m-points.csv", "charting-m-matches.csv", cutoff) x, y = parser.parse() # Splitting the data and transofmring them into the appropriate types x = np.array(x) y = np.array(y) x, y = map(torch.tensor, (x, y)) x = x.float() y = y.long() model.eval() acc = accuracy(model(x), y) result = " ".join(player.split("_")) + " vs. 
" + " ".join(opponent.split("_")) + ": " + str(np.round_(acc.item() * 100, 1)) + "%" return result # model - the model to generate action probabilities with # scenario - the scenario to generate action probabilities for def evaluate_scenario(model_path, scenario): # Possible stroke and direction actions that can be taken stroke = ["Forehand groundstroke", "Backhand groundstroke", "Forehand slice", "Backhand slice", "Forehand volley", "Backhand volley", "Standard overhead/smash", "Backhand overhead/smash", "Forehand drop shot", "Backhand drop shot", "Forehand lob", "Backhand lob", "Forehand half-volley", "Backhand half-volley", "Forehand swinging volley", "Backhand swinging volley"] modifier = ["", "(approach shot), ", "(stop volley), ", "(approach shot, stop volley), "] direction = ["to the opponents right", "down the middle of the court", "to the opponents left"] print("The success probabilities for all responses to this scenario are:") model = torch.load(model_path) model.eval() opt_shot = [(0, 0, 0), 0] # Evluate each possible action that the player can take for i in range(len(stroke)): for j in range(len(direction)): scenario[0][18 + i] = 1 scenario[0][36 + j] = 1 if i in [4, 5, 12, 13, 14, 15]: for k in range(len(modifier)): scenario[0][34 + k] = 1 out = model(scenario).detach().numpy() p = np.max(out, 1) * 100 if p[0] > opt_shot[1]: opt_shot[0] = (i, k, j) opt_shot[1] = p[0] print(stroke[i] + ", " + modifier[k] + direction[j] + ": " + str(np.round(p[0], decimals = 2)) + "%") scenario[0][21 + i] = 0 scenario[0][35 + j] = 0 scenario[0][34 + k] = 0 else: for k in range(len(modifier) - 2): scenario[0][34 + k] = 1 out = model(scenario).detach().numpy() p = np.max(out, 1) * 100 if p[0] > opt_shot[1]: opt_shot[0] = (i, k, j) opt_shot[1] = p[0] print(stroke[i] + ", " + modifier[k] + direction[j] + ": " + str(np.round(p[0], decimals = 2)) + "%") scenario[0][21 + i] = 0 scenario[0][35 + j] = 0 scenario[0][34 + k] = 0 print("") print("The optimal response to this scenario is:") print(stroke[opt_shot[0][0]] + ", " + modifier[opt_shot[0][1]] + direction[opt_shot[0][2]] + ": " + str(np.round(opt_shot[1], decimals = 2)) + "%") def opt_outer(): # Retrieve rally data parser = train_parser.Parser("Roger_Federer", "Novak_Djokovic", "charting-m-points.csv", "charting-m-matches.csv") x, y = parser.parse() # Splitting the data and transofmring them into the appropriate types x = np.array(x) # x = x[:, :79] y = np.array(y) x_train = x[0:2186] y_train = y[0:2186] with open("./ELEC4712-3/thesis/indices/rf_nd_indices.txt", "r") as f: indices = f.readline() indices = indices[1: -1] indices = indices.split(", ") indices = [int(index) for index in indices] f.close() valid_indices = indices[: math.ceil(len(indices) / 2)] test_indices = indices[math.ceil(len(indices) / 2) :] x_valid = x[valid_indices] x_test = x[test_indices] y_valid = y[valid_indices] y_test = y[test_indices] x_train, x_valid, x_test, y_train, y_valid, y_test = map(torch.tensor, (x_train, x_valid, x_test, y_train, y_valid, y_test)) x_train = x_train.float() x_valid = x_valid.float() x_test = x_test.float() y_train = y_train.long() y_valid = y_valid.long() y_test = y_test.long() with open("./ELEC4712-3/thesis/indices/lr_results.txt", "a") as f: x = np.arange(1e-4, 1e-1, 0.0004) for i in range(214, len(x)): f.write(str(x[i]) + " " + str(opt(x[i], x_train, y_train, x_valid, y_valid)) + "\n") f.close() print(test("p_o_rf_nd", "Novak_Djokovic", "Roger_Federer", "20151117")) print(test("p_o_rf_nd", "Novak_Djokovic", "Rafael_Nadal", "20151117")) 
print(self_test("p_o_rf_nd", "Roger_Federer", "Novak_Djokovic", 0, 2186, 0, "rf_nd_indices.txt")) print(test("p_o_rf_nd", "Roger_Federer", "Rafael_Nadal", "20151117")) print(test("p_o_rf_nd", "Rafael_Nadal", "Roger_Federer", "20151117")) print(test("p_o_rf_nd", "Rafael_Nadal", "Novak_Djokovic", "20151117")) # evaluate_scenario("p_o_rf_nd", torch.tensor([[ # 0, 0, 0, 0, 0, 0, 1, 0, 0, # Current player position (0 - 8) # 1, 0, 0, 0, 0, 0, 0, 0, 0, # Previous player position (9 - 17) # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # Player stroke type (18 - 33) # 0, 0, # Player shot modifier (34 - 35) # 0, 0, 0, # Player shot direction (36 - 38) # 0, 0, 0, 1, 0, 0, 0, 0, 0, # Current opponent position (39 - 47) # 0, 0, 0, 1, 0, 0, 0, 0, 0, # Previous opponent position (48 - 56) # 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # Opponent stroke type (57 - 72) # 0, 0, 0, # Opponent shot modifier (73 - 75) # 1, 0, 0, # Opponent shot direction (76 - 78) # -0.16755696984219248, # Rally legnth (79) # 0, 0, 1, # Court surface (80 - 82) # 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # Game score (83 - 100) # 1, 0, 0, 0, 0, 0, 0, 0, 0, # Set score (101 - 109) # 1 # Best of (110) # ]]))
59d4bd3560e27c55efa24a0e2641c4c522652d9c
641f1cc9e827d879489cacd49b5eaeb0dd27d196
/spaceship.py
3f802c2a5fdedd2f6b8f24376692f8c73228276f
[]
no_license
Yodaskywall/online_spaceship
0ee2ef716e5cdd7dd555fbe028806eac439e3873
e00f59602d066b53d762da831183945e25bef015
refs/heads/master
2022-02-25T16:23:52.104749
2019-11-03T23:25:08
2019-11-03T23:25:08
219,359,226
0
0
null
null
null
null
UTF-8
Python
false
false
2,635
py
import pygame IMAGE_DIR = "images/" DIM = (150, 135) # Dimensions of the spaceship sprite BDIM = (10, 30) class Bullet: def __init__(self, id, ship_x, ship_y): self.id = id self.x = ship_x + DIM[0] / 2 - BDIM[0] / 2 self.y = ship_y - 20 self.speed = 100 def draw(self, win, clientId): if clientId == self.id: sprite = f"{IMAGE_DIR}/bullet.png" else: sprite = f"{IMAGE_DIR}/bullet2.png" loaded_sprite = pygame.image.load(sprite) win.blit(loaded_sprite, (self.x, self.y)) def check_hit(self, game, clientId): spaceship = game.spaceships[clientId] if (self.y + BDIM[1] > spaceship.y + DIM[1] // 2 and spaceship.x - BDIM[0] <= self.x <= spaceship.x + DIM[0] and self.id != spaceship.id): aspaceship = game.spaceships[spaceship.id] aspaceship.hp -= 1 return aspaceship def update(self, clientId): if clientId == self.id: self.y -= self.speed else: self.y += self.speed class SpaceShip: def __init__(self, p, clientId): self.id = clientId self.width = DIM[0] self.height = DIM[1] self.speed = 3 self.hp = 10 self.cooldown = 500 self.last = 0 if p == 0: self.sprite = f"{IMAGE_DIR}/nave.png" self.x = round((1200 / 2) - (self.width / 2)) self.y = round(900 * 0.98 - self.height) else: self.sprite = f"{IMAGE_DIR}/nave2.png" self.x = round((1200 / 2) - (self.width / 2)) self.y = round(900 * 0.02) self.rect = (self.x, self.x + self.width, self.y, self.y + self.width) def draw(self, win): loaded_sprite = pygame.image.load(self.sprite) win.blit(loaded_sprite, (self.x, self.y)) def move(self): keys = pygame.key.get_pressed() if self.rect[1] < 1200 and keys[pygame.K_RIGHT]: self.x += self.speed self.rect = (self.x, self.x + self.width, self.y, self.y + self.width) if self.rect[0] > 0 and keys[pygame.K_LEFT]: self.x -= self.speed self.rect = (self.x, self.x + self.width, self.y, self.y + self.width) def shoot(self, bullet_l, n): keys = pygame.key.get_pressed() now = pygame.time.get_ticks() diff = abs(now - self.last) if keys[pygame.K_SPACE] and diff >= self.cooldown: bullet_l.append(Bullet(self.id, self.x, self.y)) self.last = now return n.communicate(Bullet(self.id, self.x, self.y))
6d9ee5db943f3c3f810828b0812b4844581e2d50
74ff8919dee51454dbbdf7ca25eefd5081ea6016
/onodera/py/008_w_random_search.py
06985201946043aae1569ccdeee89b19d7e8a977
[ "MIT" ]
permissive
Sprinterzzj/Santa2017
1bcf8a84348ad00027cc25ae7e3eb5b5aaf9dfbe
9d6efe166267a9ea3efe6d0c210a87b4049dbad2
refs/heads/master
2020-05-15T21:27:38.664149
2018-03-23T03:06:47
2018-03-23T03:06:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
15,148
py
# This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in """ nohup python -u 008_w_random_search.py > log1.txt & nohup python -u 008_w_random_search.py > log2.txt & nohup python -u 008_w_random_search.py > log3.txt & nohup python -u 008_w_random_search.py > log4.txt & """ import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import math from collections import Counter from ortools.graph import pywrapgraph from subprocess import check_output print(check_output(["ls", "../input"]).decode("utf8")) n_children = 1000000 # n children to give n_gift_type = 1000 # n types of gifts available n_gift_quantity = 1000 # each type of gifts are limited to this quantity n_gift_pref = 100 # number of gifts a child ranks n_child_pref = 1000 # number of children a gift ranks twins = math.ceil(0.04 * n_children / 2.) * 2 # 4% of all population, rounded to the closest number triplets = math.ceil(0.005 * n_children / 3.) * 3 # 0.5% of all population, rounded to the closest number ratio_gift_happiness = 2 ratio_child_happiness = 2 seed = np.random.randint(9999) print('seed:',seed) gift_pref = pd.read_csv('../input/child_wishlist_v2.csv.zip',header=None).drop(0, 1).values child_pref = pd.read_csv('../input/gift_goodkids_v2.csv.zip',header=None).drop(0, 1).values def lcm(a, b): """Compute the lowest common multiple of a and b""" # in case of large numbers, using floor division return a * b // math.gcd(a, b) def avg_normalized_happiness(pred, child_pref, gift_pref): # check if number of each gift exceeds n_gift_quantity gift_counts = Counter(elem[1] for elem in pred) for count in gift_counts.values(): assert count <= n_gift_quantity # check if triplets have the same gift for t1 in np.arange(0,triplets,3): triplet1 = pred[t1] triplet2 = pred[t1+1] triplet3 = pred[t1+2] # print(t1, triplet1, triplet2, triplet3) assert triplet1[1] == triplet1[1] and triplet2[1] == triplet3[1] # check if twins have the same gift for t1 in np.arange(triplets,triplets+twins,2): twin1 = pred[t1] twin2 = pred[t1+1] # print(t1) assert twin1[1] == twin2[1] max_child_happiness = n_gift_pref * ratio_child_happiness max_gift_happiness = n_child_pref * ratio_gift_happiness total_child_happiness = 0 total_gift_happiness = np.zeros(n_gift_type) for row in pred: child_id = row[0] gift_id = row[1] # check if child_id and gift_id exist assert child_id < n_children assert gift_id < n_gift_type assert child_id >= 0 assert gift_id >= 0 child_happiness = (n_gift_pref - np.where(gift_pref[child_id]==gift_id)[0]) * ratio_child_happiness if not child_happiness: child_happiness = -1 gift_happiness = ( n_child_pref - np.where(child_pref[gift_id]==child_id)[0]) * ratio_gift_happiness if not gift_happiness: gift_happiness = -1 total_child_happiness += child_happiness total_gift_happiness[gift_id] += gift_happiness print('normalized child happiness=',float(total_child_happiness)/(float(n_children)*float(max_child_happiness)) , \ ', normalized gift happiness',np.mean(total_gift_happiness) / float(max_gift_happiness*n_gift_quantity)) # to avoid float rounding error # find common denominator # NOTE: I used this code to experiment different parameters, so it was necessary to get the multiplier # Note: You should hard-code the multipler to speed up, now that the parameters are finalized denominator1 = n_children*max_child_happiness denominator2 
    = n_gift_quantity*max_gift_happiness*n_gift_type
    common_denom = lcm(denominator1, denominator2)
    multiplier = common_denom / denominator1
    # usually denom1 > denom2
    return float(math.pow(total_child_happiness*multiplier, 3) + math.pow(np.sum(total_gift_happiness), 3)) / float(math.pow(common_denom, 3))
    # return math.pow(float(total_child_happiness)/(float(n_children)*float(max_child_happiness)),2) + math.pow(np.mean(total_gift_happiness) / float(max_gift_happiness*n_gift_quantity),2)

#random_sub = pd.read_csv('../input/sample_submission_random_v2.csv').values.tolist()
#print(avg_normalized_happiness(random_sub, child_pref, gift_pref))
#gift_pref.shape, child_pref.shape

class Child(object):
    def __init__(self, idx, prefer):
        self.idx = idx
        self.prefer_dict = dict()
        for i in range(prefer.shape[0]):
            self.prefer_dict[prefer[i]] = [12*(prefer.shape[0] - i), -6]

    def add_gifts_prefer(self, giftid, score):
        if giftid in self.prefer_dict.keys():
            self.prefer_dict[giftid][1] = 6*score
        else:
            self.prefer_dict[giftid] = [-6, 6*score]
        return None

    def happiness(self, giftid):
        return self.prefer_dict.get(giftid, [-6, -6])

class Child_twin(object):
    def __init__(self, idx, prefer1, prefer2):
        self.idx = idx
        self.prefer_dict = dict()
        for p in list(set(list(prefer1) + list(prefer2))):
            score = 0
            if p in list(prefer1):
                score += 2*(100 - list(prefer1).index(p))
            else:
                score -= 1
            if p in list(prefer2):
                score += 2*(100 - list(prefer2).index(p))
            else:
                score -= 1
            self.prefer_dict[p] = [3*score, -6]

    def add_gifts_prefer(self, giftid, score):
        if giftid in self.prefer_dict.keys():
            self.prefer_dict[giftid][1] = 3*score
        else:
            self.prefer_dict[giftid] = [-6, 3*score]
        return None

    def happiness(self, giftid):
        return self.prefer_dict.get(giftid, [-6, -6])

class Child_triplet(object):
    def __init__(self, idx, prefer1, prefer2, prefer3):
        self.idx = idx
        self.prefer_dict = dict()
        for p in list(set(list(prefer1) + list(prefer2) + list(prefer3))):
            score = 0
            if p in list(prefer1):
                score += 2*(100 - list(prefer1).index(p))
            else:
                score -= 1
            if p in list(prefer2):
                score += 2*(100 - list(prefer2).index(p))
            else:
                score -= 1
            if p in list(prefer3):
                score += 2*(100 - list(prefer3).index(p))
            else:
                score -= 1
            self.prefer_dict[p] = [2*score, -6]

    def add_gifts_prefer(self, giftid, score):
        if giftid in self.prefer_dict.keys():
            self.prefer_dict[giftid][1] = 2*score
        else:
            self.prefer_dict[giftid] = [-6, 2*score]
        return None

    def happiness(self, giftid):
        return self.prefer_dict.get(giftid, [-6, -6])

Children = []
for i in range(0, 5001, 3):
    Children.append(Child_triplet(i, gift_pref[i], gift_pref[i+1], gift_pref[i+2]))
    Children.append(Child_triplet(i+1, gift_pref[i], gift_pref[i+1], gift_pref[i+2]))
    Children.append(Child_triplet(i+2, gift_pref[i], gift_pref[i+1], gift_pref[i+2]))
for i in range(5001, 45001, 2):
    Children.append(Child_twin(i, gift_pref[i], gift_pref[i+1]))
    Children.append(Child_twin(i+1, gift_pref[i], gift_pref[i+1]))
Children = Children + [Child(i, gift_pref[i]) for i in range(45001, 1000000)]

for j in range(1000):
    cf = child_pref[j]
    done_list = []
    for i in range(cf.shape[0]):
        if cf[i] <= 5000 and cf[i] not in done_list:
            if cf[i] % 3 == 0:
                cid1 = cf[i]
                cid2 = cf[i] + 1
                cid3 = cf[i] + 2
                done_list.append(cid2)
                done_list.append(cid3)
            elif cf[i] % 3 == 1:
                cid1 = cf[i] - 1
                cid2 = cf[i]
                cid3 = cf[i] + 1
                done_list.append(cid1)
                done_list.append(cid3)
            else:
                cid1 = cf[i] - 2
                cid2 = cf[i] - 1
                cid3 = cf[i]
                done_list.append(cid1)
                done_list.append(cid2)
            if cid1 in list(cf):
                score_ = 2*(cf.shape[0] - list(cf).index(cid1))
            else:
                score_ = -1
            if cid2 in list(cf):
                score_ += 2*(cf.shape[0] - list(cf).index(cid2))
            else:
                score_ += -1
            if cid3 in list(cf):
                score_ += 2*(cf.shape[0] - list(cf).index(cid3))
            else:
                score_ += -1
            Children[cid1].add_gifts_prefer(j, score_)
            Children[cid2].add_gifts_prefer(j, score_)
            Children[cid3].add_gifts_prefer(j, score_)
        elif cf[i] <= 45000 and cf[i] not in done_list:
            if cf[i] % 2 == 0:
                cid1 = cf[i]
                cid2 = cf[i] + 1
                done_list.append(cid2)
            else:
                cid1 = cf[i] - 1
                cid2 = cf[i]
                done_list.append(cid1)
            if cid1 in list(cf):
                score_ = 2*(cf.shape[0] - list(cf).index(cid1))
            else:
                score_ = -1
            if cid2 in list(cf):
                score_ += 2*(cf.shape[0] - list(cf).index(cid2))
            else:
                score_ += -1
            Children[cid1].add_gifts_prefer(j, score_)
            Children[cid2].add_gifts_prefer(j, score_)
        elif cf[i] > 45000:
            Children[cf[i]].add_gifts_prefer(j, 2*(cf.shape[0] - i))

print("W_CHILD, W_GIFTS, W_CHILD/W_GIFTS, score")
while True:
    W_CHILD = np.random.randint(10000, 99999999)
    W_GIFTS = int(W_CHILD * np.random.uniform(2/3*0.9, 2/3*1.1))
    start_nodes = []
    end_nodes = []
    capacities = []
    unit_costs = []
    # triplets
    for i in range(0, 5001, 3):
        for g in Children[i].prefer_dict.keys():
            start_nodes.append(1000000+g)
            end_nodes.append(i)
            capacities.append(3)
            unit_costs.append(-W_CHILD*(Children[i].prefer_dict[g][0] + 6)-W_GIFTS*(Children[i].prefer_dict[g][1] + 6))
    # twins
    for i in range(5001, 45001, 2):
        for g in Children[i].prefer_dict.keys():
            start_nodes.append(1000000+g)
            end_nodes.append(i)
            capacities.append(2)
            unit_costs.append(-W_CHILD*(Children[i].prefer_dict[g][0] + 6)-W_GIFTS*(Children[i].prefer_dict[g][1] + 6))
    # other children
    for i in range(45001, 1000000):
        for g in Children[i].prefer_dict.keys():
            start_nodes.append(1000000+g)
            end_nodes.append(i)
            capacities.append(1)
            unit_costs.append(-W_CHILD*(Children[i].prefer_dict[g][0] + 6)-W_GIFTS*(Children[i].prefer_dict[g][1] + 6))

    min_cost_flow_1 = pywrapgraph.SimpleMinCostFlow()
    # add Arc
    # gift -> children
    for i in range(len(start_nodes)):
        min_cost_flow_1.AddArcWithCapacityAndUnitCost(
            int(start_nodes[i]), int(end_nodes[i]), int(capacities[i]), int(unit_costs[i])
        )
    # children -> 1001000 : collection
    for i in range(0, 5001, 3):
        min_cost_flow_1.AddArcWithCapacityAndUnitCost(int(i), int(1001000), int(3), int(0))
    for i in range(5001, 45001, 2):
        min_cost_flow_1.AddArcWithCapacityAndUnitCost(int(i), int(1001000), int(2), int(0))
    for i in range(45001, 1000000):
        min_cost_flow_1.AddArcWithCapacityAndUnitCost(int(i), int(1001000), int(1), int(0))
    # gift -> 1001001 : dust_gift
    for i in range(1000):
        min_cost_flow_1.AddArcWithCapacityAndUnitCost(int(1000000+i), int(1001001), int(1000), int(0))
    # 1001001 -> 1001000 : dust_path
    min_cost_flow_1.AddArcWithCapacityAndUnitCost(int(1001001), int(1001000), int(1000000), int(0))
    # add Supply
    for i in range(1000):
        min_cost_flow_1.SetNodeSupply(int(1000000+i), int(1000))
    # children
    for i in range(0, 5001, 3):
        min_cost_flow_1.SetNodeSupply(int(i), int(0))
    for i in range(5001, 45001, 2):
        min_cost_flow_1.SetNodeSupply(int(i), int(0))
    for i in range(45001, 1000000):
        min_cost_flow_1.SetNodeSupply(int(i), int(0))
    min_cost_flow_1.SetNodeSupply(int(1001001), int(0))
    min_cost_flow_1.SetNodeSupply(int(1001000), int(-1000000))

    min_cost_flow_1.Solve()

    assignment = [-1]*1000000
    twins_differ = []
    triplets_differ = []
    for i in range(min_cost_flow_1.NumArcs()):
        if min_cost_flow_1.Flow(i) != 0 and min_cost_flow_1.Head(i) < 1000000:
            c = min_cost_flow_1.Head(i)
            g = min_cost_flow_1.Tail(i)
            f = min_cost_flow_1.Flow(i)
            if c >= 45001:
                assignment[c] = g - 1000000
            elif c >= 5001:
                if f == 1:
                    if assignment[c] == -1:
                        assignment[c] = g - 1000000
                        twins_differ.append([c, c+1])
                    else:
                        assignment[c+1] = g - 1000000
                elif f == 2:
                    assignment[c] = g - 1000000
                    assignment[c+1] = g - 1000000
            else:
                if f == 1:
                    if assignment[c] == -1:
                        assignment[c] = g - 1000000
                        triplets_differ.append([c, c+1, c+2])
                    elif assignment[c+1] == -1:
                        assignment[c+1] = g - 1000000
                    else:
                        assignment[c+2] = g - 1000000
                elif f == 2:
                    if assignment[c] == -1:
                        assignment[c] = g - 1000000
                        assignment[c+1] = g - 1000000
                        triplets_differ.append([c, c+1, c+2])
                    else:
                        assignment[c+1] = g - 1000000
                        assignment[c+2] = g - 1000000
                elif f == 3:
                    assignment[c] = g - 1000000
                    assignment[c+1] = g - 1000000
                    assignment[c+2] = g - 1000000

    CHILD_HAPPINESS = sum([Children[i].happiness(assignment[i])[0] for i in range(1000000)])*10
    SANTA_HAPPINESS = sum([Children[i].happiness(assignment[i])[1] for i in range(1000000)])
    OBJ = CHILD_HAPPINESS**3 + SANTA_HAPPINESS**3
    score = OBJ / (12000000000**3)
    print('{}, {}, {:.5f}, {}'.format(W_CHILD, W_GIFTS, (W_CHILD/W_GIFTS), score))
    # wata: 0.9362730938
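The record above encodes the child-gift assignment as a min-cost flow over OR-Tools' legacy pywrapgraph API. A minimal sketch of the same pattern on a toy instance (node ids, capacities, and costs here are made up for illustration; assumes the legacy `ortools.graph.pywrapgraph` module is available):

# Toy min-cost flow in the style of the record above: two "gift" supply nodes
# (0, 1) each ship one unit to two "child" demand nodes (2, 3); unit costs are
# negated "happiness" values, so minimizing cost maximizes happiness.
from ortools.graph import pywrapgraph

mcf = pywrapgraph.SimpleMinCostFlow()
for tail, head, cap, cost in [(0, 2, 1, -5), (0, 3, 1, -1),
                              (1, 2, 1, -2), (1, 3, 1, -4)]:
    mcf.AddArcWithCapacityAndUnitCost(tail, head, cap, cost)
mcf.SetNodeSupply(0, 1)
mcf.SetNodeSupply(1, 1)
mcf.SetNodeSupply(2, -1)
mcf.SetNodeSupply(3, -1)

if mcf.Solve() == mcf.OPTIMAL:
    for i in range(mcf.NumArcs()):
        if mcf.Flow(i) > 0:
            print(mcf.Tail(i), "->", mcf.Head(i), "cost", mcf.UnitCost(i))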
91e8aa631768c6b2d163460ed75174c88256162a
fab14fae2b494068aa793901d76464afb965df7e
/benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/18-sender_receiver_5.py
df88016e3e38fa248461fa4e0917379e9383acde
[ "MIT" ]
permissive
teodorov/F3
673f6f9ccc25acdfdecbfc180f439253474ba250
c863215c318d7d5f258eb9be38c6962cf6863b52
refs/heads/master
2023-08-04T17:37:38.771863
2021-09-16T07:38:28
2021-09-16T07:38:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
19,274
py
from typing import FrozenSet
from collections import Iterable
from math import log, ceil

from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times

from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types

from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location

delta_name = "delta"


def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    assert not name.startswith("_"), name
    s = msat_declare_function(menv, name, c_type)
    s = msat_make_constant(menv, s)
    x_s = msat_declare_function(menv, name_next(name), c_type)
    x_s = msat_make_constant(menv, x_s)
    return s, x_s


def make_enum(menv, v_name: str, enum_size: int):
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals


def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    m_one = msat_make_number(menv, "-1")
    arg1 = msat_make_times(menv, arg1, m_one)
    return msat_make_plus(menv, arg0, arg1)


def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    geq = msat_make_geq(menv, arg0, arg1)
    return msat_make_not(menv, geq)


def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    return msat_make_leq(menv, arg1, arg0)


def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    leq = msat_make_leq(menv, arg0, arg1)
    return msat_make_not(menv, leq)


def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    n_arg0 = msat_make_not(menv, arg0)
    return msat_make_or(menv, n_arg0, arg1)


def diverging_symbs(menv: msat_env) -> frozenset:
    real_type = msat_get_rational_type(menv)
    delta = msat_declare_function(menv, delta_name, real_type)
    delta = msat_make_constant(menv, delta)
    return frozenset([delta])


def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    r2s, x_r2s = decl_consts(menv, "r2s", int_type)
    s2r, x_s2r = decl_consts(menv, "s2r", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
    receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)

    curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
    for comp in [sender, receiver]:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s

    zero = msat_make_number(menv, "0")
    init = msat_make_and(menv, receiver.init, sender.init)
    trans = msat_make_and(menv, receiver.trans, sender.trans)
    # invar delta >= 0
    init = msat_make_and(menv, init, msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans, msat_make_geq(menv, x_delta, zero))
    # delta > 0 -> (r2s' = r2s & s2r' = s2r)
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv, msat_make_equal(menv, x_r2s, r2s),
                        msat_make_equal(menv, x_s2r, s2r))
    trans = msat_make_and(menv, trans, msat_make_impl(menv, lhs, rhs))

    # (G F !s.stutter) -> G (s.wait_ack -> F s.send)
    lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
    rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
                                    enc.make_F(sender.send)))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl


class Module:
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        self.symb2next = {}
        true = msat_make_true(menv)
        self.init = true
        self.trans = true

    def _symb(self, v_name, v_type):
        v_name = "{}_{}".format(self.name, v_name)
        return decl_consts(self.menv, v_name, v_type)

    def _enum(self, v_name: str, enum_size: int):
        c_name = "{}_{}".format(self.name, v_name)
        return make_enum(self.menv, c_name, enum_size)


class Sender(Module):
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        evt, x_evt = self._symb("evt", bool_type)
        msg_id, x_msg_id = self._symb("msg_id", int_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        c, x_c = self._symb("c", real_type)

        self.move = evt
        self.stutter = msat_make_not(menv, evt)
        self.x_move = x_evt
        self.x_stutter = msat_make_not(menv, x_evt)

        self.send = loc
        self.wait_ack = msat_make_not(menv, loc)
        self.x_send = x_loc
        self.x_wait_ack = msat_make_not(menv, x_loc)

        self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
                          timeout: x_timeout, c: x_c}

        zero = msat_make_number(menv, "0")
        one = msat_make_number(menv, "1")
        base_timeout = one

        # send & c = 0 & msg_id = 0
        self.init = msat_make_and(menv,
                                  msat_make_and(menv, self.send,
                                                msat_make_equal(menv, c,
                                                                zero)),
                                  msat_make_equal(menv, msg_id, zero))
        # invar: wait_ack -> c <= timeout
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, self.wait_ack,
                           msat_make_leq(menv, c, timeout)))
        self.trans = msat_make_impl(menv, self.x_wait_ack,
                                    msat_make_leq(menv, x_c, x_timeout))

        # delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
        # c' = c + delta & out_c' = out_c
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero),
                           self.stutter)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_msg_id, msg_id)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_timeout, timeout),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))

        disc_t = msat_make_and(menv, self.move,
                               msat_make_equal(menv, delta, zero))

        # (send & send') ->
        # (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_send))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id, msg_id),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (send & wait_ack') ->
        # (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_wait_ack))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id,
                                          msat_make_plus(menv, msg_id, one)),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c > timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs_iff = msat_make_and(menv,
                                msat_make_not(menv,
                                              msat_make_equal(menv, in_c,
                                                              msg_id)),
                                msat_make_geq(menv, c, timeout))
        rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
        rhs = msat_make_and(menv,
                            msat_make_and(menv,
                                          msat_make_equal(menv, x_c, zero),
                                          msat_make_equal(menv, x_out_c,
                                                          out_c)),
                            rhs_iff)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & wait_ack') -> (timeout' > timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack,
                                          self.x_wait_ack))
        rhs = msat_make_gt(menv, x_timeout, timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs = msat_make_iff(menv, self.x_send,
                            msat_make_and(menv,
                                          msat_make_equal(menv, in_c, msg_id),
                                          msat_make_lt(menv, c, timeout)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & send') -> (timeout' = base_timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack, self.x_send))
        rhs = msat_make_equal(menv, x_timeout, base_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))


class Receiver(Module):
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        loc, x_loc = self._symb("l", bool_type)

        self.wait = loc
        self.work = msat_make_not(menv, loc)
        self.x_wait = x_loc
        self.x_work = msat_make_not(menv, x_loc)

        self.symb2next = {loc: x_loc}

        zero = msat_make_number(menv, "0")

        # wait
        self.init = self.wait
        # delta > 0 -> loc' = loc & out_c' = out_c
        lhs = msat_make_gt(menv, delta, zero)
        rhs = msat_make_and(menv,
                            msat_make_iff(menv, x_loc, loc),
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_impl(menv, lhs, rhs)

        disc_t = msat_make_equal(menv, delta, zero)

        # wait -> (wait' <-> in_c = out_c)
        lhs = msat_make_and(menv, disc_t, self.wait)
        rhs = msat_make_iff(menv, self.x_wait,
                            msat_make_equal(menv, in_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & wait') -> (out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_wait))
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & work') -> out_c' = in_c
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_work))
        rhs = msat_make_equal(menv, x_out_c, in_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # work -> out_c' = out_c
        lhs = msat_make_and(menv, disc_t, self.work)
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))


def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    assert isinstance(env, PysmtEnv)

    mgr = env.formula_manager
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("r2s", types.INT)
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)

    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])

    x_delta = symb_to_next(mgr, delta)
    x_r2s = symb_to_next(mgr, r2s)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_msg_id = symb_to_next(mgr, s_msg_id)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)

    res = []

    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)

    loc0 = Location(env, mgr.Equals(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r0))
    hint = Hint("h_delta0", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.Equals(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i0))
    hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.Equals(r2s, i0))
    loc0.set_progress(0, mgr.Equals(x_r2s, i0))
    hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, s_l)
    loc0.set_progress(0, x_s_l)
    hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.Equals(s_msg_id, i0))
    loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
    hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.Equals(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
    hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.Equals(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, r0))
    hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, r_l)
    loc0.set_progress(0, x_r_l)
    hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r1))
    hint = Hint("h_delta1", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.GE(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i1))
    hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.GE(r2s, i0))
    loc0.set_progress(0, mgr.Equals(x_r2s, i1))
    hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, s_l)
    loc0.set_progress(1, mgr.Not(x_s_l))
    loc1 = Location(env, mgr.Not(s_l))
    loc1.set_progress(0, x_s_l)
    hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)

    loc0 = Location(env, s_evt)
    loc0.set_progress(1, mgr.Not(x_s_evt))
    loc1 = Location(env, mgr.Not(s_evt))
    loc1.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)

    loc0 = Location(env, mgr.GE(s_msg_id, i0))
    loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
    hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.GE(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
    hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, mgr.GE(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
    hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    loc0 = Location(env, r_l)
    loc0.set_progress(1, mgr.Not(x_r_l))
    loc1 = Location(env, mgr.Not(r_l))
    loc1.set_progress(0, x_r_l)
    hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)

    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
    hint = Hint("h_delta2", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    return frozenset(res)
dd054ec24319a017c4b3be563688e9b6f157981c
3c751e5bebd9ee3602b41a41d0fdba968eaadf38
/08월/08_16/4874.py
8da111dfd2d6d4e274b27f47fca1a0374f418c72
[]
no_license
ohsean93/algo
423b25e52f638540039bd6e57706f45ab71871c8
8f4e20a0d955610427db9273d1eb138c7ae1e534
refs/heads/master
2020-06-27T01:58:36.484367
2019-11-29T00:04:02
2019-11-29T00:04:02
199,815,006
0
1
null
null
null
null
UTF-8
Python
false
false
966
py
import sys
sys.stdin = open("input.txt", "r")

T = int(input())
for test_case in range(T):
    num_list = [0] * 129
    operator = ('+', '-', '/', '*')
    i = -1
    for char in input().split():
        if char.isdigit():
            i += 1
            num_list[i] = int(char)
        elif char == '.':
            continue
        elif char in operator:
            if i < 1:
                ans = 'error'
                break
            if char == '+':
                num_list[i-1] += num_list[i]
                i -= 1
            elif char == '-':
                num_list[i-1] -= num_list[i]
                i -= 1
            elif char == '/':
                num_list[i-1] //= num_list[i]
                i -= 1
            elif char == '*':
                num_list[i-1] *= num_list[i]
                i -= 1
    else:
        if i == 0:
            ans = num_list[0]
        else:
            ans = 'error'
    print('#{} {}'.format(test_case+1, ans))
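The record above evaluates a postfix expression using an array as a stack, reporting 'error' on underflow or leftover operands. A compact list-based version of the same idea (the helper `eval_postfix` is hypothetical, not part of the record):

# Minimal postfix evaluator: push numbers, pop two on an operator; anything
# other than exactly one leftover value is an error.
def eval_postfix(tokens):
    stack = []
    ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
           '*': lambda a, b: a * b, '/': lambda a, b: a // b}
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        elif tok in ops:
            if len(stack) < 2:
                return 'error'
            b, a = stack.pop(), stack.pop()
            stack.append(ops[tok](a, b))
    return stack[0] if len(stack) == 1 else 'error'

print(eval_postfix('3 4 + 2 *'.split()))  # prints 14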
a0c3fb9611547ff89d8af7bcbaed1d9775f86348
8e115d2de6e7904d92a7a81bc8232fa3bb4c04f7
/s_vae_pytorch/examples/mnist.py
7dc2a5792c50f29482d7afce0c0dc587831b4835
[ "MIT" ]
permissive
P4ppenheimer/circle_slice_flow_and_variational_determinant_estimator
9c9ef8fd2cee1175ae33fe91cced7d824645a9be
6d42c7641e9e060802b69c8c9a89aeb02c46c922
refs/heads/main
2023-02-06T03:09:09.548158
2020-12-31T12:11:10
2020-12-31T12:11:10
322,137,213
5
0
null
null
null
null
UTF-8
Python
false
false
7,373
py
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from torchvision import datasets, transforms

from collections import defaultdict

from hyperspherical_vae.distributions import VonMisesFisher
from hyperspherical_vae.distributions import HypersphericalUniform

DIM_MNIST = 784

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, download=True,
                   transform=transforms.ToTensor()),
    batch_size=64)


class ModelVAE(torch.nn.Module):

    def __init__(self, h_dim, z_dim, activation=F.relu, distribution='normal'):
        """
        ModelVAE initializer
        :param h_dim: dimension of the hidden layers
        :param z_dim: dimension of the latent representation
        :param activation: callable activation function
        :param distribution: string either `normal` or `vmf`, indicates which distribution to use
        """
        super(ModelVAE, self).__init__()

        self.z_dim, self.activation, self.distribution = z_dim, activation, distribution

        # 2 hidden layers encoder
        self.fc_e0 = nn.Linear(784, h_dim * 2)
        self.fc_e1 = nn.Linear(h_dim * 2, h_dim)

        if self.distribution == 'normal':
            # compute mean and std of the normal distribution
            self.fc_mean = nn.Linear(h_dim, z_dim)
            self.fc_var = nn.Linear(h_dim, z_dim)
        elif self.distribution == 'vmf':
            # compute mean and concentration of the von Mises-Fisher
            self.fc_mean = nn.Linear(h_dim, z_dim)
            self.fc_var = nn.Linear(h_dim, 1)
        else:
            raise NotImplementedError

        # 2 hidden layers decoder
        self.fc_d0 = nn.Linear(z_dim, h_dim)
        self.fc_d1 = nn.Linear(h_dim, h_dim * 2)
        self.fc_logits = nn.Linear(h_dim * 2, DIM_MNIST)

    def encode(self, x):
        # 2 hidden layers encoder
        x = self.activation(self.fc_e0(x))
        x = self.activation(self.fc_e1(x))

        if self.distribution == 'normal':
            # compute mean and std of the normal distribution
            z_mean = self.fc_mean(x)
            z_var = F.softplus(self.fc_var(x))
        elif self.distribution == 'vmf':
            # compute mean and concentration of the von Mises-Fisher
            z_mean = self.fc_mean(x)
            z_mean = z_mean / z_mean.norm(dim=-1, keepdim=True)
            # the `+ 1` prevents collapsing behaviors
            z_var = F.softplus(self.fc_var(x)) + 1
        else:
            raise NotImplementedError

        return z_mean, z_var

    def decode(self, z):
        x = self.activation(self.fc_d0(z))
        x = self.activation(self.fc_d1(x))
        x = self.fc_logits(x)
        return x

    def reparameterize(self, z_mean, z_var):
        if self.distribution == 'normal':
            q_z = torch.distributions.normal.Normal(z_mean, z_var)
            p_z = torch.distributions.normal.Normal(torch.zeros_like(z_mean), torch.ones_like(z_var))
        elif self.distribution == 'vmf':
            q_z = VonMisesFisher(z_mean, z_var)
            p_z = HypersphericalUniform(self.z_dim - 1)
        else:
            raise NotImplementedError

        return q_z, p_z

    def forward(self, x):
        z_mean, z_var = self.encode(x)
        q_z, p_z = self.reparameterize(z_mean, z_var)
        z = q_z.rsample()
        x_ = self.decode(z)
        return (z_mean, z_var), (q_z, p_z), z, x_


def log_likelihood(model, x, n=10):
    """
    :param model: model object
    :param x: mini-batch of images
    :param n: number of MC samples
    :return: MC estimate of log-likelihood
    """
    z_mean, z_var = model.encode(x.reshape(-1, 784))
    q_z, p_z = model.reparameterize(z_mean, z_var)
    z = q_z.rsample(torch.Size([n]))
    x_mb_ = model.decode(z)

    log_p_z = p_z.log_prob(z)

    if model.distribution == 'normal':
        log_p_z = log_p_z.sum(-1)

    log_p_x_z = -nn.BCEWithLogitsLoss(reduction='none')(x_mb_, x.reshape(-1, 784).repeat((n, 1, 1))).sum(-1)

    log_q_z_x = q_z.log_prob(z)

    if model.distribution == 'normal':
        log_q_z_x = log_q_z_x.sum(-1)

    return ((log_p_x_z + log_p_z - log_q_z_x).t().logsumexp(-1) - np.log(n)).mean()


def train(model, optimizer):
    for i, (x_mb, y_mb) in enumerate(train_loader):
        optimizer.zero_grad()

        # dynamic binarization
        x_mb = (x_mb > torch.distributions.Uniform(0, 1).sample(x_mb.shape)).float()

        _, (q_z, p_z), _, x_mb_ = model(x_mb.reshape(-1, 784))

        print('q_z', q_z)
        print('p_z', p_z)
        print('x_mb', x_mb)
        print('x_mb_', x_mb_)

        # BCEWithLogits is BCE with sigmoid:
        # Loss_BCE(x,y) = y log[sigmoid(x)] + (1 - y) log[1 - sigmoid(x)]
        # i.e. the decoder output is mapped to [0,1] via sigmoid before the BCE
        loss_recon = nn.BCEWithLogitsLoss(reduction='none')(x_mb_, x_mb.reshape(-1, 784)).sum(-1).mean()

        if model.distribution == 'normal':
            loss_KL = torch.distributions.kl.kl_divergence(q_z, p_z).sum(-1).mean()
        elif model.distribution == 'vmf':
            loss_KL = torch.distributions.kl.kl_divergence(q_z, p_z).mean()
        else:
            raise NotImplementedError

        loss = loss_recon + loss_KL

        loss.backward()
        optimizer.step()


def test(model, optimizer):
    print_ = defaultdict(list)
    for x_mb, y_mb in test_loader:

        # dynamic binarization
        x_mb = (x_mb > torch.distributions.Uniform(0, 1).sample(x_mb.shape)).float()

        _, (q_z, p_z), _, x_mb_ = model(x_mb.reshape(-1, 784))

        print_['recon loss'].append(float(nn.BCEWithLogitsLoss(reduction='none')(x_mb_, x_mb.reshape(-1, 784)).sum(-1).mean().data))

        if model.distribution == 'normal':
            print_['KL'].append(float(torch.distributions.kl.kl_divergence(q_z, p_z).sum(-1).mean().data))
        elif model.distribution == 'vmf':
            print_['KL'].append(float(torch.distributions.kl.kl_divergence(q_z, p_z).mean().data))
        else:
            raise NotImplementedError

        print_['ELBO'].append(- print_['recon loss'][-1] - print_['KL'][-1])
        print_['LL'].append(float(log_likelihood(model, x_mb).data))

    print({k: np.mean(v) for k, v in print_.items()})


# hidden dimension and dimension of latent space
H_DIM = 128
Z_DIM = 5

# normal VAE
modelN = ModelVAE(h_dim=H_DIM, z_dim=Z_DIM, distribution='normal')
optimizerN = optim.Adam(modelN.parameters(), lr=1e-3)

print('##### Normal VAE #####')

# training for 1 epoch
train(modelN, optimizerN)

# test
test(modelN, optimizerN)

print()

# hyper-spherical VAE
modelS = ModelVAE(h_dim=H_DIM, z_dim=Z_DIM + 1, distribution='vmf')
optimizerS = optim.Adam(modelS.parameters(), lr=1e-3)

print('##### Hyper-spherical VAE #####')

# training for 1 epoch
train(modelS, optimizerS)

# test
test(modelS, optimizerS)
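For the `normal` branch above, the loss is reconstruction plus an analytic KL between the approximate posterior and a standard normal prior. A self-contained sketch of those two pieces with `torch.distributions` (toy tensors, not the MNIST model):

import torch
from torch.distributions import Normal, kl_divergence

# Toy posterior parameters for a batch of 4 with a 5-dim latent.
z_mean, z_std = torch.zeros(4, 5), torch.ones(4, 5)
q_z = Normal(z_mean, z_std)
p_z = Normal(torch.zeros_like(z_mean), torch.ones_like(z_std))

z = q_z.rsample()                     # reparameterized sample, keeps gradients
kl = kl_divergence(q_z, p_z).sum(-1)  # sum over latent dims, as in train()
print(z.shape, kl.mean().item())      # KL is ~0 when q matches the prior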
1d2d063d4047b17f3d2b4865e4865ecf6051c468
84c04d74c934cf6e857617745589e974a2d3d733
/hang man.py
b19161e1c7946d738e6a01ceec7e611ecaafbc33
[]
no_license
Ryan525600/hangman
f68466517fec7c614a95d04b6a3be83f4ca37c00
80b33d7127f4766a49a8543f27982ccdd2f4dfa8
refs/heads/master
2023-02-13T06:45:36.395184
2021-01-07T14:08:12
2021-01-07T14:08:12
325,546,519
0
0
null
null
null
null
UTF-8
Python
false
false
1,708
py
import random

HANGMAN_PICS = ['''
  +---+
      |
      |
      |
     ===''', '''
  +---+
  O   |
      |
      |
     ===''', '''
  +---+
  O   |
  |   |
      |
     ===''', '''
  +---+
  O   |
 /|   |
      |
     ===''', '''
  +---+
  O   |
 /|\  |
      |
     ===''', '''
  +---+
  O   |
 /|\  |
 /    |
     ===''', '''
  +---+
  O   |
 /|\  |
 / \  |
     ===''']

words = 'ant baboon badger bat bear beaver camel cat clam cobra cougar coyote crow deer dog donkey duck eagle ferret fox frog goat goose hawk lion lizard llama mole monkey moose mouse mule newt otter owl panda parrot pigeon python rabbit ram rat raven rhino salmon seal shark sheep skunk sloth snake spider stork swan tiger toad trout turkey turtle weasel whale wolf wombat zebra'.split()
# Used split() so the words could be entered conveniently.

def getRandomWord(wordList):
    # This function returns a random string from the passed list of strings.
    wordIndex = random.randint(0, len(wordList) - 1)
    # Picks a random number. Since lists are indexed from 0, subtract 1 from
    # the length so the number stays within the list's index range. wordList
    # receives the input, and the parameter is that input.
    return wordList[wordIndex]
    # wordIndex picks a random index and the word at that index is returned.

def displayBoard(missedLetters, correctLetters, secretWord):
    print(HANGMAN_PICS[len(missedLetters)])
    print()

    print('Missed letters:', end=' ')
    for letter in missedLetters:
        print(letter, end=' ')
    print()
    # this for loop is going to display the missed letters.
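A sketch of where the record above is heading (random word choice plus a masked display); the helpers `get_random_word` and `masked` and the shortened word list are mine, for illustration only:

import random

words = 'ant baboon badger'.split()  # stand-in for the full list above

def get_random_word(word_list):
    # random.choice does the index arithmetic the comments above describe
    return random.choice(word_list)

def masked(secret_word, correct_letters):
    # show guessed letters, hide the rest
    return ' '.join(c if c in correct_letters else '_' for c in secret_word)

secret = get_random_word(words)
print(masked(secret, set('ab')))  # e.g. 'b a _ _ _ _' for 'badger'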
74e3646c2f02af5d9e071403693416be9eef3e59
31d9f7debbc2e1e42df5d1c1dc6ef963ea690165
/archiv_wgan_GP.py
b199ad62b30a4dde7eb6ffa826cc0692f7639f76
[]
no_license
im-Kitsch/DLMB
d044fc0b97b73b570ada44b83e9f295c1d31e03b
6144d673c63dc179b0b0a4603fd5b361c660f6f4
refs/heads/main
2023-03-07T11:14:16.581536
2021-02-22T19:20:22
2021-02-22T19:20:22
323,430,276
0
0
null
null
null
null
UTF-8
Python
false
false
13,026
py
import argparse

import torchvision
import torch
from torch.utils import data
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import torchsummary

import util.dataset_util

IF_CUDA = True if torch.cuda.is_available() else False
DEVICE = torch.device('cuda') if IF_CUDA else torch.device('cpu')

TRANS_MEAN = [0.485, 0.456, 0.406]
TRANS_STD = [0.229, 0.224, 0.225]
# src, experimental setting:
# https://github.com/facebookarchive/fb.resnet.torch/blob/master/datasets/imagenet.lua#L69


def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        torch.nn.init.normal_(m.weight, 1.0, 0.02)
        torch.nn.init.zeros_(m.bias)


class ConvDiscriminator(torch.nn.Module):
    def __init__(self, n_ch, img_size):
        super(ConvDiscriminator, self).__init__()
        self.n_ch = n_ch
        self.img_size = img_size

        self.main = torch.nn.Sequential(
            # input is (n_ch) x 64 x 64
            torch.nn.Conv2d(n_ch, img_size, 4, 2, 1, bias=False),
            torch.nn.LeakyReLU(0.2, inplace=True),
            # state size. (img_size) x 32 x 32
            torch.nn.Conv2d(img_size, img_size * 2, 4, 2, 1, bias=False),
            torch.nn.BatchNorm2d(img_size * 2),
            torch.nn.LeakyReLU(0.2, inplace=True),
            # state size. (img_size*2) x 16 x 16
            torch.nn.Conv2d(img_size * 2, img_size * 4, 4, 2, 1, bias=False),
            torch.nn.BatchNorm2d(img_size * 4),
            torch.nn.LeakyReLU(0.2, inplace=True),
            # state size. (img_size*4) x 8 x 8
            torch.nn.Conv2d(img_size * 4, img_size * 8, 4, 2, 1, bias=False),
            torch.nn.BatchNorm2d(img_size * 8),
            torch.nn.LeakyReLU(0.2, inplace=True),
            # state size. (img_size*8) x 4 x 4
            torch.nn.Conv2d(img_size * 8, 1, 4, 1, 0, bias=False),
            # torch.nn.Sigmoid()
        )
        # self.main_activation = torch.nn.Sigmoid()
        return

    def forward(self, x):
        return self.main(x).view(-1, 1)


class ConvGenerator(torch.nn.Module):
    def __init__(self, n_ch, img_size, z_dim):
        super(ConvGenerator, self).__init__()
        self.n_ch = n_ch
        self.img_size = img_size
        self.z_dim = z_dim

        self.main = torch.nn.Sequential(
            # input is Z, going into a convolution
            torch.nn.ConvTranspose2d(z_dim, img_size * 8, 4, 1, 0, bias=False),
            torch.nn.BatchNorm2d(img_size * 8),
            torch.nn.ReLU(True),
            # state size. (img_size*8) x 4 x 4
            torch.nn.ConvTranspose2d(img_size * 8, img_size * 4, 4, 2, 1, bias=False),
            torch.nn.BatchNorm2d(img_size * 4),
            torch.nn.ReLU(True),
            # state size. (img_size*4) x 8 x 8
            torch.nn.ConvTranspose2d(img_size * 4, img_size * 2, 4, 2, 1, bias=False),
            torch.nn.BatchNorm2d(img_size * 2),
            torch.nn.ReLU(True),
            # state size. (img_size*2) x 16 x 16
            torch.nn.ConvTranspose2d(img_size * 2, img_size, 4, 2, 1, bias=False),
            torch.nn.BatchNorm2d(img_size),
            torch.nn.ReLU(True),
            # state size. (img_size) x 32 x 32
            torch.nn.ConvTranspose2d(img_size, n_ch, 4, 2, 1, bias=False),
            torch.nn.Tanh()
            # state size. (n_ch) x 64 x 64
        )
        return

    def forward(self, noise):
        return self.main(noise)


class WGanGP(torch.nn.Module):
    def __init__(self, data_name, n_ch, img_size, z_dim,
                 lr_g, lr_d, lr_beta1, lr_beta2, d_step):
        super(WGanGP, self).__init__()
        self.data_name = data_name
        self.img_shape = (n_ch, img_size, img_size)
        self.z_dim = z_dim
        self.d_step = d_step
        self.gp_lambda = 10.

        self.conv_gen = ConvGenerator(n_ch=n_ch, img_size=img_size, z_dim=z_dim)
        self.conv_dis = ConvDiscriminator(n_ch=n_ch, img_size=img_size)
        # TODO not sure if it is needed to use weight init, but seems better than without init
        self.conv_gen.main.apply(weights_init)  # TODO to find a better method to initialization instead of using main
        self.conv_dis.main.apply(weights_init)

        if IF_CUDA:
            self.conv_gen.cuda()
            self.conv_dis.cuda()

        self.opt_G = torch.optim.Adam(self.conv_gen.parameters(), lr=lr_g, betas=(lr_beta1, lr_beta2))
        self.opt_D = torch.optim.Adam(self.conv_dis.parameters(), lr=lr_d, betas=(lr_beta1, lr_beta2))

        self.criterion = torch.nn.BCELoss()
        return

    def train_net(self, train_loader, n_epoc):
        writer = SummaryWriter(comment=f'_WGAN_GP_{self.data_name}')  # TODO to add hyperparameters
        test_noise = self.generate_noise(64)

        n_sample = len(train_loader.dataset)
        for i in range(n_epoc):
            epoc_l_d, epoc_l_g, epoc_score_p, epoc_score_f1, epoc_score_f2 = 0., 0., 0., 0., 0.
            self.conv_gen.train(), self.conv_dis.train()
            with tqdm(total=len(train_loader), desc=f"epoc: {i + 1}") as pbar:
                for k, (real_img, _) in enumerate(train_loader):
                    if IF_CUDA:
                        real_img = real_img.cuda()

                    d_loss, p_score, f_score1 = self.train_d_step(real_img)
                    g_loss, f_score2 = self.train_g_step(real_img.shape[0])

                    batch_size = real_img.shape[0]
                    epoc_l_d += d_loss * batch_size
                    epoc_l_g += g_loss * batch_size
                    epoc_score_p += p_score * batch_size
                    epoc_score_f1 += f_score1 * batch_size
                    epoc_score_f2 += f_score2 * batch_size

                    pbar.set_postfix({"d_loss": d_loss, "g_loss": g_loss, "p_score": p_score,
                                      "f_score D": f_score1, 'G': f_score2})
                    pbar.update()

                epoc_l_d /= n_sample
                epoc_l_g /= n_sample
                epoc_score_p /= n_sample
                epoc_score_f1 /= n_sample
                epoc_score_f2 /= n_sample
                pbar.set_postfix({"epoch: d_loss": epoc_l_d, "g_loss": epoc_l_g, "p_score": epoc_score_p,
                                  "f_score D": epoc_score_f1, 'G': epoc_score_f2})

            writer.add_scalar('loss/generator', epoc_l_g, i)
            writer.add_scalar('loss/discriminator', epoc_l_d, i)
            writer.add_scalar('score/real', epoc_score_p, i)
            writer.add_scalar('score/fake_D', epoc_score_f1, i)
            writer.add_scalar('score/fake_G', epoc_score_f2, i)

            self.conv_gen.eval(), self.conv_dis.eval()
            test_img = self.conv_gen(test_noise)
            test_img = (test_img + 1.0) / 2.0  # Note that this is important to recover the range
            test_img = test_img.reshape(64, *self.img_shape)
            writer.add_images('img', test_img, i + 1)
        writer.close()
        return

    def train_g_step(self, batch_size):
        fake = self.generate_fake(batch_size)
        lbl = torch.ones(batch_size, device=DEVICE)

        p_f = self.conv_dis(fake)
        loss = -p_f.mean()
        # loss = self.criterion(p_f.reshape(-1), lbl)

        self.opt_G.zero_grad()
        loss.backward()
        self.opt_G.step()
        return loss.item(), p_f.mean().item()

    def train_d_step(self, data_real):
        d_step = self.d_step
        batch_size = data_real.shape[0]

        score_real, score_fake, d_loss = 0., 0., 0.
        for _d in range(d_step):
            data_fake = self.generate_fake(batch_size).detach()

            mix_noise = torch.rand(batch_size, 1, 1, 1).cuda()
            data_mixed = (1-mix_noise) * data_real + mix_noise * data_fake
            data_mixed = data_mixed.detach()
            data_mixed.requires_grad_()

            p_f = self.conv_dis(data_fake)
            p_p = self.conv_dis(data_real)
            p_mix = self.conv_dis(data_mixed)

            loss_1 = p_f - p_p

            # gradient penalty
            grad_p_x = torch.autograd.grad(p_mix.sum(), data_mixed,
                                           retain_graph=True, create_graph=True)[0]
            # p_mix.sum() is a trick to compute each \par y_i / \par x_i independently
            assert grad_p_x.shape == data_mixed.shape
            # print(grad_p_x.shape, data_mixed.shape)
            grad_norm = torch.sqrt(grad_p_x.square().sum(axis=(1, 2, 3)) + 1e-14)
            loss_2 = self.gp_lambda * torch.square(grad_norm - 1.)

            loss = loss_1 + loss_2
            loss = loss.mean()

            self.opt_D.zero_grad()
            loss.backward()
            self.opt_D.step()

            score_real += p_p.mean().item()
            score_fake += p_f.mean().item()
            d_loss += loss.item()
        return d_loss / d_step, score_real / d_step, score_fake / d_step

    # TODO different method to generate noise
    def generate_noise(self, batch_size):
        return torch.randn(batch_size, self.z_dim, 1, 1, device=DEVICE)

    def generate_fake(self, batch_size):
        return self.conv_gen(self.generate_noise(batch_size))


def main(args):
    if args.data == 'MNIST':
        trans = torchvision.transforms.Compose(
            [torchvision.transforms.Resize(args.img_size),
             torchvision.transforms.ToTensor(),
             torchvision.transforms.Normalize([0.5], [0.5])])
    elif args.data == 'CIFAR10':
        trans = torchvision.transforms.Compose(
            [torchvision.transforms.Resize(args.img_size),
             torchvision.transforms.ToTensor(),
             torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    elif args.data == 'HAM10000':
        if args.data_aug is True:
            trans = torchvision.transforms.Compose([
                torchvision.transforms.RandomResizedCrop(size=(args.img_size, args.img_size),
                                                         scale=(0.7, 1.0), ratio=(4 / 5, 5 / 4),
                                                         interpolation=2),
                torchvision.transforms.RandomHorizontalFlip(p=0.5),
                torchvision.transforms.RandomVerticalFlip(p=0.5),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(TRANS_MEAN, TRANS_STD)
            ])
        else:
            trans = torchvision.transforms.Compose([
                torchvision.transforms.Resize((args.img_size, args.img_size)),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(TRANS_MEAN, TRANS_STD)
                # torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
            ])
    else:
        raise Exception('dataset not right')

    train_data, _, img_shape = util.dataset_util.load_dataset(
        dataset_name=args.data, root=args.root, transform=trans, csv_file=args.csv_file)
    n_ch, img_size, _ = img_shape

    train_loader = data.DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True,
                                   drop_last=False, num_workers=4, pin_memory=True)

    dc_gan = WGanGP(data_name=args.data, n_ch=n_ch, img_size=img_size, z_dim=args.z_dim,
                    lr_g=args.lr_g, lr_d=args.lr_d, lr_beta1=args.lr_beta1, lr_beta2=args.lr_beta2,
                    d_step=args.d_step)

    torchsummary.summary(dc_gan.conv_dis, input_size=dc_gan.img_shape, batch_size=-1,
                         device='cuda' if IF_CUDA else 'cpu')
    torchsummary.summary(dc_gan.conv_gen, input_size=(dc_gan.z_dim, 1, 1), batch_size=-1,
                         device='cuda' if IF_CUDA else 'cpu')

    dc_gan.train_net(train_loader=train_loader, n_epoc=args.n_epoc)
    return


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="parse args")
    parser.add_argument('--data', required=True, help='MNIST|CIFAR10|HAM10000')
    parser.add_argument('--root', default='/home/yuan/Documents/datas/', help='root')
    parser.add_argument('--csv-file', default='/home/yuan/Documents/datas/HAM10000/HAM10000_metadata.csv')
    parser.add_argument('--n-epoc', default=25, type=int)
    parser.add_argument('--d-step', default=1, type=int)
    parser.add_argument('--batch-size', default=256, type=int)
    parser.add_argument('--z-dim', default=64, type=int, help='noise shape')
    parser.add_argument('--lr-g', default=3e-4, type=float)
    parser.add_argument('--lr-d', default=3e-4, type=float)
    parser.add_argument('--lr-beta1', default=0.5, type=float)
    parser.add_argument('--lr-beta2', default=0.999, type=float)
    # img_size could not be changed here
    parser.add_argument('--img-size', default=64, type=int, help='resize the img size')
    parser.add_argument('--data-percentage', default=1.0, type=float)
    parser.add_argument('--data-aug', action='store_true', help='if use data augmentation or not')

    para_args = parser.parse_args()
    main(para_args)

# TODO torchsummary; catch ctrl-c; recover from last (writer path, model, optimizer, hyperparameter)
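The core of `train_d_step` above is the gradient penalty evaluated on points interpolated between real and fake batches. A minimal standalone sketch of that term (toy 2-D critic and made-up data, not the record's networks):

import torch
import torch.nn as nn

critic = nn.Sequential(nn.Linear(2, 16), nn.ReLU(), nn.Linear(16, 1))
real = torch.randn(8, 2)
fake = torch.randn(8, 2)

eps = torch.rand(8, 1)  # one mixing weight per sample
mixed = (eps * real + (1 - eps) * fake).requires_grad_(True)
d_mixed = critic(mixed)

# d(sum)/d(mixed) yields per-sample gradients in a single call, as in the file
grad = torch.autograd.grad(d_mixed.sum(), mixed, create_graph=True)[0]
gp = ((grad.norm(2, dim=1) - 1.0) ** 2).mean()  # push gradient norm toward 1
print(gp.item())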
8b08cdeae9a7ca5cd9efe3869115cdb0b331fcc9
7a5a78ede21be8e78a19eb1e48797fa6d6e8642f
/detect_rtsp.py
49b0ea43532112bdb6045a922fc0e6da1fe24de7
[ "MIT" ]
permissive
abc873693/yolov3-tf2
cf8230af5b6683817d6064bc86b7dc98f3e4453f
24ab3eccf55e8ed108fc83335c1ca12a998ff3a7
refs/heads/master
2021-10-26T05:31:55.368987
2019-12-03T02:24:47
2019-12-03T02:24:47
201,893,053
0
0
MIT
2019-12-02T02:17:31
2019-08-12T08:52:15
Python
UTF-8
Python
false
false
2,600
py
import time
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import tensorflow as tf
from yolov3_tf2.models import (
    YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs

import os
import numpy as np

flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf', 'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
# fixed: the URL is a string, so DEFINE_integer would fail at parse time
flags.DEFINE_string('url', 'rtsp://192.168.100.10/h264/ch1/main/av_stream', 'rtsp url')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video.mp4', 'path to video file or number for webcam')


def main(_argv):
    #%%
    if FLAGS.tiny:
        yolo = YoloV3Tiny()
    else:
        yolo = YoloV3()

    yolo.load_weights(FLAGS.weights)
    logging.info('weights loaded')

    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')

    times = []

    cap = cv2.VideoCapture(FLAGS.url)
    out = cv2.VideoWriter('appsrc ! videoconvert ! '
                          'x264enc noise-reduction=10000 speed-preset=ultrafast tune=zerolatency ! '
                          'rtph264pay config-interval=1 pt=96 !'
                          'tcpserversink host=140.117.169.194 port=5000 sync=false',
                          0, 25, (640, 480))

    out_path = './out/'
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    #%%
    while cap.isOpened():
        ret, img = cap.read()
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

        img_in = tf.expand_dims(img, 0)
        img_in = transform_images(img_in, FLAGS.size)

        t1 = time.time()
        boxes, scores, classes, nums = yolo.predict(img_in)
        t2 = time.time()
        # restored: the original never filled `times`, so the average below
        # divided by zero; keep a rolling window of recent inference times
        times.append(t2 - t1)
        times = times[-20:]

        img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        img = cv2.putText(img, "Time: {:.2f}ms".format(sum(times)/len(times)*1000), (0, 30),
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)

        # if(nums > 0):
        #     cv2.imwrite(out_path + 'frame{0}.jpg'.format(index), img)

        frameOfWindows = cv2.resize(img, (800, 600), interpolation=cv2.INTER_CUBIC)
        out.write(frameOfWindows)
        cv2.imshow('output', frameOfWindows)
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()


if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        pass
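The FPS overlay above averages the last few inference times; a `deque` with `maxlen` gives the same rolling window without manual slicing. A dependency-free sketch (the busy-work call stands in for `yolo.predict`):

import time
from collections import deque

times = deque(maxlen=20)  # drop-in for `times = times[-20:]`
for _ in range(100):
    t1 = time.time()
    sum(range(10000))      # stand-in for the model inference call
    times.append(time.time() - t1)
print("avg: {:.2f}ms".format(sum(times) / len(times) * 1000))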
7f7a45fffcfb19dd0b215f1bc5b2c2fa35e9030b
c8e82c528dfe45d5c8beb0bcebd70968ab76fec0
/ftp/mount.py
a3c29ac28b4ef0b0117f98ff7e638b54db02e2d9
[]
no_license
BeatifulLife/otatool
938d26480ada3bc087f7f6d06fdf96c30dbc24cc
54212e6ea908efb1a8b75584d0d12b22913eafc1
refs/heads/master
2021-05-19T05:40:23.087747
2020-03-31T09:13:57
2020-03-31T09:13:57
251,551,629
0
0
null
null
null
null
UTF-8
Python
false
false
649
py
from otautil import *


class Mount:
    'sudo mount -t cifs -o ro,username=xuzhaoyou,password=mobile#3 //192.168.8.206/data/data server'

    def __init__(self, localdir, server, username, password):
        self.server = server
        self.localdir = localdir
        self.username = username
        self.password = password

    def doMount(self):
        assert(self.server is not None)
        assert(self.localdir is not None)
        assert(self.username is not None)
        assert(self.password is not None)
        _, recode = runCommand("sudo mount -t cifs -o ro,username=" + self.username
                               + ",password=" + self.password + " "
                               + self.server + " " + self.localdir)
        if recode == 0:
            return True
        else:
            return False
4d5c6fccf789eecb33e8337992743ceee6b298af
dfc292644081c4a12a8c4ab407cf90a2c2dd9a48
/travelpro/travelpro/wsgi.py
bb09e6633dd334633017544dbd6c731ef6015940
[]
no_license
safirmeeyana/safirproject
90326bcb58f0557dfc7fbbf877bb5d44ecb32201
7c393ac7597a3347a456befd94c9b2633d6ae4e7
refs/heads/master
2023-02-13T06:31:32.673452
2021-01-14T06:22:42
2021-01-14T06:22:42
329,524,725
1
0
null
null
null
null
UTF-8
Python
false
false
395
py
""" WSGI config for travelpro project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'travelpro.settings') application = get_wsgi_application()
745fc542dfa60b44270bb9f54dbb2a5d6b4dbbfa
7f398550c5676aa917198f01d2ccc1f59fe047a0
/coffee.py
1ef729d116dee5758aa9591e4065fe8615793557
[]
no_license
lsteiner9/python-chapters-1-to-3
0ee38e2e44389e67c4e85c0aaa10b19850bb9ee3
8b830135d5ee41d9a28915705545a38e710db2af
refs/heads/master
2023-03-17T20:53:33.779359
2021-03-18T02:00:34
2021-03-18T02:00:34
348,909,438
0
0
null
null
null
null
UTF-8
Python
false
false
239
py
# coffee.py

def main():
    print("This program calculates the cost of a coffee order.")
    pounds = float(input("Enter the number of pounds of coffee ordered: "))
    print("The price of this order is:", pounds * 11.36 + 1.50)

main()
d62187ceef71a3b7a888fd8d1a7051f01e50144c
d2d4b3e707a483b25c741396069923fcccccb993
/smartmarket/shops/migrations/0001_initial.py
bc6f3f54e32f679a21d989a2f3a82829ec0a0e9f
[]
no_license
DiegoRinconC/tesis
7cda2f47190fc31b325825e632134211105d405f
2daf738e2101f27253c6e90fe1c71d2f8f80cf2d
refs/heads/master
2020-04-02T03:18:40.700140
2018-10-23T03:38:47
2018-10-23T03:38:47
153,957,609
0
0
null
null
null
null
UTF-8
Python
false
false
1,601
py
# Generated by Django 2.1.2 on 2018-10-23 01:43

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('users', '0002_auto_20181020_1816'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='BrandShop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('brand_shop', models.CharField(max_length=200)),
                ('modified_date', models.DateField(auto_now=True)),
                ('modified_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Shop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('shop', models.CharField(max_length=200)),
                ('modified_date', models.DateField(auto_now=True)),
                ('brand_shop', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='shops.BrandShop')),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='users.City')),
                ('modified_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
4425abce78cc5ab8f0241155c9e5248cd9b9b861
8cacef299fbbedd6e46ec02d274b1baa82433ef8
/DriverFiles/load_test_session.py
7c72d2936131ac52e3ab793382bd9ff079eb79f1
[]
no_license
PotionSell/Buzsaki-Data-Import
37cdf4ccf440b2e60153d3aa1435611709ea987a
af5f5db900d76bffccfdc5e835425060fb65d69b
refs/heads/master
2021-01-19T04:25:26.993240
2016-07-11T15:44:37
2016-07-11T15:44:37
60,122,373
0
0
null
null
null
null
UTF-8
Python
false
false
662
py
execfile('BuzsakiSession.py')
execfile('write_nwb.py')

#session = Session('ec013.156')
#session.load_LFPdata()
#lfp = session.get_shankLFP(0, True)
#csd = session.get_CSD(4)

write_nwb('ec012ec.356')
write_nwb('ec013.156')
write_nwb('ec013.157')
write_nwb('ec013.756')
write_nwb('ec013.965')
write_nwb('ec014.468')
write_nwb('ec014.639')
write_nwb('ec016.234')
write_nwb('ec016.749')

#os.chdir(cwd)
#execfile('dict_to_arr.py')
#execfile('plot_Signal.py')
#execfile('filter_LFP.py')
#execfile('hilbert.py')
#execfile('dict_to_arr.py')

#t = session.LFP_timestamps
#filt = filter_LFP(t, lfp, session.LFP_rate, 'theta', False)
#phase, amp, hilbData = hilbert(filt)
56b674ad741b5ec115b231c05945478a3cee3b59
9a104370627671e0549913194c79329920b76342
/attention_guidance/ag_models/wandb_utils.py
98591d8111044cd210b4138bff1894e0d52a282b
[ "Apache-2.0", "MIT" ]
permissive
ameet-1997/AttentionGuidance
8dcd115ce6be0752de108b68cd798f6200fa62d5
8e1e6c3855125fe8f1485fbe57d51285edebfade
refs/heads/main
2023-02-15T08:08:38.248050
2023-02-08T03:07:03
2023-02-08T03:07:03
300,993,462
9
1
null
null
null
null
UTF-8
Python
false
false
580
py
import wandb
import os


def wandb_init_setup(args):
    '''
    Uses API key and sets initial config and hyperparameters
    '''
    # Ameet's wandb key
    os.environ["WANDB_API_KEY"] = "a8d4de02e5bbee944cdfa143d1dba8f1a7b63fb4"
    os.environ["WANDB_WATCH"] = "false"
    os.environ["WANDB_PROJECT"] = args.wandb_project
    os.environ["WANDB_NAME"] = args.wandb_name
    if args.disable_wandb:
        os.environ["WANDB_DISABLED"] = 'true'

    # # Initialize with hyperparameters and project name
    # wandb.init(config=args, name=args.wandb_name, project=args.wandb_project)
6466a29180d397b35f5306a979bfa235487516c3
06a50cfded23b760d5b2a5ae7d5c4761ae2d4dc8
/auto_upgrade.py
2b5eb7a276e28b168e17571ac64e9aeaa69017d4
[ "Apache-2.0" ]
permissive
spencerzhang91/coconuts-on-fire
b0655b3dd2b310b5e62f8cef524c6fddb481e758
407d61b3583c472707a4e7b077a9a3ab12743996
refs/heads/master
2021-09-21T07:37:07.879409
2018-08-22T03:24:36
2018-08-22T03:24:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
253
py
#! /usr/local/bin/python3
# can not be used on windows due to line end difference.

import pip
from subprocess import call

# Note: pip.get_installed_distributions() was removed from pip's public API in
# pip 10, so this script requires pip < 10.
for dist in pip.get_installed_distributions():
    call("pip3 install --upgrade --no-cache-dir " + dist.project_name, shell=True)
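On pip >= 10 the same package listing can come from the standard library instead of pip internals; a sketch using `importlib.metadata` (Python 3.8+, my suggested alternative, not part of the record):

# List installed distribution names without importing pip internals.
from importlib.metadata import distributions

names = sorted({dist.metadata["Name"] for dist in distributions()
                if dist.metadata["Name"]})
for name in names:
    print(name)  # feed these to `pip3 install --upgrade ...` as above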
e73c0dcd93ba153ddfdbac5cdf8ed995b6b030ab
fbb3a1843b541ee118d4ba686552c063152fb3b2
/sorting.py
73a2d82e7c52c0228e4098e54217b6435193a240
[]
no_license
yusmasv/Quick---Selection-Sort-Visualization
2bf661ab8ea31b77826a0664ab3bdba995b37a67
ee82b345d43dcd1105db323bb4ee7a9282fe39ed
refs/heads/main
2023-01-29T09:21:39.972824
2020-12-09T12:25:25
2020-12-09T12:25:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,423
py
import random
import time

import matplotlib.pyplot as plt
import matplotlib.animation as animation

plt.style.use('dark_background')


def swap(A, i, j):
    if i != j:
        A[i], A[j] = A[j], A[i]


def quicksort(A, start, end):
    """In-place quicksort."""
    if start >= end:
        return

    pivot = A[end]
    pivotIdx = start

    for i in range(start, end):
        if A[i] < pivot:
            swap(A, i, pivotIdx)
            pivotIdx += 1
        yield A
    swap(A, end, pivotIdx)
    yield A

    yield from quicksort(A, start, pivotIdx - 1)
    yield from quicksort(A, pivotIdx + 1, end)


def selectionsort(A):
    """In-place selection sort."""
    if len(A) == 1:
        return

    for i in range(len(A)):
        # Find minimum unsorted value.
        minVal = A[i]
        minIdx = i
        for j in range(i, len(A)):
            if A[j] < minVal:
                minVal = A[j]
                minIdx = j
            yield A
        swap(A, i, minIdx)
        yield A


if __name__ == "__main__":
    # Get user input to determine range of integers (1 to N) and desired
    # sorting method (algorithm).
    N = int(input("Enter number of integers: "))
    method_msg = "Enter sorting method:\n(q)uick\n(s)election\n"
    method = input(method_msg)

    # Build and randomly shuffle list of integers.
    A = [x + 1 for x in range(N)]
    random.seed(time.time())
    random.shuffle(A)

    # Get appropriate generator to supply to matplotlib FuncAnimation method.
    if method == "q":
        title = "Quicksort"
        generator = quicksort(A, 0, N - 1)
    else:
        title = "Selection sort"
        generator = selectionsort(A)

    fig, ax = plt.subplots()
    ax.set_title(title)

    bar_rects = ax.bar(range(len(A)), A, align="edge")

    ax.set_xlim(0, N)
    ax.set_ylim(0, int(1.07 * N))

    text = ax.text(0.02, 0.95, "", transform=ax.transAxes)

    iteration = [0]

    def update_fig(A, rects, iteration):
        for rect, val in zip(rects, A):
            rect.set_height(val)
        iteration[0] += 1
        text.set_text("# of operations: {}".format(iteration[0]))

    anim = animation.FuncAnimation(fig, func=update_fig,
                                   fargs=(bar_rects, iteration),
                                   frames=generator, interval=1,
                                   repeat=False)
    fig.savefig('my_figure.jpg')
    plt.show()
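The animation above works because each sort is a generator that yields the array after every step; the same frames can be consumed without matplotlib at all. A minimal sketch (condensed selection sort, names are mine):

# Consume a step-yielding sort as plain frames (the protocol FuncAnimation uses).
def selection_steps(a):
    for i in range(len(a)):
        m = min(range(i, len(a)), key=a.__getitem__)
        a[i], a[m] = a[m], a[i]
        yield list(a)

for frame in selection_steps([4, 1, 3, 2]):
    print(frame)  # each print is one animation frame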
a75a731523cf01b16cc1565e94dbc9f9a578895a
9b76741992c13b661dd9c70522d7fe9ad6086cde
/holoviews_test.py
8de05da2b020393a64ef6d564dda844f85ac53e5
[]
no_license
rafaelha/py_models
681078d52a76da20ed29f0a498b77c8a4fb88ae0
0b5267bc824567de7495c432255ec88139cdd17a
refs/heads/master
2023-02-22T03:43:02.617451
2018-10-04T18:33:11
2018-10-04T18:33:11
126,767,708
0
0
null
null
null
null
UTF-8
Python
false
false
562
py
import holoviews as hv
import numpy as np
import holoviews.plotting.mpl

#renderer = hv.Store.renderers['matplotlib']
renderer = hv.renderer('matplotlib')  #.instance(fig='svg', holomap='gif')

frequencies = [0.5, 0.75, 1.0, 1.25]

def sine_curve(phase, freq):
    xvals = [0.1 * i for i in range(100)]
    return hv.Curve((xvals, [np.sin(phase + freq * x) for x in xvals]))

curve_dict = {f: sine_curve(0, f) for f in frequencies}

hmap = hv.HoloMap(curve_dict, kdims='frequency')

widget = renderer.get_widget(hmap, 'widgets')
#renderer.show(hmap)
renderer.show(widget)
5430d2daacfc6a75623004d72dfaed442200718c
7b8fd24cc6dbed385173a3857c06f2935724ace6
/LeetCode/T-46.py
60d02e43d91078c029f479bbb71b66d9607360df
[]
no_license
Yang-Jianlin/python-learn
eb1cfd731039a8e375827e80b8ef311f9ed75bfb
048cde2d87e4d06a48bd81678f6a82b02e7c4cb4
refs/heads/master
2023-07-12T16:35:13.489422
2021-08-23T11:54:10
2021-08-23T11:54:10
357,464,365
0
0
null
null
null
null
UTF-8
Python
false
false
681
py
class Solution:
    def __init__(self):
        self.res = []
        self.temp = []

    def permute(self, nums):
        n = 1
        for i in range(1, len(nums) + 1):
            n *= i
        self.dfs(nums, 0, n)
        return self.res

    def dfs(self, nums, position, n):
        if position == len(nums):
            self.res.append(self.temp[:])
            return
        else:
            for i in nums:
                if i not in self.temp:
                    self.temp.append(i)
                    self.dfs(nums, position + 1, n)
                    self.temp.pop()


if __name__ == '__main__':
    s = Solution()
    nums = [1, 2, 3]
    print(s.permute(nums))
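The backtracking above should agree with the standard library; a self-contained cross-check (condensed version of the same idea, not part of the record):

from itertools import permutations

def permute(nums):
    # same backtracking idea as the record above, condensed
    res, temp = [], []
    def dfs():
        if len(temp) == len(nums):
            res.append(temp[:])
            return
        for x in nums:
            if x not in temp:
                temp.append(x)
                dfs()
                temp.pop()
    dfs()
    return res

assert sorted(permute([1, 2, 3])) == sorted(map(list, permutations([1, 2, 3])))
print("backtracking matches itertools.permutations")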
7fff043e3f126009e64219c576fed17d3c9b08c1
f482839a5b2cf75d0ce38755d8aeefff8911e35d
/tictactoe_minimax.py
ee831bc33814df2a17a48f368646f5778e228170
[]
no_license
gmiller148/TicTacToe_Algos
aa666946a536668e683d36432e47372a140ce402
ad4f8a2bbad62816b7199eeb80e0574b7101d227
refs/heads/master
2020-04-26T16:28:42.055745
2019-03-04T05:35:07
2019-03-04T05:35:07
173,679,933
0
0
null
null
null
null
UTF-8
Python
false
false
5,385
py
class TicTacToe:
    def __init__(self, turn=-1):
        self.board = [[0, 0, 0],
                      [0, 0, 0],
                      [0, 0, 0]]
        self.turn = turn
        self.state = 'ongoing'

    def display(self):
        for i in range(3):
            row_res = ''
            for j in range(3):
                if self.board[i][j] == 0:
                    row_res += ' - '
                elif self.board[i][j] == -1:
                    row_res += ' X '
                elif self.board[i][j] == 1:
                    row_res += ' O '
            print(row_res)
        print('________')

    def check_victory(self):
        for row in self.board:
            rs = sum(row)
            if rs == 3:
                self.state = 'over'
                return (True, 'O', 1)
            elif rs == -3:
                self.state = 'over'
                return (True, 'X', -1)
        for i in range(3):
            cs = self.board[0][i] + self.board[1][i] + self.board[2][i]
            if cs == 3:
                self.state = 'over'
                return (True, 'O', 1)
            elif cs == -3:
                self.state = 'over'
                return (True, 'X', -1)
        diag1 = sum([self.board[x][x] for x in range(3)])
        diag2 = sum([self.board[2-x][x] for x in range(3)])
        if diag1 == 3:
            self.state = 'over'
            return (True, 'O', 1)
        elif diag1 == -3:
            self.state = 'over'
            return (True, 'X', -1)
        if diag2 == 3:
            self.state = 'over'
            return (True, 'O', 1)
        elif diag2 == -3:
            self.state = 'over'
            return (True, 'X', -1)
        return (False, '', 0)

    def make_move(self, x, y):
        if self.board[x][y] == 0:
            self.board[x][y] = self.turn
        else:
            print("Invalid move at x:", x, "y:", y)
            return
        res = self.check_victory()
        if res[0]:
            print("Game Over", res[1], "won")
        else:
            self.turn = -self.turn


class Player:
    def __init__(self, symbol):
        self.symbol = symbol

    def find_moves(self, board):
        moves = []
        for i in range(3):
            for j in range(3):
                if board[i][j] == 0:
                    moves.append((i, j))
        return moves

    def find_best_move(self, board):
        best_move = None
        highest_value = -10000
        for move in self.find_moves(board):
            board[move[0]][move[1]] = self.symbol
            value = self.minimax(board)
            if value >= highest_value:
                highest_value = value
                best_move = move
            board[move[0]][move[1]] = 0
        return best_move

    def minimax(self, board, depth=0, is_max_player=False):
        status = self.check_victory(board)
        if status[0]:
            if self.symbol == status[2]:
                return 10 - depth
            else:
                return -10 + depth
        if not self.moves_left(board):
            return 0
        if is_max_player:
            best_val = -1000
            for move in self.find_moves(board):
                board[move[0]][move[1]] = self.symbol
                value = self.minimax(board, depth+1, False)
                best_val = max(value, best_val)
                board[move[0]][move[1]] = 0
            return best_val
        else:
            best_val = 1000
            for move in self.find_moves(board):
                board[move[0]][move[1]] = -1*self.symbol
                value = self.minimax(board, depth+1, True)
                best_val = min(value, best_val)
                board[move[0]][move[1]] = 0
            return best_val

    def check_victory(self, board):
        for row in board:
            rs = sum(row)
            if rs == 3:
                return (True, 'O', 1)
            elif rs == -3:
                return (True, 'X', -1)
        for i in range(3):
            cs = board[0][i] + board[1][i] + board[2][i]
            if cs == 3:
                return (True, 'O', 1)
            elif cs == -3:
                return (True, 'X', -1)
        diag1 = sum([board[x][x] for x in range(3)])
        diag2 = sum([board[2-x][x] for x in range(3)])
        if diag1 == 3:
            return (True, 'O', 1)
        elif diag1 == -3:
            return (True, 'X', -1)
        if diag2 == 3:
            return (True, 'O', 1)
        elif diag2 == -3:
            return (True, 'X', -1)
        return (False, '', 0)

    def moves_left(self, board):
        for i in range(3):
            for j in range(3):
                if board[i][j] == 0:
                    return True
        return False

    def display(self, board):
        for i in range(3):
            row_res = ''
            for j in range(3):
                if board[i][j] == 0:
                    row_res += ' - '
                elif board[i][j] == -1:
                    row_res += ' X '
                elif board[i][j] == 1:
                    row_res += ' O '
            print(row_res)
        print('_________')


t = TicTacToe()
t.display()
p = Player(-1)

while t.state == 'ongoing':
    move = p.find_best_move(t.board)
    t.make_move(move[0], move[1])
    t.display()
    if t.state != 'ongoing':
        break
    x = int(input('Row : '))
    y = int(input('Col : '))
    t.make_move(x, y)
    t.display()
8c609ecead5fa0b54d67b5af1fe3c7fc57656e93
f25085778485d49da4fd587a034b037df0ea98f9
/interview/findSubstrings.py
614cc8f9c71591756dbc4b3442c8a686cc32688b
[]
no_license
davcs86/codefights
477d733511a6639668a46fe55ffd47e832aa356c
f6f42c6635c48877ea9904e05bed2c029271c1d9
refs/heads/master
2021-01-23T01:17:18.042934
2017-04-28T00:05:53
2017-04-28T00:05:53
85,892,470
0
0
null
null
null
null
UTF-8
Python
false
false
928
py
def findSubstrings(words, parts):
    parts = sorted(parts, key=len, reverse=True)
    for i, w in enumerate(words):
        psz = 0
        ppos = len(w)
        nw = w
        for p in parts:
            if len(p) >= psz and len(p) <= len(w):
                pos = w.find(p)
                if (len(p) > psz or pos < ppos) and pos >= 0:
                    # found
                    psz = len(p)
                    ppos = pos
                    nw = w.replace(p, "[" + p + "]", 1)
            if len(p) < psz:
                break
        words[i] = nw
    return words


words = ["neuroses", "myopic", "sufficient", "televise", "coccidiosis",
         "gules", "during", "construe", "establish", "ethyl"]
parts = ["aaaaa", "Aaaa", "E", "z", "Zzzzz", "a", "mel", "lon", "el", "An",
         "ise", "d", "g", "wnoVV", "i", "IUMc", "P", "KQ", "QfRz", "Xyj", "yiHS"]

print(parts)
print(findSubstrings(words, parts))
8a453791cd356fd9608e74273bddc8d2c8f8e1f1
d53f5cabda6350d9cf0b0d7b2ce0d271b21c8b8e
/flamingo/core/templating/__init__.py
18dd35a059fef1c9737dc1722e512c0c46973db5
[ "Apache-2.0" ]
permissive
pengutronix/flamingo
527c82add7373122c243996b35fac28253639743
e43495366ee73913f2d4565f865c04f90dc95f8d
refs/heads/master
2023-05-10T17:21:26.998164
2023-04-28T09:04:59
2023-04-28T09:04:59
156,219,977
23
10
Apache-2.0
2023-04-28T09:05:01
2018-11-05T13:12:34
JavaScript
UTF-8
Python
false
false
78
py
from .base import TemplatingEngine  # NOQA
from .jinja2 import Jinja2  # NOQA
c383ce7e879faf5ca5db41e4b51971d7be46d695
435723c2128a8a125ebc0bd4fdd57b2e438174a0
/tests/emissionLines/test_fluxes.py
404ae8362078d91193460af65617c20cd520c088
[]
no_license
galacticusorg/analysis-python
824e7a0311329531e42eb06fc99298cf371ec75f
09e03f8d25ab6711b4e2783454acca1422e7bc59
refs/heads/master
2022-03-10T18:39:03.766749
2022-03-03T14:49:25
2022-03-03T14:49:25
203,855,262
1
0
null
null
null
null
UTF-8
Python
false
false
5,496
py
#! /usr/bin/env python

import sys, os
import fnmatch
import numpy as np
import unittest
import warnings
from shutil import copyfile
from galacticus import rcParams
from galacticus.Cloudy import CloudyTable
from galacticus.galaxies import Galaxies
from galacticus.io import GalacticusHDF5
from galacticus.data import GalacticusData
from galacticus.constants import luminositySolar
from galacticus.constants import luminosityAB, erg
from galacticus.constants import mega, centi, parsec
from galacticus.constants import Pi
from galacticus.emissionLines.fluxes import EmissionLineFlux, ergPerSecondPerCentimeterSquared


class TestFluxes(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        DATA = GalacticusData()
        self.snapshotFile = DATA.searchDynamic("galacticus.snapshotExample.hdf5")
        self.lightconeFile = DATA.searchDynamic("galacticus.lightconeExample.hdf5")
        self.removeSnapshotExample = False
        self.removeLightconeExample = False
        # If the file does not exist, create a copy from the static version.
        if self.snapshotFile is None:
            self.snapshotFile = DATA.dynamic + "/examples/galacticus.snapshotExample.hdf5"
            self.removeSnapshotExample = True
            if not os.path.exists(DATA.dynamic + "/examples"):
                os.makedirs(DATA.dynamic + "/examples")
            copyfile(DATA.static + "/examples/galacticus.snapshotExample.hdf5", self.snapshotFile)
        if self.lightconeFile is None:
            self.lightconeFile = DATA.dynamic + "/examples/galacticus.lightconeExample.hdf5"
            self.removeLightconeExample = True
            if not os.path.exists(DATA.dynamic + "/examples"):
                os.makedirs(DATA.dynamic + "/examples")
            copyfile(DATA.static + "/examples/galacticus.lightconeExample.hdf5", self.lightconeFile)
        # Initialize the Totals class.
        GH5 = GalacticusHDF5(self.lightconeFile, 'r')
        GALS = Galaxies(GH5Obj=GH5)
        self.LINES = EmissionLineFlux(GALS)
        return

    @classmethod
    def tearDownClass(self):
        # Clear memory and close/delete files as necessary.
        self.LINES.galaxies.GH5Obj.close()
        del self.LINES
        if self.removeSnapshotExample:
            os.remove(self.snapshotFile)
        if self.removeLightconeExample:
            os.remove(self.lightconeFile)
        return

    def test_FluxesMatches(self):
        # Tests for correct dataset names
        for line in self.LINES.CLOUDY.listAvailableLines():
            for component in ["disk", "spheroid"]:
                name = component + "LineFlux:" + line + ":rest:z1.000"
                self.assertTrue(self.LINES.matches(name))
                name = component + "LineFlux:" + line + ":observed:SDSS_r:z1.000"
                self.assertTrue(self.LINES.matches(name))
                name = component + "LineFlux:" + line + ":observed:z1.000:recent"
                self.assertTrue(self.LINES.matches(name))
                name = component + "LineFlux:" + line + ":rest:SDSS_g:z1.000:recent"
                self.assertTrue(self.LINES.matches(name))
        # Tests for incorrect dataset names
        name = "diskLineFlux:notAnEmissionLine:rest:z1.000"
        self.assertFalse(self.LINES.matches(name, raiseError=False))
        self.assertRaises(RuntimeError, self.LINES.matches, name, raiseError=True)
        for name in ["totalLineFlux:balmerAlpha6563:rest:z1.000",
                     "diskLineFlux:SDSS_r:rest:z1.000",
                     "diskLineFlux:balmerAlpha6563:obs:z1.000",
                     "diskLineFlux:balmerAlpha6563:observed:1.000",
                     "diskLineFlux:balmerAlpha6563:rest:z1.000:dustAtlas",
                     "diskLineFlux:balmerAlpha6563:z1.000"]:
            self.assertFalse(self.LINES.matches(name, raiseError=False))
            self.assertRaises(RuntimeError, self.LINES.matches, name, raiseError=True)
        return

    def test_FluxesGet(self):
        # Check bad names
        redshift = 1.0
        name = "totalLineFlux:balmerAlpha6563:rest:z1.000"
        with self.assertRaises(RuntimeError):
            DATA = self.LINES.get(name, redshift)
        # Check values
        zStr = self.LINES.galaxies.GH5Obj.getRedshiftString(redshift)
        component = "disk"
        for line in self.LINES.CLOUDY.listAvailableLines()[:1]:
            fluxName = component + "LineFlux:" + line + ":rest:" + zStr
            luminosityName = component + "LineLuminosity:" + line + ":rest:" + zStr
            GALS = self.LINES.galaxies.get(redshift, properties=["redshift", luminosityName])
            luminosityDistance = self.LINES.galaxies.GH5Obj.cosmology.luminosity_distance(GALS["redshift"].data)
            flux = GALS[luminosityName].data / (4.0 * Pi * luminosityDistance**2)
            DATA = self.LINES.get(fluxName, redshift)
            self.assertEqual(DATA.name, fluxName)
            self.assertTrue(np.array_equal(flux, DATA.data))
        # Check error raised for snapshot output
        return

    def test_ergPerSecondPerCentimeterSquared(self):
        flux0 = np.random.rand(50) * 0.04 + 0.01
        # Check conversion
        flux = np.log10(np.copy(flux0))
        flux += np.log10(luminositySolar)
        flux -= np.log10(erg)
        flux -= np.log10((mega * parsec / centi)**2)
        flux = 10.0**flux
        self.assertTrue(np.array_equal(flux, ergPerSecondPerCentimeterSquared(flux0)))
        return


if __name__ == "__main__":
    unittest.main()
571c97500fcd77b7f891fed895e3e953e3f3cc95
d7ea218f90ed241255c49db0472eefec0e78f93f
/savanna/plugins/hdp/validator.py
60980e957eb4280d68f570b4f3f80f241711b31c
[ "Apache-2.0" ]
permissive
simedcn/savanna
5829c1119930ed02bd09124224962230d0ac71f0
fc02c010db12c4bdf24c67eb0eb94026252355d0
refs/heads/master
2021-01-14T13:06:31.313572
2013-08-21T17:44:48
2013-08-21T17:44:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,077
py
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect

import savanna.exceptions as e
from savanna.plugins.general import exceptions as ex
from savanna.plugins.general import utils


class Validator(object):
    def validate(self, cluster):
        funcs = inspect.getmembers(Validator, predicate=inspect.ismethod)
        for func in funcs:
            if func[0].startswith("check_"):
                getattr(self, func[0])(cluster)

    def check_for_namenode(self, cluster):
        count = sum([ng.count for ng
                     in utils.get_node_groups(cluster, "NAMENODE")])
        if count != 1:
            raise ex.NotSingleNameNodeException(count)

    def check_for_jobtracker_and_tasktracker(self, cluster):
        jt_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "JOBTRACKER")])
        if jt_count not in [0, 1]:
            raise ex.NotSingleJobTrackerException(jt_count)

        tt_count = sum([ng.count for ng
                        in utils.get_node_groups(cluster, "TASKTRACKER")])
        if jt_count == 0 and tt_count > 0:
            raise ex.TaskTrackersWithoutJobTracker()

    def check_for_ambari_server(self, cluster):
        count = sum([ng.count for ng
                     in utils.get_node_groups(cluster, "AMBARI_SERVER")])
        if count != 1:
            raise NotSingleAmbariServerException(count)

    def check_for_ambari_agents(self, cluster):
        for ng in cluster.node_groups:
            if "AMBARI_AGENT" not in ng.node_processes:
                raise AmbariAgentNumberException(ng.name)


class NoNameNodeException(e.SavannaException):
    def __init__(self):
        message = "Hadoop cluster should contain at least one namenode"
        code = "NO_NAMENODE"
        super(NoNameNodeException, self).__init__(message, code)


class NotSingleAmbariServerException(e.SavannaException):
    def __init__(self, count):
        message = ("Hadoop cluster should contain 1 Ambari Server "
                   "instance. Actual Ambari server count is %s" % count)
        code = "NOT_SINGLE_AMBARI_SERVER"
        super(NotSingleAmbariServerException, self).__init__(message, code)


class AmbariAgentNumberException(e.SavannaException):
    def __init__(self, count):
        message = ("Hadoop cluster should have an ambari agent per "
                   "node group. Node group %s has no Ambari Agent" % count)
        code = "WRONG_NUMBER_AMBARI_AGENTS"
        super(AmbariAgentNumberException, self).__init__(message, code)
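# Usage sketch (the cluster object is hypothetical, supplied by the caller):
# validate() introspects the class and dispatches every method whose name
# starts with "check_", so adding a validation rule only requires adding a
# new check_* method.
#
#     Validator().validate(cluster)
#     # runs check_for_namenode, check_for_jobtracker_and_tasktracker,
#     # check_for_ambari_server and check_for_ambari_agents in turn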
d24e314a5efa4ce965577b2a2cfb1f67ccebd1d6
eafed2a5d7de4db7e3c37bfdb2d2f2b1069e80c0
/api/app/labeller/client.py
2d2ce90abe135c6892d62370bd29c3a3a67a0b74
[]
no_license
philipk19238/klarity
53123aa52abba62bcc62b381599196b13640ba4b
11335cc74d5433e19e218a9a9b3e43acd669b789
refs/heads/master
2023-08-13T08:35:02.485853
2021-10-17T19:08:56
2021-10-17T19:08:56
417,892,692
1
3
null
null
null
null
UTF-8
Python
false
false
1,598
py
from collections import defaultdict

from .constants import (
    MaterialConstant,
    TypeConstant,
    ColorConstant,
    SizeConstant,
    LocationConstant
)
from .trie import Trie
from .tokenizer import Tokenizer


class LabelerClient:

    def __init__(self, stop_words):
        self.trie = Trie()
        self.tokenizer = Tokenizer(stop_words)
        self.init_constants(
            MaterialConstant,
            TypeConstant,
            ColorConstant,
            SizeConstant,
            LocationConstant
        )

    def init_constants(self, *args):
        for constant in args:
            self.trie.insert_constant(constant)

    def update_model(self, model, labels):
        tags = model.tags
        for k, v in labels.items():
            tags[k] = v
        model.tags = tags
        return model

    def label(self, model):
        title = self.tokenizer.clean(model.title)
        desc = self.tokenizer.clean(model.description)
        title_labels = self.find_labels(title)
        desc_labels = self.find_labels(desc)
        merged_labels = self.merge_dicts(desc_labels, title_labels)
        return self.update_model(model, merged_labels)

    def find_labels(self, sentence):
        res = defaultdict(set)
        pairs = self.trie.search_sentence(sentence)
        for key, word in pairs:
            res[key].add(word)
        return res

    def merge_dicts(self, *args):
        res = defaultdict(set)
        for to_merge in args:
            for k, v in to_merge.items():
                res[k] = res[k] | v
        return res
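# Usage sketch (the model object and stop words are illustrative; Trie and
# Tokenizer come from the sibling modules imported above):
#
#     client = LabelerClient(stop_words={"the", "a", "of"})
#     labelled = client.label(model)   # model needs .title, .description, .tags
#     print(labelled.tags)             # e.g. {"color": {"red"}, "size": {"xl"}}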
4d807c601f9a24cfa37be0f007e051f306400386
3848612966f853b70167c2e5606e5451dd0ac8f7
/architecture/make_arch/examples/memcached_path.py
10a22d2ece5ed4d29a88586284b9ec3373ebc707
[ "MIT" ]
permissive
delimitrou/uqsim-power-management-beta
1b99e3c03af812d13dbca573fd712034be75853e
87f4483a644e6dfc2c3e96497b0920e62b1f2b80
refs/heads/master
2022-06-10T17:17:25.345276
2022-05-13T17:00:08
2022-05-13T17:00:08
260,951,471
2
2
null
2022-05-13T17:00:09
2020-05-03T14:59:52
null
UTF-8
Python
false
false
745
py
import sys
import os
import json
import make_arch as march

def main():
    node_0 = march.make_serv_path_node(servName="memcached", servDomain="", codePath=0,
                                       startStage=0, endStage=-1, nodeId=0,
                                       needSync=False, syncNodeId=None, childs=[1])
    node_1 = march.make_serv_path_node(servName="client", servDomain="", codePath=-1,
                                       startStage=0, endStage=-1, nodeId=1,
                                       needSync=False, syncNodeId=None, childs=[])
    nodeList = [node_0, node_1]
    memc_read_only_path = march.make_serv_path(pathId=0, entry=0, prob=1.0, nodes=nodeList)
    paths = [memc_read_only_path]
    with open("/home/zhangyanqi/cornell/SAIL/microSSim/architecture/memcached/path.json", "w+") as f:
        json.dump(paths, f, indent=2)

if __name__ == "__main__":
    main()
4e5a2b20a95130193194dab51ac984aab4b65175
f24cccd40b8770f3da983e45a7fd3c166331b2fa
/Python_Basics/display_output.py
73577e849804269937097f1150b4bfef7e944fde
[]
no_license
srajesh636/python_basics
4b09fb777a626c4fdba467dcde5b80b7804539be
0b26d85bf61659c9a8c4f8468c7a9a8ee29c7873
refs/heads/master
2020-03-16T07:59:38.412131
2018-05-08T11:09:29
2018-05-08T11:09:29
132,588,025
0
0
null
null
null
null
UTF-8
Python
false
false
67
py
print("print method is used to display the content on the screen")
97c31e1e4b55d0cb7b5ac2dd08339b8d13a6014d
0e114f7df2b112511785e21626bb6bdb220b5a6c
/NMS/classes/TkSceneNodeData.py
633df0ebb7284d5ec9ada0c0b5708fd56cf941c6
[]
no_license
monkeyman192/NMSDK
020c580bc7b0517bdef5b28d167924fde51dfa7f
c94bb9071e576fd16650f0b26fc5d681181976af
refs/heads/master
2023-08-09T09:08:40.453170
2023-07-26T23:28:53
2023-07-26T23:28:53
73,231,820
25
6
null
2023-07-26T23:10:29
2016-11-08T22:13:48
Python
UTF-8
Python
false
false
744
py
# TkSceneNodeData struct

from .Struct import Struct
from .String import String
from .TkTransformData import TkTransformData
from .List import List


class TkSceneNodeData(Struct):
    def __init__(self, **kwargs):
        super(TkSceneNodeData, self).__init__()

        """ Contents of the struct """
        self.data['Name'] = String(kwargs.get('Name', ""), 0x80)
        self.data['NameHash'] = kwargs.get('NameHash', 0)
        self.data['Type'] = String(kwargs.get('Type', 'MODEL'), 0x10)
        self.data['Transform'] = kwargs.get('Transform', TkTransformData())
        self.data['Attributes'] = kwargs.get('Attributes', List())
        self.data['Children'] = kwargs.get('Children', List())
        """ End of the struct contents"""
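# Construction sketch (field values are hypothetical; unspecified fields fall
# back to the defaults set in __init__ above):
#
#     node = TkSceneNodeData(Name='_pCube1', Type='MESH',
#                            Transform=TkTransformData(),
#                            Attributes=List(), Children=List())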
992e47a305d7797ce8662af91191b183c4dc5d44
5516f874c85b7b2a194fee536f10eff22636925e
/OOP/first_class.py
404db6dc2e32a8005c85d728cf876ca71050e07d
[]
no_license
vokborok/lutz
b58140f8420500de8d47bd358cacda4db5972ea5
e6b5fe636cbccce5ec76ed0716d33eeee90f10df
refs/heads/main
2023-07-12T17:40:54.272404
2021-08-16T22:09:46
2021-08-16T22:09:46
365,612,376
0
0
null
null
null
null
UTF-8
Python
false
false
122
py
class FirstClass:
    def setdata(self, value):
        self.data = value

    def display(self):
        print(self.data)
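# Usage sketch (instance name and value are illustrative):
#
#     x = FirstClass()
#     x.setdata("King Arthur")   # attaches .data to this instance
#     x.display()                # prints: King Arthur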
6eb1c5eec9aff34ec78b04b73f38d2d8ea238cc0
a4da1f7c9a8726bface6e20fe77bc96e94627d62
/classwork/modules/varscope.py
2cb803bdbe90e718d8325f5846084de36abc7a62
[]
no_license
KrackedJack/dbda-feb2019-python
97d8b8e7428e735d589c36111706723070abad49
9ae82552f50ff9f0d340d0ae97c9233cd4df19d7
refs/heads/master
2020-11-24T14:07:57.997410
2019-12-15T12:41:35
2019-12-15T12:41:35
228,185,218
0
0
null
null
null
null
UTF-8
Python
false
false
176
py
x = 20

def func():
    global x
    x = 30
    print("x:", x)

    def infunc():
        # nonlocal x
        global x
        x = 45
        print("x:", x)

    print("calling infunc()")
    infunc()

func()
print("x: ", x)
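# Expected output: both assignments rebind the module-level x, because each
# function declares `global x` (the commented-out `nonlocal x` would be a
# SyntaxError here, since x is a global, not an enclosing-function local):
#
#     x: 30
#     calling infunc()
#     x: 45
#     x:  45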
045b797fe7eb6cce795c14a6615378305af53da0
711756b796d68035dc6a39060515200d1d37a274
/output_cog/optimized_31572.py
a27c7cb0e13923113a3cd85c080912670b03b57f
[]
no_license
batxes/exocyst_scripts
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
a6c487d5053b9b67db22c59865e4ef2417e53030
refs/heads/master
2020-06-16T20:16:24.840725
2016-11-30T16:23:16
2016-11-30T16:23:16
75,075,164
0
0
null
null
null
null
UTF-8
Python
false
false
10,840
py
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set

marker_sets = {}
surf_sets = {}

if "Cog2_GFPN" not in marker_sets:
    s = new_marker_set('Cog2_GFPN')
    marker_sets["Cog2_GFPN"] = s
s = marker_sets["Cog2_GFPN"]
mark = s.place_marker((478.695, 491.618, 534.974), (0.89, 0.1, 0.1), 18.4716)

if "Cog2_0" not in marker_sets:
    s = new_marker_set('Cog2_0')
    marker_sets["Cog2_0"] = s
s = marker_sets["Cog2_0"]
mark = s.place_marker((451.617, 440.284, 572.499), (0.89, 0.1, 0.1), 17.1475)

if "Cog2_1" not in marker_sets:
    s = new_marker_set('Cog2_1')
    marker_sets["Cog2_1"] = s
s = marker_sets["Cog2_1"]
mark = s.place_marker((421.251, 373.074, 608.156), (0.89, 0.1, 0.1), 17.1475)

if "Cog2_GFPC" not in marker_sets:
    s = new_marker_set('Cog2_GFPC')
    marker_sets["Cog2_GFPC"] = s
s = marker_sets["Cog2_GFPC"]
mark = s.place_marker((499.981, 362.186, 493.433), (0.89, 0.1, 0.1), 18.4716)

if "Cog2_Anch" not in marker_sets:
    s = new_marker_set('Cog2_Anch')
    marker_sets["Cog2_Anch"] = s
s = marker_sets["Cog2_Anch"]
mark = s.place_marker((338.529, 244.809, 727.359), (0.89, 0.1, 0.1), 18.4716)

if "Cog3_GFPN" not in marker_sets:
    s = new_marker_set('Cog3_GFPN')
    marker_sets["Cog3_GFPN"] = s
s = marker_sets["Cog3_GFPN"]
mark = s.place_marker((453.438, 458.283, 555.029), (1, 1, 0), 18.4716)

if "Cog3_0" not in marker_sets:
    s = new_marker_set('Cog3_0')
    marker_sets["Cog3_0"] = s
s = marker_sets["Cog3_0"]
mark = s.place_marker((453.114, 459.5, 554.125), (1, 1, 0.2), 17.1475)

if "Cog3_1" not in marker_sets:
    s = new_marker_set('Cog3_1')
    marker_sets["Cog3_1"] = s
s = marker_sets["Cog3_1"]
mark = s.place_marker((454.706, 473.954, 530.234), (1, 1, 0.2), 17.1475)

if "Cog3_2" not in marker_sets:
    s = new_marker_set('Cog3_2')
    marker_sets["Cog3_2"] = s
s = marker_sets["Cog3_2"]
mark = s.place_marker((445.868, 467.792, 504.458), (1, 1, 0.2), 17.1475)

if "Cog3_3" not in marker_sets:
    s = new_marker_set('Cog3_3')
    marker_sets["Cog3_3"] = s
s = marker_sets["Cog3_3"]
mark = s.place_marker((419.783, 471.665, 494.786), (1, 1, 0.2), 17.1475)

if "Cog3_4" not in marker_sets:
    s = new_marker_set('Cog3_4')
    marker_sets["Cog3_4"] = s
s = marker_sets["Cog3_4"]
mark = s.place_marker((401.639, 456.549, 510.057), (1, 1, 0.2), 17.1475)

if "Cog3_5" not in marker_sets:
    s = new_marker_set('Cog3_5')
    marker_sets["Cog3_5"] = s
s = marker_sets["Cog3_5"]
mark = s.place_marker((383.687, 469.433, 493.196), (1, 1, 0.2), 17.1475)

if "Cog3_GFPC" not in marker_sets:
    s = new_marker_set('Cog3_GFPC')
    marker_sets["Cog3_GFPC"] = s
s = marker_sets["Cog3_GFPC"]
mark = s.place_marker((469.191, 481.848, 558.095), (1, 1, 0.4), 18.4716)

if "Cog3_Anch" not in marker_sets:
    s = new_marker_set('Cog3_Anch')
    marker_sets["Cog3_Anch"] = s
s = marker_sets["Cog3_Anch"]
mark = s.place_marker((298.112, 462.255, 432.399), (1, 1, 0.4), 18.4716)

if "Cog4_GFPN" not in marker_sets:
    s = new_marker_set('Cog4_GFPN')
    marker_sets["Cog4_GFPN"] = s
s = marker_sets["Cog4_GFPN"]
mark = s.place_marker((259.758, 343.869, 591.668), (0, 0, 0.8), 18.4716)

if "Cog4_0" not in marker_sets:
    s = new_marker_set('Cog4_0')
    marker_sets["Cog4_0"] = s
s = marker_sets["Cog4_0"]
mark = s.place_marker((259.758, 343.869, 591.668), (0, 0, 0.8), 17.1475)

if "Cog4_1" not in marker_sets:
    s = new_marker_set('Cog4_1')
    marker_sets["Cog4_1"] = s
s = marker_sets["Cog4_1"]
mark = s.place_marker((287.288, 348.283, 581.84), (0, 0, 0.8), 17.1475)

if "Cog4_2" not in marker_sets:
    s = new_marker_set('Cog4_2')
    marker_sets["Cog4_2"] = s
s = marker_sets["Cog4_2"]
mark = s.place_marker((314.635, 353.765, 572.544), (0, 0, 0.8), 17.1475)

if "Cog4_3" not in marker_sets:
    s = new_marker_set('Cog4_3')
    marker_sets["Cog4_3"] = s
s = marker_sets["Cog4_3"]
mark = s.place_marker((341.364, 364.2, 567.353), (0, 0, 0.8), 17.1475)

if "Cog4_4" not in marker_sets:
    s = new_marker_set('Cog4_4')
    marker_sets["Cog4_4"] = s
s = marker_sets["Cog4_4"]
mark = s.place_marker((365.858, 379.968, 567.214), (0, 0, 0.8), 17.1475)

if "Cog4_5" not in marker_sets:
    s = new_marker_set('Cog4_5')
    marker_sets["Cog4_5"] = s
s = marker_sets["Cog4_5"]
mark = s.place_marker((387.236, 399.269, 572.405), (0, 0, 0.8), 17.1475)

if "Cog4_6" not in marker_sets:
    s = new_marker_set('Cog4_6')
    marker_sets["Cog4_6"] = s
s = marker_sets["Cog4_6"]
mark = s.place_marker((405.032, 421.155, 581.55), (0, 0, 0.8), 17.1475)

if "Cog4_GFPC" not in marker_sets:
    s = new_marker_set('Cog4_GFPC')
    marker_sets["Cog4_GFPC"] = s
s = marker_sets["Cog4_GFPC"]
mark = s.place_marker((205.981, 333.036, 445.315), (0, 0, 0.8), 18.4716)

if "Cog4_Anch" not in marker_sets:
    s = new_marker_set('Cog4_Anch')
    marker_sets["Cog4_Anch"] = s
s = marker_sets["Cog4_Anch"]
mark = s.place_marker((598.405, 534.572, 716.502), (0, 0, 0.8), 18.4716)

if "Cog5_GFPN" not in marker_sets:
    s = new_marker_set('Cog5_GFPN')
    marker_sets["Cog5_GFPN"] = s
s = marker_sets["Cog5_GFPN"]
mark = s.place_marker((403.357, 414.699, 621.968), (0.3, 0.3, 0.3), 18.4716)

if "Cog5_0" not in marker_sets:
    s = new_marker_set('Cog5_0')
    marker_sets["Cog5_0"] = s
s = marker_sets["Cog5_0"]
mark = s.place_marker((403.357, 414.699, 621.968), (0.3, 0.3, 0.3), 17.1475)

if "Cog5_1" not in marker_sets:
    s = new_marker_set('Cog5_1')
    marker_sets["Cog5_1"] = s
s = marker_sets["Cog5_1"]
mark = s.place_marker((429.663, 403.467, 620.712), (0.3, 0.3, 0.3), 17.1475)

if "Cog5_2" not in marker_sets:
    s = new_marker_set('Cog5_2')
    marker_sets["Cog5_2"] = s
s = marker_sets["Cog5_2"]
mark = s.place_marker((450.853, 385.335, 613.4), (0.3, 0.3, 0.3), 17.1475)

if "Cog5_3" not in marker_sets:
    s = new_marker_set('Cog5_3')
    marker_sets["Cog5_3"] = s
s = marker_sets["Cog5_3"]
mark = s.place_marker((450.131, 365.283, 592.147), (0.3, 0.3, 0.3), 17.1475)

if "Cog5_GFPC" not in marker_sets:
    s = new_marker_set('Cog5_GFPC')
    marker_sets["Cog5_GFPC"] = s
s = marker_sets["Cog5_GFPC"]
mark = s.place_marker((513.337, 446.467, 521.88), (0.3, 0.3, 0.3), 18.4716)

if "Cog5_Anch" not in marker_sets:
    s = new_marker_set('Cog5_Anch')
    marker_sets["Cog5_Anch"] = s
s = marker_sets["Cog5_Anch"]
mark = s.place_marker((386.052, 276.451, 653.213), (0.3, 0.3, 0.3), 18.4716)

if "Cog6_GFPN" not in marker_sets:
    s = new_marker_set('Cog6_GFPN')
    marker_sets["Cog6_GFPN"] = s
s = marker_sets["Cog6_GFPN"]
mark = s.place_marker((473.752, 433.46, 555.492), (0.21, 0.49, 0.72), 18.4716)

if "Cog6_0" not in marker_sets:
    s = new_marker_set('Cog6_0')
    marker_sets["Cog6_0"] = s
s = marker_sets["Cog6_0"]
mark = s.place_marker((473.764, 433.457, 555.488), (0.21, 0.49, 0.72), 17.1475)

if "Cog6_1" not in marker_sets:
    s = new_marker_set('Cog6_1')
    marker_sets["Cog6_1"] = s
s = marker_sets["Cog6_1"]
mark = s.place_marker((483.159, 457.741, 568.226), (0.21, 0.49, 0.72), 17.1475)

if "Cog6_2" not in marker_sets:
    s = new_marker_set('Cog6_2')
    marker_sets["Cog6_2"] = s
s = marker_sets["Cog6_2"]
mark = s.place_marker((468.537, 481.85, 573.984), (0.21, 0.49, 0.72), 17.1475)

if "Cog6_3" not in marker_sets:
    s = new_marker_set('Cog6_3')
    marker_sets["Cog6_3"] = s
s = marker_sets["Cog6_3"]
mark = s.place_marker((442.525, 491.541, 567.233), (0.21, 0.49, 0.72), 17.1475)

if "Cog6_4" not in marker_sets:
    s = new_marker_set('Cog6_4')
    marker_sets["Cog6_4"] = s
s = marker_sets["Cog6_4"]
mark = s.place_marker((423.292, 501.056, 548.324), (0.21, 0.49, 0.72), 17.1475)

if "Cog6_5" not in marker_sets:
    s = new_marker_set('Cog6_5')
    marker_sets["Cog6_5"] = s
s = marker_sets["Cog6_5"]
mark = s.place_marker((400.77, 499.392, 531.192), (0.21, 0.49, 0.72), 17.1475)

if "Cog6_6" not in marker_sets:
    s = new_marker_set('Cog6_6')
    marker_sets["Cog6_6"] = s
s = marker_sets["Cog6_6"]
mark = s.place_marker((377.516, 489.405, 518.664), (0.21, 0.49, 0.72), 17.1475)

if "Cog6_GFPC" not in marker_sets:
    s = new_marker_set('Cog6_GFPC')
    marker_sets["Cog6_GFPC"] = s
s = marker_sets["Cog6_GFPC"]
mark = s.place_marker((404.183, 491.253, 600.111), (0.21, 0.49, 0.72), 18.4716)

if "Cog6_Anch" not in marker_sets:
    s = new_marker_set('Cog6_Anch')
    marker_sets["Cog6_Anch"] = s
s = marker_sets["Cog6_Anch"]
mark = s.place_marker((353.742, 484.147, 434.656), (0.21, 0.49, 0.72), 18.4716)

if "Cog7_GFPN" not in marker_sets:
    s = new_marker_set('Cog7_GFPN')
    marker_sets["Cog7_GFPN"] = s
s = marker_sets["Cog7_GFPN"]
mark = s.place_marker((435.58, 469.772, 626.287), (0.7, 0.7, 0.7), 18.4716)

if "Cog7_0" not in marker_sets:
    s = new_marker_set('Cog7_0')
    marker_sets["Cog7_0"] = s
s = marker_sets["Cog7_0"]
mark = s.place_marker((442.07, 447.811, 613.584), (0.7, 0.7, 0.7), 17.1475)

if "Cog7_1" not in marker_sets:
    s = new_marker_set('Cog7_1')
    marker_sets["Cog7_1"] = s
s = marker_sets["Cog7_1"]
mark = s.place_marker((458.254, 400.796, 584.742), (0.7, 0.7, 0.7), 17.1475)

if "Cog7_2" not in marker_sets:
    s = new_marker_set('Cog7_2')
    marker_sets["Cog7_2"] = s
s = marker_sets["Cog7_2"]
mark = s.place_marker((475.76, 353.371, 557.837), (0.7, 0.7, 0.7), 17.1475)

if "Cog7_GFPC" not in marker_sets:
    s = new_marker_set('Cog7_GFPC')
    marker_sets["Cog7_GFPC"] = s
s = marker_sets["Cog7_GFPC"]
mark = s.place_marker((544.667, 395.707, 545.915), (0.7, 0.7, 0.7), 18.4716)

if "Cog7_Anch" not in marker_sets:
    s = new_marker_set('Cog7_Anch')
    marker_sets["Cog7_Anch"] = s
s = marker_sets["Cog7_Anch"]
mark = s.place_marker((449.222, 255.85, 531.598), (0.7, 0.7, 0.7), 18.4716)

if "Cog8_0" not in marker_sets:
    s = new_marker_set('Cog8_0')
    marker_sets["Cog8_0"] = s
s = marker_sets["Cog8_0"]
mark = s.place_marker((519.447, 415.745, 547.547), (1, 0.5, 0), 17.1475)

if "Cog8_1" not in marker_sets:
    s = new_marker_set('Cog8_1')
    marker_sets["Cog8_1"] = s
s = marker_sets["Cog8_1"]
mark = s.place_marker((511.16, 425.322, 572.61), (1, 0.5, 0), 17.1475)

if "Cog8_2" not in marker_sets:
    s = new_marker_set('Cog8_2')
    marker_sets["Cog8_2"] = s
s = marker_sets["Cog8_2"]
mark = s.place_marker((487.173, 418.149, 585.288), (1, 0.5, 0), 17.1475)

if "Cog8_3" not in marker_sets:
    s = new_marker_set('Cog8_3')
    marker_sets["Cog8_3"] = s
s = marker_sets["Cog8_3"]
mark = s.place_marker((485.574, 401.29, 608.518), (1, 0.5, 0), 17.1475)

if "Cog8_4" not in marker_sets:
    s = new_marker_set('Cog8_4')
    marker_sets["Cog8_4"] = s
s = marker_sets["Cog8_4"]
mark = s.place_marker((479.216, 385.439, 631.641), (1, 0.5, 0), 17.1475)

if "Cog8_5" not in marker_sets:
    s = new_marker_set('Cog8_5')
    marker_sets["Cog8_5"] = s
s = marker_sets["Cog8_5"]
mark = s.place_marker((460.226, 371.123, 647.809), (1, 0.5, 0), 17.1475)

if "Cog8_GFPC" not in marker_sets:
    s = new_marker_set('Cog8_GFPC')
    marker_sets["Cog8_GFPC"] = s
s = marker_sets["Cog8_GFPC"]
mark = s.place_marker((462.48, 436.145, 600.066), (1, 0.6, 0.1), 18.4716)

if "Cog8_Anch" not in marker_sets:
    s = new_marker_set('Cog8_Anch')
    marker_sets["Cog8_Anch"] = s
s = marker_sets["Cog8_Anch"]
mark = s.place_marker((454.734, 305.751, 697.003), (1, 0.6, 0.1), 18.4716)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
f5738ebf2b316d78bcf67e9b2c3851c42892334e
0c4d4d199da126ff7d5d8317aaaf31fa6182d43e
/Shuffle.py
461c6951fd94b3110048c2ed93f2dc7cdef104a6
[]
no_license
riquellopes/challenges
c067101171d2716e3ddb8a928f332c4fe0c5bfb2
cedfba39d6866bd4ff1ec40d0f3641e07f805a16
refs/heads/master
2020-09-13T17:33:28.457695
2018-08-22T21:56:16
2018-08-22T21:56:16
94,462,509
0
0
null
2018-10-28T12:43:17
2017-06-15T17:21:01
Python
UTF-8
Python
false
false
605
py
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")

"""
>>> solution(123456)
162534
>>> solution(162534)
146325
"""

def solution(A):
    # write your code in Python 3.6
    numbers = list(str(A))
    size = len(numbers)
    to_remove = 0
    digit = []
    while True:
        if len(digit) == size:
            break
        if to_remove == 0:
            num = numbers.pop(to_remove)
            to_remove = -1
        else:
            num = numbers.pop()
            to_remove = 0
        digit.append(num)
    return int("".join(digit))
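# Worked trace for solution(123456): digits are taken alternately from the
# front and the back of ['1', '2', '3', '4', '5', '6']:
#     pop(0) -> '1', pop() -> '6', pop(0) -> '2',
#     pop() -> '5', pop(0) -> '3', pop() -> '4'   => 162534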
5d3ef43a52c9d3f468feeb9ed9bdd8f5ff9dfba6
4f7742df83849517c5675513a1d111b01fc1deb3
/examples/precision_landing.py
a39daefaa1bd9c71eb0bac4f698b1f829ac703d5
[ "BSD-3-Clause" ]
permissive
mcorner/dji-asdk-to-python
319025ee1b5ebba5b26b8cdd144eec5dec243f0f
59464f36dc046a0b96b1544fff31e0e40f1322a1
refs/heads/master
2023-01-06T16:31:24.757081
2020-10-29T18:57:10
2020-10-29T18:57:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,386
py
from dji_asdk_to_python.products.aircraft import Aircraft
import numpy as np
from dji_asdk_to_python.precision_landing.aproximation import ArucoAproximation
from dji_asdk_to_python.precision_landing.landing import ArucoLanding
from time import sleep
import os
from dji_asdk_to_python.errors import CustomError

APP_IP = "192.168.50.158"
aircraft = Aircraft(APP_IP)

camera_distortion = np.loadtxt("/home/luis/Documentos/psbposas/dji-asdk-to-python/examples/calibration/camera_distortion.txt", delimiter=",")
camera_matrix = np.loadtxt("/home/luis/Documentos/psbposas/dji-asdk-to-python/examples/calibration/camera_matrix.txt", delimiter=",")

stage1 = ArucoAproximation(drone_ip=APP_IP, camera_distortion=camera_distortion,
                           camera_matrix=camera_matrix, marker_id=17, marker_size_cm=70)
stage2 = ArucoLanding(drone_ip=APP_IP, camera_distortion=camera_distortion,
                      camera_matrix=camera_matrix, marker_id=62, marker_size_cm=12)

streaming_manager = aircraft.getLiveStreamManager()
rtp_manager = streaming_manager.getRTPManager()
rtp_manager.setWidth(1280)
rtp_manager.setHeigth(720)

result = rtp_manager.startStream()
print("result startStream %s" % result)
if isinstance(result, CustomError):
    raise Exception("%s" % result)

stage1.start(rtp_manager)
input("PRESS A KEY TO ENTER STAGE 2")
# DBest notification of top platform deployment should be awaited here
stage2.start(rtp_manager)
d79d232b2c92ccaf4f09f8887399945e4d279992
b05761d771bb5a85d39d370c649567c1ff3eb089
/venv/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/python_message.pyi
739b65ed584976d74587def3ecc0bcf58b01737f
[]
no_license
JawshyJ/Coding_Practice
88c49cab955eab04609ec1003b6b8c20f103fc06
eb6b229d41aa49b1545af2120e6bee8e982adb41
refs/heads/master
2023-02-19T10:18:04.818542
2023-02-06T21:22:58
2023-02-06T21:22:58
247,788,631
4
0
null
null
null
null
UTF-8
Python
false
false
96
pyi
/home/runner/.cache/pip/pool/20/44/ab/d4e8c0643f62760d4e816ccc7de5764ad6b4f11d2e1cb01bc1e9634c3e
d1dcdef8f4dc3fe9d977de9f8c810384be8f24d1
6426ca723494c69f7a18d6378458dad0b7abf99a
/HW3_Cocktail,ExchangeSort/cocktailshakeSort.py
dee7a95c24df85bcc21fcc02485679640a03ea53
[]
no_license
NoirNorie/Algorithm_Python
3364f8ac8a530ede3857dc46f224df2c8c7eee80
a137fb30f1c44373cc22d3110eecd51d1e057540
refs/heads/master
2022-12-24T14:45:08.122396
2020-10-03T15:49:45
2020-10-03T15:49:45
298,333,862
0
0
null
null
null
null
UTF-8
Python
false
false
3,146
py
import random, time, sys

def checkSort(a, n):
    isSorted = True
    for i in range(1, n):
        if a[i] > a[i+1]:
            isSorted = False
        if (not isSorted):
            break
    if (isSorted):
        print("sorting complete")
    else:
        print("sorting error occurred")

# "cocktailshakeSort" is too long a name, so the function is written as csSort.
def csSort(a, n):
    i, j = n, 1
    while (i > j):
        if (i + j != n):  # forward pass, from the front of the array to the back
            for k in range(j, i, 1):
                if (a[k] > a[k+1]):
                    a[k], a[k+1] = a[k+1], a[k]
            i -= 1
        else:  # backward pass, from the back of the array to the front
            for k in range(i, j, -1):
                if (a[k-1] > a[k]):
                    a[k-1], a[k] = a[k], a[k-1]
            j += 1

N = 5000
b = []
b.append(-1)
for i in range(N):
    b.append(random.randint(1, N))
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for an array of random values (N = %d) : %0.3f' % (N, end_time))

b = []
b.append(-1)
for i in range(N):
    b.append(i)
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for an already-sorted array (N = %d) : %0.3f' % (N, end_time))

b = []
b.append(-1)
for i in range(N-1, -1, -1):
    b.append(i)
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for a reverse-sorted array (N = %d) : %0.3f' % (N, end_time))
print()

N = 10000
b = []
b.append(-1)
for i in range(N):
    b.append(random.randint(1, N))
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for an array of random values (N = %d) : %0.3f' % (N, end_time))

b = []
b.append(-1)
for i in range(N):
    b.append(i)
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for an already-sorted array (N = %d) : %0.3f' % (N, end_time))

b = []
b.append(-1)
for i in range(N-1, -1, -1):
    b.append(i)
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for a reverse-sorted array (N = %d) : %0.3f' % (N, end_time))

N = 15000
b = []
b.append(-1)
for i in range(N):
    b.append(random.randint(1, N))
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for an array of random values (N = %d) : %0.3f' % (N, end_time))

b = []
b.append(-1)
for i in range(N):
    b.append(i)
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for an already-sorted array (N = %d) : %0.3f' % (N, end_time))

b = []
b.append(-1)
for i in range(N-1, -1, -1):
    b.append(i)
start_time = time.time()
csSort(b, N)
end_time = time.time() - start_time
print('Cocktail shaker sort runtime for a reverse-sorted array (N = %d) : %0.3f' % (N, end_time))
3b18713036101f6e001dab4bead2f1f625494818
ebbd58c88dc3ea5c3ff5b7c63cde731c063bd6cc
/sigma/gods-unchained-packs/tests/tests/test_bundle_open.py
d9c24e9aa2f78b8b3f358b4ec26e9c3ec59ce713
[]
no_license
the-mog/resources
eb65efebc47fe75cefe85049d162d9032b6cd958
58b446d3ba6e16acda163869b798e191077631ac
refs/heads/main
2023-01-21T08:36:47.822319
2020-11-23T15:21:18
2020-11-23T15:21:18
315,348,311
0
0
null
null
null
null
UTF-8
Python
false
false
5,948
py
import pytest
import random

from web3.contract import ConciseContract

##########################
# VALID OPERATIONS
##########################

def test_bundle_open(
    accounts,
    assert_tx_failed,
    get_logs_for_event,
    instantiate,
    pack_deploy,
    pack_prices,
    pack_types,
    w3,
):
    # Deploy a PackFive and required supporting contracts (processor, referral, cards, vault)
    (pack, processor, referrals, cards, vault,
     pack_r, processor_r, referrals_r, cards_r, vault_r) = pack_deploy()

    # Set packs for purchase
    bundle_size = 3  # 3 packs of 5 cards in a bundle
    tx_hash = pack.functions.setPack(pack_types['Rare'], pack_prices['Rare'], "Rare bundle", "RB", bundle_size, 0).transact({'from': accounts[0]})
    rare_bundle = instantiate(get_logs_for_event(pack.events.PackAdded, tx_hash)[0]['args']['bundle'], abi=None, contract="Bundle")
    processor.functions.setCanSell(rare_bundle.address, True).transact({'from': accounts[0]})

    rare_bundle.functions.purchase(1, accounts[2]).transact({'from': accounts[1], 'value': pack_prices['Rare'] * bundle_size})
    tx = rare_bundle.functions.open(1).transact({'from': accounts[1]})

    logs = get_logs_for_event(pack.events.BundlesOpened, tx)
    assert logs[0]['args']['id'] == 0, "Bundle open id"
    assert logs[0]['args']['packType'] == 0, "Bundle open pack type"
    assert logs[0]['args']['user'] == accounts[1], "Bundle open user"
    assert logs[0]['args']['count'] == 1, "Bundle open count"
    assert logs[0]['args']['packCount'] == 3, "Bundle open packCount"

    logs = get_logs_for_event(pack.events.PurchaseRecorded, tx)
    assert logs[0]['args']['id'] == 0, "purchaseRecorded ID"
    assert logs[0]['args']['packType'] == pack_types['Rare'], "purchaseRecorded packType"
    assert logs[0]['args']['user'] == accounts[1], "purchaseRecorded user"
    assert logs[0]['args']['count'] == 3, "purchaseRecorded count"
    assert logs[0]['args']['lockup'] == 0, "purchaseRecorded lockup"

    logs = get_logs_for_event(rare_bundle.events.Transfer, tx)
    assert logs[0]['args']['from'] == accounts[1], "Burn from user"
    assert logs[0]['args']['to'] == '0x' + '00' * 20, "Burn to 0"
    assert logs[0]['args']['value'] == 1, "Burn x tokens"


def test_open_max_count(
    accounts,
    assert_tx_failed,
    get_logs_for_event,
    instantiate,
    pack_deploy,
    pack_prices,
    pack_types,
    w3,
):
    # Deploy a PackFive and required supporting contracts (processor, referral, cards, vault)
    (pack, processor, referrals, cards, vault,
     pack_r, processor_r, referrals_r, cards_r, vault_r) = pack_deploy()

    # Set packs for purchase
    bundle_size = 1
    num_bundles = 2**15  # 2 packs of 5 cards in a bundle
    tx_hash = pack.functions.setPack(pack_types['Rare'], pack_prices['Rare'], "Rare bundle", "RB", bundle_size, 0).transact({'from': accounts[0]})
    rare_bundle = instantiate(get_logs_for_event(pack.events.PackAdded, tx_hash)[0]['args']['bundle'], abi=None, contract="Bundle")
    processor.functions.setCanSell(rare_bundle.address, True).transact({'from': accounts[0]})

    rare_bundle.functions.purchase(num_bundles, accounts[2]).transact({'from': accounts[1], 'value': pack_prices['Rare'] * bundle_size * num_bundles})
    rare_bundle.functions.open(num_bundles).transact({'from': accounts[1]})


# This fails now because at least one bundle must be opened.
@pytest.mark.xfail
def test_open_zero(
    accounts,
    assert_tx_failed,
    get_logs_for_event,
    instantiate,
    pack_deploy,
    pack_prices,
    pack_types,
    w3,
):
    # Deploy a PackFive and required supporting contracts (processor, referral, cards, vault)
    (pack, processor, referrals, cards, vault,
     pack_r, processor_r, referrals_r, cards_r, vault_r) = pack_deploy()

    # Set packs for purchase
    bundle_size = 3  # 3 packs of 5 cards in a bundle
    tx_hash = pack.functions.setPack(pack_types['Rare'], pack_prices['Rare'], "Rare bundle", "RB", bundle_size, 0).transact({'from': accounts[0]})
    rare_bundle = instantiate(get_logs_for_event(pack.events.PackAdded, tx_hash)[0]['args']['bundle'], abi=None, contract="Bundle")
    processor.functions.setCanSell(rare_bundle.address, True).transact({'from': accounts[0]})

    rare_bundle.functions.purchase(1, accounts[2]).transact({'from': accounts[1], 'value': pack_prices['Rare'] * bundle_size})
    # Should this fail cause it creates logs
    rare_bundle.functions.open(0).transact({'from': accounts[1]})


##########################
# INVALID OPERATIONS
##########################

def test_open_too_many(
    accounts,
    assert_tx_failed,
    get_logs_for_event,
    instantiate,
    pack_deploy,
    pack_prices,
    pack_types,
    w3,
):
    # Deploy a PackFive and required supporting contracts (processor, referral, cards, vault)
    (pack, processor, referrals, cards, vault,
     pack_r, processor_r, referrals_r, cards_r, vault_r) = pack_deploy()

    # Set packs for purchase
    bundle_size = 3  # 3 packs of 5 cards in a bundle
    num_bundles = 5
    tx_hash = pack.functions.setPack(pack_types['Rare'], pack_prices['Rare'], "Rare bundle", "RB", bundle_size, 0).transact({'from': accounts[0]})
    rare_bundle = instantiate(get_logs_for_event(pack.events.PackAdded, tx_hash)[0]['args']['bundle'], abi=None, contract="Bundle")
    processor.functions.setCanSell(rare_bundle.address, True).transact({'from': accounts[0]})

    rare_bundle.functions.purchase(num_bundles, accounts[2]).transact({'from': accounts[1], 'value': pack_prices['Rare'] * bundle_size * num_bundles})
    assert_tx_failed(rare_bundle.functions.open(num_bundles + 1), {'from': accounts[1]})
    assert_tx_failed(rare_bundle.functions.open(2**256 - 1), {'from': accounts[1]})
    assert_tx_failed(rare_bundle.functions.open(2**16 - 1), {'from': accounts[1]})
55be114a4a3c24440decb8cc9e79341dec924dba
058f6cf55de8b72a7cdd6e592d40243a91431bde
/tests/llvm/static/test.py
5819067099605c281f435a3dad4f6c284961eb24
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
LLNL/FPChecker
85e8ebf1d321b3208acee7ddfda2d8878a238535
e665ef0f050316f6bc4dfc64c1f17355403e771b
refs/heads/master
2023-08-30T23:24:43.749418
2022-04-14T19:57:44
2022-04-14T19:57:44
177,033,795
24
6
Apache-2.0
2022-09-19T00:09:50
2019-03-21T22:34:14
Python
UTF-8
Python
false
false
3,694
py
#!/usr/bin/env python

import test_config
import subprocess
import os
import sys

def main():
    print "* Static Tests *"

    ###########################################################################
    t = "Test: find instrumentation functions"
    testTarget = test_config.textWidth.format(t)
    sys.stdout.write(testTarget)
    os.chdir(test_config.path + "/test_find_inst_functions/")
    cmd = ["./test.py"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    sys.stdout.write(cmdOutput)
    os.chdir("../")
    ###########################################################################

    ###########################################################################
    t = "Test: num. fp operations"
    testTarget = test_config.textWidth.format(t)
    sys.stdout.write(testTarget)
    os.chdir(test_config.path + "/test_number_fp_operations/")
    cmd = ["./test.py"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    sys.stdout.write(cmdOutput)
    os.chdir("../")
    ###########################################################################

    ###########################################################################
    t = "Test: a device function is found"
    testTarget = test_config.textWidth.format(t)
    sys.stdout.write(testTarget)
    os.chdir(test_config.path + "/test_device_func_found/")
    cmd = ["./test.py"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    sys.stdout.write(cmdOutput)
    os.chdir("../")
    ###########################################################################

    ###########################################################################
    t = "Test: a global function is found"
    testTarget = test_config.textWidth.format(t)
    sys.stdout.write(testTarget)
    os.chdir(test_config.path + "/test_global_func_found/")
    cmd = ["./test.py"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    sys.stdout.write(cmdOutput)
    os.chdir("../")
    ###########################################################################

    ###########################################################################
    t = "Test: main() is found"
    testTarget = test_config.textWidth.format(t)
    sys.stdout.write(testTarget)
    os.chdir(test_config.path + "/test_main_is_found/")
    cmd = ["./test.py"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    sys.stdout.write(cmdOutput)
    os.chdir("../")
    ###########################################################################

    ###########################################################################
    t = "Test: global array instrumentation"
    testTarget = test_config.textWidth.format(t)
    sys.stdout.write(testTarget)
    os.chdir(test_config.path + "/test_global_array_instrumentation/")
    cmd = ["./test.py"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    sys.stdout.write(cmdOutput)
    os.chdir("../")
    ###########################################################################

    ###########################################################################
    t = "Test: correct func are found and instrumented"
    testTarget = test_config.textWidth.format(t)
    sys.stdout.write(testTarget)
    os.chdir(test_config.path + "/test_correct_inst_functions_found/")
    cmd = ["./test.py"]
    cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    sys.stdout.write(cmdOutput)
    os.chdir("../")
    ###########################################################################

main()
ac03c81ca91034b84d892212f2c3f714f8fb0a32
2185c16a9f6564183e86a49942a2ae861bce534c
/IIHT/holland_house_scraper/scraper.py
2af00020f9bea906a35b99bc22677eeb35023511
[]
no_license
patricksile/code_folder
a9c3ebe32f6eed122fb877b955643d8944d8453a
b8f5cce6ea07ed567621e848c7a61ab457f66670
refs/heads/master
2023-01-22T14:06:33.768186
2018-07-25T12:05:44
2018-07-25T12:05:44
142,292,596
0
0
null
2023-01-11T22:20:31
2018-07-25T11:47:20
JavaScript
UTF-8
Python
false
false
2,278
py
# # /usr/bin/env python3.5

# from urllib.parse import urlparse  # module to clean links (built-in)
# import urllib.parse
# from time import sleep
# import webbrowser  # Open links with a web browser (built-in)
# import requests  # Downloads files and web pages (external)
# import bs4  # Parses HTML (external)
# import selenium  # Launches and controls a web browser (external)
# import time  # To insert some delay on actions
# import smtplib  # To send files through smtp server
# import email.mime.text  # To send data to an email

# # open file with websites raw links
# web_file = open('websites.txt', 'r')

# # read object web_file and save in links_file
# links_file = web_file.read().split('\n')  #adfsdfsdfdsf

# # clean_links = []  # object clean_links of the class array
# clean_links = [urlparse(line).netloc for line in links_file]  # object clean_links of the class array

# cities = ['haarlem', 'amsterdam']  # list of cities for test purpose with www.huurwoningen.nl which is the first link in the object list clean_links

# # page_download = requests.get("http://%s/in/%s/?min_price=%d&max_price=%d"%("www.huurwoningen.nl", "haarlem", 300, 600)).text
# # webbrowser.open("http://www.huurwoningen.nl/in/haarlem/?min_price=300&max_price=600")
# # sleep(10)
# # page_download = requests.get("https://jobs.jumia.cm/en/jobs-douala/?by=digital+marketer")
# # page_download_bs4 = bs4.BeautifulSoup(page_download.text, "lxml")

# for link in clean_links[0]:
#     for city in cities:
#         # https://www.huurwoningen.nl/in/haarlem/?min_price=100&max_price=300
#         for max_price in range(500, 601, 100):
#             # time.sleep(5)  # 5 seconds sleep or delay
#             webbrowser.open("https://%s/in/%s/?min_price=%d&max_price=%d" % (link, city, 300, max_price))  # opening each page in a new tab
#             # page_download = requests.get("http://%s/in/%s/?min_price=%d&max_price=%d" % (link, city, 300, max_price))  # downloading the page in the page_download
#             # print(page_downlaod)
#             # page_download.raise_for_status()  # http error checking
#             soup = bs4.BeautifulSoup(page_download.text)  # creating a bs4 object for further html parsing
#             price_link = soup.select('div span .linsting_price')  # price link elements to select of object list
#             for i in range(len(price_link))
4c005a778fac7b075557916fb12e526b31ac3231
3a31504d63065a2bacc4afa473a1a9662534aa7d
/re_sys/views.py
099f774af69209f8512b3fb822f991d39f18e64d
[]
no_license
wuweiwuyanzu/Personalized-recommend
09ca9218e6405e3e71d78cf42cd54ff9398e9040
c48bb53be6623beab8bcecef85354228637dc8ef
refs/heads/master
2020-06-27T04:20:09.560639
2019-05-03T15:43:58
2019-05-03T15:43:58
199,842,889
1
0
null
2019-07-31T11:29:10
2019-07-31T11:29:09
null
UTF-8
Python
false
false
2,310
py
#!/usr/bin/env python
# coding=utf-8
from django.shortcuts import render
from re_sys.recommend import re_model
from re_sys.recommend import utils
import time

print('---- initializing and loading the model ----')
global_model = re_model.Model()
global_loaded_graph, global_sess = global_model.loead_sess()

# Create your views here.
def index(request):
    return render(request, 'index.html')

def recommend(request):
    movie_id = request.GET.get('movie_id')
    try:
        if ((int(movie_id) < 0) | (int(movie_id) > 3953)):
            return render(request, 'index.html')
    except ValueError:
        return render(request, 'index.html')
    global global_model
    model = global_model
    print('------- generating recommendations --------', movie_id)
    global_loaded_graph, global_sess
    choice_movie_name, list_same_movies_names, list_pepole_like_movies_names, list_same_movies_ids, list_pepole_like_movies_ids = model.recommend_by_movie(int(movie_id))
    print('Chosen movie:', choice_movie_name)
    print('Similar movies:', list_same_movies_names)
    print('People who like this movie also like:', list_pepole_like_movies_names)

    list_dict_choice = []
    for i in choice_movie_name:
        # time.sleep(0.2)  # throttle the crawler
        list_dict_choice.append(utils.movie_dic(i))
    list_dict_choice[0]['movie_id'] = movie_id
    # list_dict_choice[0]['title'] = choice_movie_name

    list_dict_same = []
    for i in list_same_movies_names[:4]:
        # time.sleep(0.2)
        list_dict_same.append(utils.movie_dic(i))
    for i in range(len(list_dict_same)):
        list_dict_same[i]['movie_id'] = list_same_movies_ids[i]

    list_dict_otherlike = []
    for i in list_pepole_like_movies_names[:4]:
        # time.sleep(0.2)
        list_dict_otherlike.append(utils.movie_dic(i))
    for i in range(len(list_dict_otherlike)):
        list_dict_otherlike[i]['movie_id'] = list_pepole_like_movies_ids[i]
        # list_dict_otherlike[i]['title'] = list_dict_otherlike[i]

    print('Returning results')
    print(list_dict_choice)
    print(len(list_dict_same))
    # print(len(list_dict_otherlike))
    context = {}
    context['list_dict_choice'] = list_dict_choice[:4]
    context['list_dict_same'] = list_dict_same
    context['list_dict_otherlike'] = list_dict_otherlike
    return render(request, 'index.html', context)
95e8f2292640c638b8debe67833e96aaa4b6f3e4
c41c0a760d11d384ba2ece1040875d23ee9088a2
/python_stack/django_projects/belt_reviewer/apps/login_registration/views.py
d202873f333c1d28af3a9a841121f16fe717817a
[]
no_license
frednava67/dojo
6a0d6250c992fc6910a539518891f237fcc62f4a
d342bbb474606c0bc019247aeb0212cc4704cd23
refs/heads/master
2018-12-20T02:05:47.798107
2018-11-13T05:34:49
2018-11-13T05:34:49
149,045,555
0
0
null
null
null
null
UTF-8
Python
false
false
3,732
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
import re, bcrypt
from .models import User

# the index function is called when root is visited
def index(request):
    print("login_registration/index()")
    if "user_id" not in request.session:
        context = {
            "first_name": "",
            "last_name": "",
            "email": ""
        }
        if "reg_attempt_failed" in request.session:
            context = {
                "first_name": request.session["first_name"],
                "last_name": request.session["last_name"],
                "email": request.session["email"]
            }
    return render(request, "index.html", context)

def process_registration(request):
    print("login_registration/process_registration()")
    bFlashMessage = False
    if request.method == "POST":
        bFlashMessage = User.objects.basic_validator(request)
        request.session["first_name"] = request.POST['first_name']
        request.session["last_name"] = request.POST['last_name']
        request.session["email"] = request.POST['email']
        f_name = request.POST['first_name']
        l_name = request.POST['last_name']
        email = request.POST['email']
        pwd = request.POST['password']
        request.session["first_name"] = f_name
        request.session["last_name"] = l_name
        request.session["email"] = email
        if bFlashMessage:
            request.session["reg_attempt_failed"] = True
            return redirect("/login_registration")
        else:
            request.session.clear()
            pwhash = bcrypt.hashpw(pwd.encode(), bcrypt.gensalt())
            new_user = User.objects.create(first_name=f_name, last_name=l_name, email=email, pwhashval=pwhash.decode())
            request.session["user_id"] = new_user.id
            request.session["first_name"] = f_name
    return redirect("/")

def process_login(request):
    print("login_registration/process_login")
    if request.method == "POST":
        loginemail = request.POST['loginemail']
        loginpassword = request.POST['loginpassword']
        print(loginemail)
        print(User.objects.all().values())
        obj = User.objects.filter(email=loginemail)
        print("count", obj.count())
        i = obj.count()
        if (i > 0):
            tempHash = obj[0].pwhashval
            bPasswordCheck = bcrypt.checkpw(loginpassword.encode(), tempHash.encode())
            print("bPasswordCheck", bPasswordCheck)
            request.session.clear()
            if (i == 0 or bPasswordCheck != True):
                request.session["loginemail"] = loginemail
                messages.error(request, u"You were not able to login.", 'login')
                return redirect('/')
            else:
                request.session["first_name"] = obj[0].first_name
                request.session["user_id"] = obj[0].id
                print(request.session["user_id"])
        else:
            request.session["loginemail"] = loginemail
            messages.error(request, u"You were not able to login.", 'login')
            return redirect('/')
    return redirect('/')

def reset(request):
    print("reset()")
    request.session.clear()
    return redirect('/')

# def runonce(request):
#     print("runonce()")
#     # password
#     badpassword1 = "password"
#     hash1 = bcrypt.hashpw(badpassword1.encode(), bcrypt.gensalt())
#     print(User.objects.create(first_name="Foghorn", last_name="Leghorn", email="[email protected]", pwhashval=hash1.decode()))
#     response = "Hello, I ran your RUNONCE request!"
#     return HttpResponse(response)
d5260e5c6f8e6b776dd7948859e56fac6f69d8c5
e63a895b941207285d1ee1e36c5a2bf6bf3ed5bc
/progress.py
1a6229bbba8d161e43f0610f710d6b5cd09e2238
[ "MIT" ]
permissive
SSaeedHoseini/dockerscriptpy
dcea26ee8a743286e849262a03e447b096113c93
f601937d0143bac0124d5b769ff3ea10625a24ab
refs/heads/master
2020-12-09T04:40:40.304676
2020-01-13T08:18:46
2020-01-13T08:18:46
233,195,809
0
0
null
null
null
null
UTF-8
Python
false
false
1,296
py
import shutil

def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', autosize=False):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        autosize    - Optional  : automatically resize the length of the progress bar to the terminal window (Bool)
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    styling = '%s |%s| %s%% %s' % (prefix, fill, percent, suffix)
    if autosize:
        cols, _ = shutil.get_terminal_size(fallback=(length, 1))
        length = cols - len(styling)
    filledLength = int(length * iteration // total)
    bar = fill * filledLength + '-' * (length - filledLength)
    print('\r%s' % styling.replace(fill, bar), end='\r')
    # Print New Line on Complete
    if iteration == total:
        print()
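# Usage sketch, following the docstring's "call in a loop" pattern (the item
# list and the sleep are illustrative stand-ins for real work):
#
#     import time
#     items = list(range(57))
#     printProgressBar(0, len(items), prefix='Progress:', suffix='Complete', autosize=True)
#     for i, _ in enumerate(items):
#         time.sleep(0.05)
#         printProgressBar(i + 1, len(items), prefix='Progress:', suffix='Complete', autosize=True)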
bb313215f567104597c8c9dfc261320fd344893a
7e90ba580736a1cf03fbeb8461b5b746599f2008
/core/config.py
418f320e38639a791466c7335a7406d02e1fca01
[]
no_license
ppaydd/problem_count
7b455b51707cc0a7dac6a7e10043ae9874ba2e80
cd85d1ca0f877213d505962c627f5f241827ea8c
refs/heads/master
2021-01-12T19:20:02.536722
2016-04-12T06:39:38
2016-04-12T06:39:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,056
py
import os

headers = {
    'User-Agent': '''Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36''',
    'Connection': 'keep-alive',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
}

# Error Code
get_data_failed = -1
match_user_failed = -2

# Maximum Process Number
# depends on the number of cores
MPN = os.cpu_count()

# Spider Time Limits
TIME = 10

# Each OJ's URL
codeforces_url = 'http://codeforces.com/api/user.status?handle={0}&from=1&count=1000000000'
hdu_url = 'http://acm.hdu.edu.cn/search.php?field=author&key='
fzu_url = 'http://acm.fzu.edu.cn/user.php?uname='
poj_url = 'http://poj.org/searchuser?key={0}&B1=Search'
noj_url = 'https://ac.2333.moe/User/user_list.xhtml?page='
spoj_url = 'http://www.spoj.com/ranks/users/start='
lightoj_login_url = 'http://lightoj.com/login_check.php'
lightoj_userlist_url = 'http://lightoj.com/volume_ranklist.php?rank_start='
bzoj_url = 'http://www.lydsy.com/JudgeOnline/userinfo.php?user='
sgu_url = 'http://acm.sgu.ru/teaminfo.php?id='
ural_url = 'http://acm.timus.ru/search.aspx?Str='
zoj_url = 'http://www.icpc.moe/onlinejudge/showRuns.do?contestId=1&search=true&firstId=-1&lastId=-1&problemCode=&handle={0}&idStart=&idEnd='
acdream_url = 'http://acdream.info/user/'
nyist_url = 'http://acm.nyist.edu.cn/JudgeOnline/profile.php?userid='

# Corresponding regular expression patterns.
# hdu
hdu_table_pattern = '<table width="80%" border="0" align="center" cellspacing="2" class=\'TABLE_TEXT\'>([\s\S]*?)</table>'
hdu_td_pattern = '<td>([\s\S]*?)</td>'
hdu_username_pattern = '<A href="[\s\S]*?">([\s\S]*?)</A>'
hdu_ac_number_pattern = '<A href="[\s\S]*?">([\s\S]*?)</A>'
hdu_submit_number_pattern = '<A href="[\s\S]*?">([\s\S]*?)</A>'

# lightoj
lightoj_page_count_pattern = '<a class="user_link" style="color: #c75f3e;" href="[\s\S]*?">([\s\S]*?)</a>'
lightoj_tr_data_one_pattern = '<tr class="newone">([\s\S]*?)</tr>'
lightoj_tr_data_two_pattern = '<tr class="newtwo">([\s\S]*?)</tr>'
lightoj_user_data_two_pattern = '<a class="user_link_newtwo" href="[\s\S]*?">([\s\S]*?)</a>'
lightoj_user_data_one_pattern = '<a class="user_link_newone" href="[\s\S]*?">([\s\S]*?)</a>'
lightoj_td_data_one_pattern = '<td class="newone" >([\s\S]*?)</td>'
lightoj_td_data_two_pattern = '<td class="newtwo" >([\s\S]*?)</td>'

# noj
noj_page_count_pattern = '<a title="尾页" href="([\s\S]*?)" class="page_a">'
noj_td_pattern = '<td style="text-align: center;">([\s\S]*?)</td>'
noj_username_pattern = '<a target="_blank" href="[\s\S]*?">([\s\S]*?)</a>'

# poj
poj_table_pattern = '<table border=1 width=80%>([\s\S]*?)</table>'
poj_td_pattern = '<td>([\s\S]*?)</td>'
poj_username_pattern = '<a href=[\s\S]*?>([\s\S]*?)</a>'

# sgu
sgu_table_pattern = '<table width=90% align=center>([\s\S]*?)</table>'
sgu_tr_pattern = '<td>([\s\S]*?)</td>'
sgu_ac_number_pattern = 'Accepted: ([0-9]*)'

# bzoj
bzoj_ac_pattern = '<a href=\'[\s\S]*?jresult=4\'>([\s\S]*?)</a>'

# zoj
zoj_user_pattern = '<td class="runUserName"><a href="([\s\S]*?)"><font color="db6d00">[\s\S]*?</font></a></td>'
zoj_div_pattern = '<div id="content_body">([\s\S]*?)</div>'
zoj_ac_pattern = '<font color="red" size="4">([\s\S]*?)</font>'

# acdream
acdream_ul_pattern = '<ul class="user-info">([\s\S]*?)</ul>'
acdream_ac_number_pattern = '<a href="[\s\S]*?">([\s\S]*?)</a>'

# fzu
fzu_ac_number_pattern = '<td>([\d]*?)</td>'

# ural
ural_table_pattern = '<TABLE WIDTH="100%" CLASS="ranklist">([\s\S]*?)</TABLE>'
ural_tr_pattern = '<TR CLASS="content">([\s\S]*?)</TR>'
ural_user_pattern = '<A HREF=[\s\S]*>([\s\S]*?)</A>'
ural_ac_number_pattern = '<TD>([\d]*)</TD>'

# database configuration
database = {
    'host': 'localhost',
    'db': 'problem_count',
    'user': 'root',
    'password': 'zc87496604',
    'charset': 'utf8',
}

# sql
find_data = 'select id, ac_number from {0} where username=\"{1}\"'
insert_data = 'insert into {0} (username, ac_number) values (\"{1}\", {2})'
update_data = 'update {0} set username=\"{1}\",ac_number={2} where id={3}'
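# Usage sketch for the SQL templates above (table and user names are
# illustrative): the first placeholder is the per-OJ table name.
#
#     sql = find_data.format('codeforces', 'tourist')
#     # -> 'select id, ac_number from codeforces where username="tourist"'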
88a8301be9b8c59278ac84736ea6dc73a1306874
260856baeb517cf64a386341f0cfff30c628a987
/5.1.py
808d95fe4de9391f12fd2dce6f10a0dd7849c6e3
[]
no_license
awstnx/vtip
a702d58614128d693fc9f342dc5ce8a4d3b5ecc2
31377b4610004bc8a7813aeca76960f46af8d442
refs/heads/master
2022-12-21T21:49:57.050309
2020-09-29T15:57:57
2020-09-29T15:57:57
295,413,097
0
0
null
null
null
null
UTF-8
Python
false
false
1,062
py
def bmi(mass, height):
    """Computes the body mass index from the entered mass and height of a person."""
    return (mass / ((height / 100) ** 2))

person_mass, person_height = map(float, input('Enter your mass in kilograms and your height in centimeters, separated by a space: ').split())
BodyMassIndex = bmi(person_mass, person_height)

if BodyMassIndex < 16:
    print('Severe underweight')
elif 16 <= BodyMassIndex < 18.5:
    print('Underweight')
elif 18.5 <= BodyMassIndex < 25:
    print('Normal body mass')
elif 25 <= BodyMassIndex < 30:
    print('Overweight')
elif 30 <= BodyMassIndex < 35:
    print('Obesity, first degree')
elif 35 <= BodyMassIndex < 40:
    print('Obesity, second degree')
elif BodyMassIndex >= 40:
    print('Obesity, third degree')
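# Worked example (values are illustrative): for 70 kg and 175 cm,
# bmi(70, 175) = 70 / (1.75 ** 2) = 70 / 3.0625 ≈ 22.9, which falls in the
# 18.5 <= BMI < 25 branch and prints 'Normal body mass'.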
71353370837cd21408b171136866a05a30ffa482
0d4ec25fb2819de88a801452f176500ccc269724
/search_sorted_matrx.py
e1e912f49ceb5959ab470dfeaec97c8d36ef1e22
[]
no_license
zopepy/leetcode
7f4213764a6a079f58402892bd0ede0514e06fcf
3bfee704adb1d94efc8e531b732cf06c4f8aef0f
refs/heads/master
2022-01-09T16:13:09.399620
2019-05-29T20:00:11
2019-05-29T20:00:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,079
py
class Solution:
    def searchMatrix(self, matrix, target):
        """
        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        if matrix == [] or matrix == [[]]:
            return False
        l = len(matrix)
        b = len(matrix[0])
        if matrix[0][0] > target or matrix[l-1][b-1] < target:
            return False
        left, right = 0, l - 1
        while left <= right:
            mid = (left + right) >> 1
            if matrix[mid][0] == target:
                return True
            elif matrix[mid][0] < target:
                left = mid + 1
            else:
                right = mid - 1
        left -= 1
        start, end = 0, b - 1
        while start <= end:
            mid = (start + end) >> 1
            if matrix[left][mid] == target:
                return True
            elif matrix[left][mid] < target:
                start = mid + 1
            else:
                end = mid - 1
        return False

inp = [[1,3,5,7],[10,11,16,20],[23,30,34,50]]
print(Solution().searchMatrix(inp, 24))
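# Trace of the demo call (target 24): the row-level binary search settles on
# row index 2, the last row whose first element (23) is <= 24; the column
# binary search over [23, 30, 34, 50] finds no 24, so the call prints False.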
01b54643ccf3a75120100a24b778a1accb4fb555
bbd603fcd9541ed8168c765ee7c84fc379c6b692
/scripts/e087_bert_question_adamw.py
a39ce51eb9ad183a96f109022c4df0854080f73a
[]
no_license
yoichi-yamakawa/kaggle-google-quest
2513a41889ffdb68e8f3bc3fb55a41a5dd873d0f
decffc69d5657f5114970eb2ea226df8ec8cfaf6
refs/heads/master
2021-01-04T16:58:27.894352
2020-02-11T00:46:04
2020-02-11T00:46:04
null
0
0
null
null
null
null
UTF-8
Python
false
false
11,732
py
import itertools
import os
import random
from logging import getLogger

import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import GroupKFold
from torch import optim
from torch.nn import BCEWithLogitsLoss, DataParallel
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from tqdm import tqdm
from transformers import BertModel, AdamW

from refactor.datasets import QUESTDataset
from refactor.models import BertModelForBinaryMultiLabelClassifier
from refactor.utils import compute_spearmanr, test, train_one_epoch
from utils import (load_checkpoint, logInit, parse_args,
                   save_and_clean_for_prediction, save_checkpoint, sel_log,
                   send_line_notification)

EXP_ID = os.path.basename(__file__).split('_')[0]
MNT_DIR = './mnt'
DEVICE = 'cuda'
MODEL_PRETRAIN = 'bert-base-uncased'
MODEL_CONFIG_PATH = './mnt/datasets/model_configs/bert-model-uncased-config.pkl'
TOKENIZER_TYPE = 'bert'
TOKENIZER_PRETRAIN = 'bert-base-uncased'
BATCH_SIZE = 8
MAX_EPOCH = 6
MAX_SEQ_LEN = 512
T_MAX_LEN = 30
Q_MAX_LEN = 239 * 2
A_MAX_LEN = 239 * 0
DO_LOWER_CASE = True if MODEL_PRETRAIN == 'bert-base-uncased' else False
LABEL_COL = [
    'question_asker_intent_understanding',
    'question_body_critical',
    'question_conversational',
    'question_expect_short_answer',
    'question_fact_seeking',
    'question_has_commonly_accepted_answer',
    'question_interestingness_others',
    'question_interestingness_self',
    'question_multi_intent',
    'question_not_really_a_question',
    'question_opinion_seeking',
    'question_type_choice',
    'question_type_compare',
    'question_type_consequence',
    'question_type_definition',
    'question_type_entity',
    'question_type_instructions',
    'question_type_procedure',
    'question_type_reason_explanation',
    'question_type_spelling',
    'question_well_written',
    # 'answer_helpful',
    # 'answer_level_of_information',
    # 'answer_plausible',
    # 'answer_relevance',
    # 'answer_satisfaction',
    # 'answer_type_instructions',
    # 'answer_type_procedure',
    # 'answer_type_reason_explanation',
    # 'answer_well_written'
]


def seed_everything(seed=71):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


seed_everything()


def main(args, logger):
    # trn_df = pd.read_csv(f'{MNT_DIR}/inputs/origin/train.csv')
    trn_df = pd.read_pickle(f'{MNT_DIR}/inputs/nes_info/trn_df.pkl')
    trn_df['is_original'] = 1

    gkf = GroupKFold(
        n_splits=5).split(
        X=trn_df.question_body,
        groups=trn_df.question_body_le,
    )

    histories = {
        'trn_loss': {},
        'val_loss': {},
        'val_metric': {},
        'val_metric_raws': {},
    }
    loaded_fold = -1
    loaded_epoch = -1
    if args.checkpoint:
        histories, loaded_fold, loaded_epoch = load_checkpoint(args.checkpoint)

    fold_best_metrics = []
    fold_best_metrics_raws = []
    for fold, (trn_idx, val_idx) in enumerate(gkf):
        if fold < loaded_fold:
            fold_best_metrics.append(np.max(histories["val_metric"][fold]))
            fold_best_metrics_raws.append(
                histories["val_metric_raws"][fold][np.argmax(histories["val_metric"][fold])])
            continue
        sel_log(
            f' --------------------------- start fold {fold} --------------------------- ',
            logger)
        fold_trn_df = trn_df.iloc[trn_idx]  # .query('is_original == 1')
        fold_trn_df = fold_trn_df.drop(
            ['is_original', 'question_body_le'], axis=1)
        # use only original row
        fold_val_df = trn_df.iloc[val_idx].query('is_original == 1')
        fold_val_df = fold_val_df.drop(
            ['is_original', 'question_body_le'], axis=1)
        if args.debug:
            fold_trn_df = fold_trn_df.sample(100, random_state=71)
            fold_val_df = fold_val_df.sample(100, random_state=71)
        temp = pd.Series(list(itertools.chain.from_iterable(
            fold_trn_df.question_title.apply(lambda x: x.split(' ')) +
            fold_trn_df.question_body.apply(lambda x: x.split(' ')) +
            fold_trn_df.answer.apply(lambda x: x.split(' '))
        ))).value_counts()
        tokens = temp[temp >= 10].index.tolist()
        # tokens = []
        tokens = [
            'CAT_TECHNOLOGY'.casefold(),
            'CAT_STACKOVERFLOW'.casefold(),
            'CAT_CULTURE'.casefold(),
            'CAT_SCIENCE'.casefold(),
            'CAT_LIFE_ARTS'.casefold(),
        ]

        trn_dataset = QUESTDataset(
            df=fold_trn_df,
            mode='train',
            tokens=tokens,
            augment=[],
            tokenizer_type=TOKENIZER_TYPE,
            pretrained_model_name_or_path=TOKENIZER_PRETRAIN,
            do_lower_case=DO_LOWER_CASE,
            LABEL_COL=LABEL_COL,
            t_max_len=T_MAX_LEN,
            q_max_len=Q_MAX_LEN,
            a_max_len=A_MAX_LEN,
            tqa_mode='tq_a',
            TBSEP='[TBSEP]',
            pos_id_type='arange',
            MAX_SEQUENCE_LENGTH=MAX_SEQ_LEN,
        )  # update token
        trn_sampler = RandomSampler(data_source=trn_dataset)
        trn_loader = DataLoader(trn_dataset,
                                batch_size=BATCH_SIZE,
                                sampler=trn_sampler,
                                num_workers=os.cpu_count(),
                                worker_init_fn=lambda x: np.random.seed(),
                                drop_last=True,
                                pin_memory=True)
        val_dataset = QUESTDataset(
            df=fold_val_df,
            mode='valid',
            tokens=tokens,
            augment=[],
            tokenizer_type=TOKENIZER_TYPE,
            pretrained_model_name_or_path=TOKENIZER_PRETRAIN,
            do_lower_case=DO_LOWER_CASE,
            LABEL_COL=LABEL_COL,
            t_max_len=T_MAX_LEN,
            q_max_len=Q_MAX_LEN,
            a_max_len=A_MAX_LEN,
            tqa_mode='tq_a',
            TBSEP='[TBSEP]',
            pos_id_type='arange',
            MAX_SEQUENCE_LENGTH=MAX_SEQ_LEN,
        )
        val_sampler = RandomSampler(data_source=val_dataset)
        val_loader = DataLoader(val_dataset,
                                batch_size=BATCH_SIZE,
                                sampler=val_sampler,
                                num_workers=os.cpu_count(),
                                worker_init_fn=lambda x: np.random.seed(),
                                drop_last=False,
                                pin_memory=True)

        fobj = BCEWithLogitsLoss()
        state_dict = BertModel.from_pretrained(MODEL_PRETRAIN).state_dict()
        model = BertModelForBinaryMultiLabelClassifier(num_labels=len(LABEL_COL),
                                                       config_path=MODEL_CONFIG_PATH,
                                                       state_dict=state_dict,
                                                       token_size=len(
                                                           trn_dataset.tokenizer),
                                                       MAX_SEQUENCE_LENGTH=MAX_SEQ_LEN,
                                                       )
        # optimizer = optim.Adam(model.parameters(), lr=3e-5)
        # optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9, nesterov=True)
        optimizer = AdamW(model.parameters(), lr=3e-5, correct_bias=False, eps=1e-7)
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=MAX_EPOCH, eta_min=1e-5)

        # load checkpoint model, optim, scheduler
        if args.checkpoint and fold == loaded_fold:
            load_checkpoint(args.checkpoint, model, optimizer, scheduler)

        for epoch in tqdm(list(range(MAX_EPOCH))):
            if fold <= loaded_fold and epoch <= loaded_epoch:
                continue
            if epoch < 1:
                model.freeze_unfreeze_bert(freeze=True, logger=logger)
            else:
                model.freeze_unfreeze_bert(freeze=False, logger=logger)
            model = DataParallel(model)
            model = model.to(DEVICE)
            trn_loss = train_one_epoch(model, fobj, optimizer, trn_loader, DEVICE)
            val_loss, val_metric, val_metric_raws, val_y_preds, val_y_trues, val_qa_ids = test(
                model, fobj, val_loader, DEVICE, mode='valid')
            scheduler.step()
            if fold in histories['trn_loss']:
                histories['trn_loss'][fold].append(trn_loss)
            else:
                histories['trn_loss'][fold] = [trn_loss, ]
            if fold in histories['val_loss']:
                histories['val_loss'][fold].append(val_loss)
            else:
                histories['val_loss'][fold] = [val_loss, ]
            if fold in histories['val_metric']:
                histories['val_metric'][fold].append(val_metric)
            else:
                histories['val_metric'][fold] = [val_metric, ]
            if fold in histories['val_metric_raws']:
                histories['val_metric_raws'][fold].append(val_metric_raws)
            else:
                histories['val_metric_raws'][fold] = [val_metric_raws, ]
            logging_val_metric_raws = ''
            for val_metric_raw in val_metric_raws:
                logging_val_metric_raws += f'{float(val_metric_raw):.4f}, '
            sel_log(
                f'fold : {fold} -- epoch : {epoch} -- '
                f'trn_loss : {float(trn_loss.detach().to("cpu").numpy()):.4f} -- '
                f'val_loss : {float(val_loss.detach().to("cpu").numpy()):.4f} -- '
                f'val_metric : {float(val_metric):.4f} -- '
                f'val_metric_raws : {logging_val_metric_raws}',
                logger)
            model = model.to('cpu')
            model = model.module
            save_checkpoint(
                f'{MNT_DIR}/checkpoints/{EXP_ID}/{fold}',
                model, optimizer, scheduler,
                histories, val_y_preds, val_y_trues,
                val_qa_ids, fold, epoch, val_loss, val_metric)
        fold_best_metrics.append(np.max(histories["val_metric"][fold]))
        fold_best_metrics_raws.append(
            histories["val_metric_raws"][fold][np.argmax(histories["val_metric"][fold])])
        save_and_clean_for_prediction(
            f'{MNT_DIR}/checkpoints/{EXP_ID}/{fold}',
            trn_dataset.tokenizer, clean=False)
        del model

    # calc training stats
    fold_best_metric_mean = np.mean(fold_best_metrics)
    fold_best_metric_std = np.std(fold_best_metrics)
    fold_stats = f'{EXP_ID} : {fold_best_metric_mean:.4f} +- {fold_best_metric_std:.4f}'
    sel_log(fold_stats, logger)
    send_line_notification(fold_stats)

    fold_best_metrics_raws_mean = np.mean(fold_best_metrics_raws, axis=0)
    fold_raw_stats = ''
    for metric_stats_raw in fold_best_metrics_raws_mean:
        fold_raw_stats += f'{float(metric_stats_raw):.4f},'
    sel_log(fold_raw_stats, logger)
    send_line_notification(fold_raw_stats)

    sel_log('now saving best checkpoints...', logger)


if __name__ == '__main__':
    args = parse_args(None)
    log_file = f'{EXP_ID}.log'
    logger = getLogger(__name__)
    logger = logInit(logger, f'{MNT_DIR}/logs/', log_file)
    sel_log(f'args: {sorted(vars(args).items())}', logger)

    # send_line_notification(f' ------------- start {EXP_ID} ------------- ')
    main(args, logger)
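The evaluation helpers imported from refactor.utils are not shown in this record. For orientation, a minimal sketch of the mean column-wise Spearman correlation that QUEST-style pipelines of this kind typically report (an assumption about what compute_spearmanr does, not the repository's actual implementation):

import numpy as np
from scipy.stats import spearmanr

def mean_columnwise_spearman(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    # y_true, y_pred: arrays of shape (n_samples, n_labels)
    corrs = [spearmanr(y_true[:, i], y_pred[:, i]).correlation
             for i in range(y_true.shape[1])]
    # nanmean guards against constant columns, which yield NaN correlations
    return float(np.nanmean(corrs))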
c0568f477ed273def386782acf8cb794c81ac227
1bab2b06c7cc813c0eff23e63783859ae60c7e73
/ex9.py
d20d92d79f6a5f414212ea1548a5b228af2365b3
[]
no_license
asimkaleem/LPTHW
a19681e9927817034a8fd18c9f3e037766f59648
f565e2b1581d0453f83d3e40a0e0bb65d2bb000b
refs/heads/master
2021-09-10T15:01:27.357864
2018-03-28T06:52:54
2018-03-28T06:52:54
126,018,322
0
0
null
null
null
null
UTF-8
Python
false
false
378
py
# Here's some new strange stuff, remember type it exactly.

days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"

print "Here are the days: ", days
print "Here are the months: ", months

print """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
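This exercise targets Python 2's print statement. For reference, a Python 3 rendering (an assumption for illustration, not part of the LPTHW source) swaps in the print() function:

days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"

print("Here are the days: ", days)      # print is a function in Python 3
print("Here are the months: ", months)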
7eb4211874de5cb57b04a4c673199d8f475ebd62
77b4bb15ed2cd7d4db07f0098ff1a6638790c3d8
/tests/test_mongo_controller_auto_increment.py
1db6cc8280a3b5577927397de3c49a908b11b4fa
[ "MIT" ]
permissive
Simon-Le/layabase
d10276d1dc37a4d36b39aad89b0bc00d818ae2d7
4670a3d0849785e22b80a88af634c69220cf0113
refs/heads/develop
2020-12-03T15:03:08.352875
2020-04-13T23:59:33
2020-04-13T23:59:33
231,363,516
0
0
MIT
2020-04-13T20:04:17
2020-01-02T10:58:46
null
UTF-8
Python
false
false
10614
py
import enum

import flask
import flask_restplus
import pytest
from layaberr import ValidationFailed

import layabase
import layabase.mongo


class EnumTest(enum.Enum):
    Value1 = 1
    Value2 = 2


@pytest.fixture
def controller():
    class TestCollection:
        __collection_name__ = "test"

        key = layabase.mongo.Column(
            int, is_primary_key=True, should_auto_increment=True
        )
        enum_field = layabase.mongo.Column(
            EnumTest, is_nullable=False, description="Test Documentation"
        )
        optional_with_default = layabase.mongo.Column(str, default_value="Test value")

    controller = layabase.CRUDController(TestCollection)
    layabase.load("mongomock", [controller])
    return controller


@pytest.fixture
def app(controller):
    application = flask.Flask(__name__)
    application.testing = True
    api = flask_restplus.Api(application)
    namespace = api.namespace("Test", path="/")

    controller.namespace(namespace)

    @namespace.route("/test")
    class TestResource(flask_restplus.Resource):
        @namespace.expect(controller.query_get_parser)
        @namespace.marshal_with(controller.get_response_model)
        def get(self):
            return []

        @namespace.expect(controller.json_post_model)
        def post(self):
            return []

        @namespace.expect(controller.json_put_model)
        def put(self):
            return []

        @namespace.expect(controller.query_delete_parser)
        def delete(self):
            return []

    return application


def test_post_with_specified_incremented_field_is_ignored_and_valid(client, controller):
    assert controller.post({"key": "my_key", "enum_field": "Value1"}) == {
        "optional_with_default": "Test value",
        "key": 1,
        "enum_field": "Value1",
    }


def test_post_with_enum_is_valid(client, controller):
    assert controller.post({"key": "my_key", "enum_field": EnumTest.Value1}) == {
        "optional_with_default": "Test value",
        "key": 1,
        "enum_field": "Value1",
    }


def test_post_with_invalid_enum_choice_is_invalid(client, controller):
    with pytest.raises(ValidationFailed) as exception_info:
        controller.post({"key": "my_key", "enum_field": "InvalidValue"})
    assert exception_info.value.errors == {
        "enum_field": ["Value \"InvalidValue\" is not within ['Value1', 'Value2']."]
    }
    assert exception_info.value.received_data == {"enum_field": "InvalidValue"}


def test_post_many_with_specified_incremented_field_is_ignored_and_valid(
    client, controller
):
    assert controller.post_many(
        [
            {"key": "my_key", "enum_field": "Value1"},
            {"key": "my_key", "enum_field": "Value2"},
        ]
    ) == [
        {"optional_with_default": "Test value", "enum_field": "Value1", "key": 1},
        {"optional_with_default": "Test value", "enum_field": "Value2", "key": 2},
    ]


def test_open_api_definition(client):
    response = client.get("/swagger.json")
    assert response.json == {
        "swagger": "2.0",
        "basePath": "/",
        "paths": {
            "/test": {
                "post": {
                    "responses": {"200": {"description": "Success"}},
                    "operationId": "post_test_resource",
                    "parameters": [
                        {
                            "name": "payload",
                            "required": True,
                            "in": "body",
                            "schema": {
                                "$ref": "#/definitions/TestCollection_PostRequestModel"
                            },
                        }
                    ],
                    "tags": ["Test"],
                },
                "put": {
                    "responses": {"200": {"description": "Success"}},
                    "operationId": "put_test_resource",
                    "parameters": [
                        {
                            "name": "payload",
                            "required": True,
                            "in": "body",
                            "schema": {
                                "$ref": "#/definitions/TestCollection_PutRequestModel"
                            },
                        }
                    ],
                    "tags": ["Test"],
                },
                "delete": {
                    "responses": {"200": {"description": "Success"}},
                    "operationId": "delete_test_resource",
                    "parameters": [
                        {
                            "name": "key",
                            "in": "query",
                            "type": "array",
                            "items": {"type": "integer"},
                            "collectionFormat": "multi",
                        },
                        {
                            "name": "enum_field",
                            "in": "query",
                            "type": "array",
                            "items": {"type": "string"},
                            "collectionFormat": "multi",
                        },
                        {
                            "name": "optional_with_default",
                            "in": "query",
                            "type": "array",
                            "items": {"type": "string"},
                            "collectionFormat": "multi",
                        },
                    ],
                    "tags": ["Test"],
                },
                "get": {
                    "responses": {
                        "200": {
                            "description": "Success",
                            "schema": {
                                "$ref": "#/definitions/TestCollection_GetResponseModel"
                            },
                        }
                    },
                    "operationId": "get_test_resource",
                    "parameters": [
                        {
                            "name": "key",
                            "in": "query",
                            "type": "array",
                            "items": {"type": "integer"},
                            "collectionFormat": "multi",
                        },
                        {
                            "name": "enum_field",
                            "in": "query",
                            "type": "array",
                            "items": {"type": "string"},
                            "collectionFormat": "multi",
                        },
                        {
                            "name": "optional_with_default",
                            "in": "query",
                            "type": "array",
                            "items": {"type": "string"},
                            "collectionFormat": "multi",
                        },
                        {
                            "name": "limit",
                            "in": "query",
                            "type": "integer",
                            "minimum": 0,
                            "exclusiveMinimum": True,
                        },
                        {
                            "name": "offset",
                            "in": "query",
                            "type": "integer",
                            "minimum": 0,
                        },
                        {
                            "name": "X-Fields",
                            "in": "header",
                            "type": "string",
                            "format": "mask",
                            "description": "An optional fields mask",
                        },
                    ],
                    "tags": ["Test"],
                },
            }
        },
        "info": {"title": "API", "version": "1.0"},
        "produces": ["application/json"],
        "consumes": ["application/json"],
        "tags": [{"name": "Test"}],
        "definitions": {
            "TestCollection_PostRequestModel": {
                "properties": {
                    "enum_field": {
                        "type": "string",
                        "description": "Test Documentation",
                        "readOnly": False,
                        "example": "Value1",
                        "enum": ["Value1", "Value2"],
                    },
                    "key": {"type": "integer", "readOnly": True, "example": 1},
                    "optional_with_default": {
                        "type": "string",
                        "readOnly": False,
                        "default": "Test value",
                        "example": "Test value",
                    },
                },
                "type": "object",
            },
            "TestCollection_PutRequestModel": {
                "properties": {
                    "enum_field": {
                        "type": "string",
                        "description": "Test Documentation",
                        "readOnly": False,
                        "example": "Value1",
                        "enum": ["Value1", "Value2"],
                    },
                    "key": {"type": "integer", "readOnly": True, "example": 1},
                    "optional_with_default": {
                        "type": "string",
                        "readOnly": False,
                        "default": "Test value",
                        "example": "Test value",
                    },
                },
                "type": "object",
            },
            "TestCollection_GetResponseModel": {
                "properties": {
                    "enum_field": {
                        "type": "string",
                        "description": "Test Documentation",
                        "readOnly": False,
                        "example": "Value1",
                        "enum": ["Value1", "Value2"],
                    },
                    "key": {"type": "integer", "readOnly": True, "example": 1},
                    "optional_with_default": {
                        "type": "string",
                        "readOnly": False,
                        "default": "Test value",
                        "example": "Test value",
                    },
                },
                "type": "object",
            },
        },
        "responses": {
            "ParseError": {"description": "When a mask can't be parsed"},
            "MaskError": {"description": "When any error occurs on mask"},
        },
    }
08d8980c21d8013ca60ccf34189734c1caf085e7
efa5d0866a8a0aa9dd9dde27f0d9d9c1c9f551c1
/setup.py
64e766a5d40a628bcba3f62dc0b975d579775bed
[ "Apache-2.0" ]
permissive
miketwo/pyschedule
7fef3dfb77259ae34a46535292bc6b40cbbfb1c8
792305faae9d0413ed22e7c57d5e9610fded7751
refs/heads/master
2021-01-10T22:22:09.056650
2016-06-04T23:54:25
2016-06-04T23:54:25
60,433,964
0
0
null
2016-06-04T23:02:41
2016-06-04T23:02:37
Python
UTF-8
Python
false
false
596
py
from setuptools import setup, find_packages

setup(name='pyschedule',
      version='0.2.13',
      description='A python package to formulate and solve resource-constrained '
                  'scheduling problems: flow- and job-shop, travelling salesman, '
                  'vehicle routing and all kind of combinations',
      url='https://github.com/timnon/pyschedule',
      author='Tim Nonner',
      author_email='[email protected]',
      license='Apache 2.0',
      packages=['pyschedule', 'pyschedule.solvers', 'pyschedule.plotters'],
      package_dir={'': 'src'},
      include_package_data=True,
      install_requires=['pulp'])
bc481205615d5df87eba3418a666917f5ec8ce66
157c7325539a713b35bb418913303d5a9036ac56
/vision_cv_google.py
cb616651c4d4c26a66226f97ac3f40c01f4bc0f1
[ "MIT" ]
permissive
konsan1101/pycv3
f06ee37a636a6499463a286a64f9b1ccf47310c5
12688afb54a133f8706df2da9c7d3e34d1e70590
refs/heads/master
2020-06-03T11:44:49.664856
2019-06-12T13:36:17
2019-06-12T13:36:17
191,554,751
0
0
null
null
null
null
UTF-8
Python
false
false
4506
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import numpy as np
import cv2
import base64
from requests import Request, Session
# from bs4 import BeautifulSoup
import json

# Google
# GOOGLE_VISION_KEY = 'xx'
GOOGLE_VISION_KEY = 'xx'


def google_vision(image_path):
    global GOOGLE_VISION_KEY
    bin_image = open(image_path, 'rb').read()
    # enc_image = base64.b64encode(bin_image)
    enc_image = base64.b64encode(bin_image).decode("utf-8")

    str_url = "https://vision.googleapis.com/v1/images:annotate?key="
    str_headers = {'Content-Type': 'application/json'}
    str_json_data = {
        'requests': [
            {
                'image': {
                    'content': enc_image
                },
                'features': [
                    {
                        'type': "LABEL_DETECTION",
                        'maxResults': 10
                    },
                    {
                        'type': "TEXT_DETECTION",
                        'maxResults': 10
                    }
                ]
            }
        ]
    }

    # print("begin request")
    obj_session = Session()
    obj_request = Request("POST",
                          str_url + GOOGLE_VISION_KEY,
                          data=json.dumps(str_json_data),
                          headers=str_headers
                          )
    obj_prepped = obj_session.prepare_request(obj_request)
    obj_response = obj_session.send(obj_prepped,
                                    verify=True,
                                    timeout=60
                                    )
    # print("end request")

    if obj_response.status_code == 200:
        print(obj_response.text)
        # with open('data.json', 'w') as outfile:
        #     json.dump(obj_response.text, outfile)
        return obj_response.text
    else:
        print(obj_response.text)
        return "error"


if __name__ == '__main__':
    print("main init")
    img = "CaptureImage.jpg"
    txt = "CaptureText.txt"
    lng = "ja"
    if len(sys.argv) >= 2:
        img = sys.argv[1]
    if len(sys.argv) >= 3:
        txt = sys.argv[2]
    if len(sys.argv) >= 4:
        lng = sys.argv[3]

    print("main image proc")
    image_img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
    if len(image_img.shape) == 3:
        image_height, image_width, image_channels = image_img.shape[:3]
    else:
        image_height, image_width = image_img.shape[:2]
        image_channels = 1

    # if (img=='Test_Image_1.jpg' or img=='CaptureName.jpg') and image_channels == 3:
    #     temp_img = np.zeros((image_height*2,image_width*2,3), np.uint8)
    #     cv2.rectangle(temp_img,(0,0),(image_width*2,image_height*2),(255,255,255),-1)
    #     temp_img[0+image_height/2:image_height/2+image_height, 0+image_width/2:image_width/2+image_width] = image_img.copy()
    #     image_img = cv2.resize(temp_img, (image_width, image_height))

    if image_channels != 1:
        gray_img = cv2.cvtColor(image_img, cv2.COLOR_BGR2GRAY)
    else:
        gray_img = image_img.copy()
    # hist_img = cv2.equalizeHist(gray_img)
    # blur_img = cv2.blur(gray_img, (3,3), 0)
    _, thresh_img = cv2.threshold(gray_img, 140, 255, cv2.THRESH_BINARY)

    temp_img = image_img.copy()
    # temp_img = gray_img.copy()
    # temp_img = thresh_img.copy()
    cv2.imwrite("temp/@" + img, temp_img)
    # cv2.imshow("Base", temp_img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # jpg_parm = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    # _, img_data = cv2.imencode('.jpg', temp_img, jpg_parm)
    # img_data64 = base64.b64encode(img_data)
    img_data = open("temp/@" + img, 'rb')

    print("main Google AI")
    res = google_vision("temp/@" + img)
    print("")
    print(res)
    print("")

    js = json.loads(res)
    data = js["responses"]
    # print(data)
    # print( json.dumps( data, sort_keys = True, indent = 4) )

    f = None  # guard: the original's finally block could hit an unbound name
    try:
        print("")
        s = "[ LABEL_DETECTION ]"
        f = open(txt, 'w')
        print(s)
        f.writelines(s)
        for t in data:
            for d in t["labelAnnotations"]:
                print(str(d["description"]))
                f.writelines(str(d["description"]))
    except:
        pass
    finally:
        if f is not None:
            f.close()

    print("")
    print("main Bye!")
    print("")
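The request above also asks for TEXT_DETECTION, but only labelAnnotations are printed. A hypothetical extension (field names per the Vision API's documented response shape, not code from this script) would read the OCR results as well:

for t in data:
    for d in t.get("textAnnotations", []):
        # the first entry is the full detected text block; the rest are per-word boxes
        print(d["description"])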
bd3182dba60a1531c8779781ab6bbc9f1099b12c
fd7a2c2265363dc06c9e23c1ce3182bb99be6b70
/ingest/ingest_dx_tomo.py
20d4fce4584a768b2270fa93933601f5ba776895
[]
no_license
als-computing/scicatlive-modifications
093d03e0027c30173c1140139b6dd708dbfc1fee
5186511fc1554a1c6d349e14ddd72cf41d1ac1b4
refs/heads/main
2023-03-05T05:51:03.878037
2021-02-22T20:03:07
2021-02-22T20:03:07
338,426,336
0
0
null
null
null
null
UTF-8
Python
false
false
11781
py
import h5py
import json
import sys
import datetime
import hashlib
import urllib
import base64
import logging  # (duplicate 'import json' and 'import h5py' lines removed)
from pathlib import Path
from pprint import pprint

import numpy as np
import requests  # for HTTP requests

from splash_ingest.ingestors import MappedHD5Ingestor
from splash_ingest.model import Mapping


class ScicatTomo(object):
    # settables
    host = "noether.lbl.gov"
    baseurl = "http://" + host + "/api/v3/"  # (restored: the API calls below rely on baseurl being defined)
    # timeouts = (4, 8)  # we are hitting a transmission timeout...
    timeouts = None  # we are hitting a transmission timeout...
    sslVerify = False  # do not check certificate
    username = "ingestor"  # default username
    password = "aman"  # default password
    # You should see a nice, but abbreviated table here with the logbook contents.
    token = None  # store token here
    settables = ['host', 'baseurl', 'timeouts', 'sslVerify', 'username', 'password', 'token']
    pid = None  # gets set if you search for something
    entries = None  # gets set if you search for something
    datasetType = "RawDatasets"
    datasetTypes = ["RawDatasets", "DerivedDatasets", "Proposals"]

    def __init__(self, **kwargs):
        # nothing to do
        for key, value in kwargs.items():
            assert key in self.settables, f"key {key} is not a valid input argument"
            setattr(self, key, value)
        # get token
        self.token = self.get_token(username=self.username, password=self.password)

    def get_token(self, username=None, password=None):
        if username is None:
            username = self.username
        if password is None:
            password = self.password
        """logs in using the provided username / password combination and receives token for further communication use"""
        logging.info("Getting new token ...")
        response = requests.post(
            self.baseurl + "Users/login",
            json={"username": username, "password": password},
            timeout=self.timeouts,
            stream=False,
            verify=self.sslVerify,
        )
        if not response.ok:
            logging.error(f'** Error received: {response}')
            err = response.json()["error"]
            logging.error(f'{err["name"]}, {err["statusCode"]}: {err["message"]}')
            sys.exit(1)  # does not make sense to continue here
            data = response.json()
            logging.error(f"Response: {data}")

        data = response.json()
        # print("Response:", data)
        token = data["id"]  # not sure if semantically correct
        logging.info(f"token: {token}")
        self.token = token  # store new token
        return token

    def send_to_scicat(self, url, dataDict=None, cmd="post"):
        """ sends a command to the SciCat API server using url and token, returns the response JSON
        Get token with the getToken method"""
        if cmd == "post":
            response = requests.post(
                url,
                params={"access_token": self.token},
                json=dataDict,
                timeout=self.timeouts,
                stream=False,
                verify=self.sslVerify,
            )
        elif cmd == "delete":
            response = requests.delete(
                url,
                params={"access_token": self.token},
                timeout=self.timeouts,
                stream=False,
                verify=self.sslVerify,
            )
        elif cmd == "get":
            response = requests.get(
                url,
                params={"access_token": self.token},
                json=dataDict,
                timeout=self.timeouts,
                stream=False,
                verify=self.sslVerify,
            )
        elif cmd == "patch":
            response = requests.patch(
                url,
                params={"access_token": self.token},
                json=dataDict,
                timeout=self.timeouts,
                stream=False,
                verify=self.sslVerify,
            )
        rdata = response.json()
        if not response.ok:
            err = response.json()["error"]
            logging.error(f'{err["name"]}, {err["statusCode"]}: {err["message"]}')
            logging.error("returning...")
            rdata = response.json()
            logging.error(f"Response: {json.dumps(rdata, indent=4)}")
        return rdata

    def get_file_size_from_path_obj(self, pathobj):
        filesize = pathobj.lstat().st_size
        return filesize

    def getFileChecksumFromPathObj(self, pathobj):
        # open in binary mode: hashlib.md5 needs bytes (the original opened in text mode)
        with open(pathobj, 'rb') as file_to_check:
            # pipe contents of the file through
            return hashlib.md5(file_to_check.read()).hexdigest()

    def getFileModTimeFromPathObj(self, pathobj):
        # assumed helper: the original calls this method from add_data_block but
        # never defines it; an ISO-formatted mtime matches how the value is used
        return datetime.datetime.fromtimestamp(pathobj.lstat().st_mtime).isoformat()

    def clear_previous_attachments(self, datasetId, datasetType):
        # remove previous entries to avoid tons of attachments to a particular dataset.
        # todo: needs appropriate permissions!
        self.get_entries(url=self.baseurl + "Attachments", whereDict={"datasetId": str(datasetId)})
        for entry in self.entries:
            url = self.baseurl + f"Attachments/{urllib.parse.quote_plus(entry['id'])}"
            self.send_to_scicat(url, {}, cmd="delete")

    def add_data_block(self, datasetId=None, filename=None, datasetType="RawDatasets", clearPrevious=False):
        if clearPrevious:
            self.clear_previous_attachments(datasetId, datasetType)

        dataBlock = {
            # "id": pid,
            "size": self.get_file_size_from_path_obj(filename),
            "dataFileList": [
                {
                    "path": str(filename.absolute()),
                    "size": self.get_file_size_from_path_obj(filename),
                    "time": self.getFileModTimeFromPathObj(filename),
                    "chk": "",  # do not do remote: getFileChecksumFromPathObj(filename)
                    "uid": str(
                        filename.stat().st_uid
                    ),  # not implemented on windows: filename.owner(),
                    "gid": str(filename.stat().st_gid),
                    "perm": str(filename.stat().st_mode),
                }
            ],
            "ownerGroup": "BAM 6.5",
            "accessGroups": ["BAM", "BAM 6.5"],
            "createdBy": "datasetUpload",
            "updatedBy": "datasetUpload",
            "datasetId": datasetId,
            "updatedAt": datetime.datetime.isoformat(datetime.datetime.utcnow()) + "Z",
            "createdAt": datetime.datetime.isoformat(datetime.datetime.utcnow()) + "Z",
            # "createdAt": "",
            # "updatedAt": ""
        }
        url = self.baseurl + f"{datasetType}/{urllib.parse.quote_plus(datasetId)}/origdatablocks"
        logging.debug(url)
        resp = self.send_to_scicat(url, dataBlock)
        return resp

    def get_entries(self, url, whereDict={}):
        # gets the complete response when searching for a particular entry based on a dictionary of keyword-value pairs
        resp = self.send_to_scicat(url, {"filter": {"where": whereDict}}, cmd="get")
        self.entries = resp
        return resp

    def get_pid(self, url, whereDict={}, returnIfNone=0, returnField='pid'):
        # returns only the (first matching) pid (or proposalId in case of proposals) matching a given search request
        resp = self.get_entries(url, whereDict)
        if resp == []:
            # no raw dataset available
            pid = returnIfNone
        else:
            pid = resp[0][returnField]
        self.pid = pid
        return pid

    def add_thumbnail(self, datasetId=None, filename=None, datasetType="RawDatasets", clearPrevious=False):
        if clearPrevious:
            self.clear_previous_attachments(datasetId, datasetType)

        def encodeImageToThumbnail(filename, imType='jpg'):
            header = "data:image/{imType};base64,".format(imType=imType)
            with open(filename, 'rb') as f:
                data = f.read()
            dataBytes = base64.b64encode(data)
            dataStr = dataBytes.decode('UTF-8')
            return header + dataStr

        dataBlock = {
            "caption": filename.stem,
            "thumbnail": encodeImageToThumbnail(filename),
            "datasetId": datasetId,
            "ownerGroup": "BAM 6.5",
        }
        url = self.baseurl + f"{datasetType}/{urllib.parse.quote_plus(datasetId)}/attachments"
        logging.debug(url)
        resp = requests.post(
            url,
            params={"access_token": self.token},
            timeout=self.timeouts,
            stream=False,
            json=dataBlock,
            verify=self.sslVerify,
        )
        return resp


def doRaw(self, scm: ScicatTomo, file_name, run_start, thumbnail=None):
    # NOTE: work in progress -- this function still references names from the
    # sample script it was adapted from (scb, filename, datasetName, lbEntry,
    # pid, sciMeta, sampleId, uploadBit), and its signature does not match the
    # doRaw(doc, scm) call in gen_ev_docs below, so it does not run as-is.
    # scb = self.scb  # for convenience
    # year, datasetName, lbEntry = self.getLbEntryFromFileName(filename)
    # # this sets scs.year, scs.datasetName, scs.lbEntry
    # logging.info(f" working on {filename}")
    # sciMeta = scb.h5GetDict(filename, sciMetadataKeyDict)
    # if str(lbEntry.sampleid).startswith("{}".format(year)):
    #     sampleId = str(lbEntry.sampleid)
    # else:
    #     sampleId = scb.h5Get(filename, "/entry1/sample/name")
    # # see if entry exists:
    # pid = scb.getPid(  # changed from "RawDatasets" to "datasets" which should be agnostic
    #     scb.baseurl + "datasets", {"datasetName": datasetName}, returnIfNone=0
    # )
    # if (pid != 0) and self.deleteExisting:
    #     # delete offending item
    #     url = scb.baseurl + "RawDatasets/{id}".format(id=urllib.parse.quote_plus(pid))
    #     scb.sendToSciCat(url, {}, cmd="delete")
    #     pid = 0
    data = {  # model for the raw datasets as defined in the RawDatasets
        "owner": None,
        "contactEmail": "[email protected]",
        "createdBy": self.username,
        "updatedBy": self.username,
        "creationLocation": "SAXS002",
        "creationTime": None,
        "updatedAt": datetime.datetime.isoformat(datetime.datetime.utcnow()) + "Z",
        # "createdAt": datetime.datetime.isoformat(datetime.datetime.utcnow()) + "Z",
        # "creationTime": h5Get(filename, "/processed/process/date"),
        "datasetName": datasetName,
        "type": "raw",
        "instrumentId": "SAXS002",
        "ownerGroup": "BAM 6.5",
        "accessGroups": ["BAM", "BAM 6.5"],
        "proposalId": str(lbEntry.proposal),
        "dataFormat": "NeXus",
        "principalInvestigator": scb.h5Get(filename, "/entry1/sample/sampleowner"),
        "pid": pid,
        "size": 0,
        "sourceFolder": filename.parent.as_posix(),
        "size": scb.getFileSizeFromPathObj(filename),
        "scientificMetadata": sciMeta,
        "sampleId": str(sampleId),
    }
    urlAdd = "RawDatasets"

    # determine thumbnail:
    # upload
    if thumbnail.exists():
        npid = self.uploadBit(pid=pid, urlAdd=urlAdd, data=data, attachFile=thumbnail)
        logging.info("* * * * adding datablock")
        self.scb.addDataBlock(npid, file_name, datasetType='datasets', clearPrevious=False)


def gen_ev_docs(scm: ScicatTomo):
    with open('/home/dylan/work/als-computing/splash-ingest/.scratch/832Mapping.json', 'r') as json_file:
        data = json.load(json_file)
    map = Mapping(**data)
    with h5py.File('/home/dylan/data/beamlines/als832/20210204_172932_ddd.h5', 'r') as h5_file:
        ingestor = MappedHD5Ingestor(
            map,
            h5_file,
            'root',
            '/home/dylan/data/beamlines/als832/thumbs')
        for name, doc in ingestor.generate_docstream():
            if 'start' in name:
                doRaw(doc, scm)
            if 'descriptor' in name:
                pprint(doc)


scm = ScicatTomo()
gen_ev_docs(scm)
cb376017989fd0dd30b31b43274c50aed7951e85
e36c42157b6eb5c5e951d5a56b717ce2edf682fc
/session_server/common.py
d0628b325e47a3fe69c25f95dde05f3f944e113b
[]
no_license
parkchansoo/pamisol_temporary
253cfd9cc972c3b26463207f765aa4f9004cd5e3
6fd0f92fd6988dc6a005435ef6548c02397dcf96
refs/heads/master
2022-12-13T12:23:33.975379
2018-02-15T02:40:02
2018-02-15T02:40:02
121,584,090
0
0
null
2022-12-08T00:51:43
2018-02-15T02:28:01
Python
UTF-8
Python
false
false
1316
py
status_code = {
    "REGISTER_SUCCESS": {
        "code": 1000,
        "msg": "Register Success",
    },
    "REGISTER_FAILURE": {
        "code": 1001,
        "msg": "Register Failure",
    },
    "LOGIN_SUCCESS": {
        "code": 1010,
        "msg": "Login Success",
    },
    "LOGIN_FAILURE": {
        "code": 1011,
        "msg": "Login Failure",
    },
    "LOGOUT_SUCCESS": {
        "code": 1020,
        "msg": "Logout Success",
    },
    "LOGOUT_FAILURE": {
        "code": 1021,
        "msg": "Logout Failure",
    },
    "SAVE_TOKEN_SUCCESS": {
        "code": 1030,
        "msg": "Token save Success"
    },
    "SAVE_TOKEN_FAILURE": {
        "code": 1031,
        "msg": "Token save Failure"
    },
    "VERIFY_TOKEN_SUCCESS": {
        "code": 1040,
        "msg": "Token Verification Success"
    },
    "VERIFY_TOKEN_FAILURE": {
        "code": 1041,
        "msg": "Token Verification Failure"
    },
    "EXPIRE_TOKEN_SUCCESS": {
        "code": 1050,
        "msg": "Token Expiration Success"
    },
    "EXPIRE_TOKEN_FAILURE": {
        "code": 1051,
        "msg": "Token Expiration Failure"
    },
    "AUTH_SUCCESS": {
        "code": 1060,
        "msg": "Authentication Success"
    },
    "AUTH_FAILURE": {
        "code": 1061,
        "msg": "Authentication Failure"
    }
}
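These entries are meant to be looked up by name; a tiny illustrative helper (an assumption, not part of the original module) makes the intended access pattern explicit:

def get_status(name):
    # e.g. get_status("LOGIN_SUCCESS") -> (1010, "Login Success")
    entry = status_code[name]
    return entry["code"], entry["msg"]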
20e4455b062aaabbef8f56ad00ac2e90b32e6512
e6f0ee76b3b98407fae4ac736100d50dff94c3f1
/SimpleERP/ERP/migrations/0014_auto_20180422_1314.py
9861ae49fa0d04baeb80a4f848e1d0966426b72a
[]
no_license
skth5199/SimpleERP
f5e4835f578fdb82f0f50fdce1e2198f00b062ab
9ca5192d2c88e897474ca5fa326897eba4ef1e2f
refs/heads/master
2022-05-18T00:14:38.341656
2022-04-25T19:31:57
2022-04-25T19:31:57
134,972,882
0
0
null
null
null
null
UTF-8
Python
false
false
2169
py
# Generated by Django 2.0 on 2018-04-22 13:14

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('ERP', '0013_auto_20180422_1253'),
    ]

    operations = [
        migrations.RenameField(
            model_name='price',
            old_name='tax_amount',
            new_name='buying_tax_amount',
        ),
        migrations.RenameField(
            model_name='pricelog',
            old_name='tax_amount',
            new_name='buying_tax_amount',
        ),
        migrations.RemoveField(
            model_name='price',
            name='tax_group',
        ),
        migrations.RemoveField(
            model_name='pricelog',
            name='tax_group',
        ),
        migrations.AddField(
            model_name='price',
            name='buying_tax_group',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='Price_buying_tax_group', to='ERP.TaxGroup'),
        ),
        migrations.AddField(
            model_name='price',
            name='selling_tax_amount',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='price',
            name='selling_tax_group',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='Price_selling_tax_group', to='ERP.TaxGroup'),
        ),
        migrations.AddField(
            model_name='pricelog',
            name='buying_tax_group',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='PriceLog_buying_tax_group', to='ERP.TaxGroup'),
        ),
        migrations.AddField(
            model_name='pricelog',
            name='selling_tax_amount',
            field=models.FloatField(default=0),
        ),
        migrations.AddField(
            model_name='pricelog',
            name='selling_tax_group',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='PriceLog_selling_tax_group', to='ERP.TaxGroup'),
        ),
    ]
177938fe0a876bb495853e00039504169e457847
726ce8dddbb12af1662e002633bfe538ddf77708
/BCPy2000-33960-py2.5.egg/BCPy2000/VisionEggRenderer.py
996063aed6e9ce75dc545a4a40ab94a7daccdace
[]
no_license
bopopescu/BCPy2000-1
f9264bb020ba734be0bcc8e8173d2746b0f17eeb
0f877075a846d17e7593222628e9fe49ab863039
refs/heads/master
2022-11-26T07:58:03.493727
2019-06-02T20:25:58
2019-06-02T20:25:58
282,195,357
0
0
null
2020-07-24T10:52:24
2020-07-24T10:52:24
null
UTF-8
Python
false
false
13833
py
# -*- coding: utf-8 -*-
#
#   $Id: VisionEggRenderer.py 3328 2011-06-18 02:17:13Z jhill $
#
#   This file is part of the BCPy2000 framework, a Python framework for
#   implementing modules that run on top of the BCI2000 <http://bci2000.org/>
#   platform, for the purpose of realtime biosignal processing.
#
#   Copyright (C) 2007-11  Jeremy Hill, Thomas Schreiner,
#                          Christian Puzicha, Jason Farquhar
#
#   [email protected]
#
#   The BCPy2000 framework is free software: you can redistribute it
#   and/or modify it under the terms of the GNU General Public License
#   as published by the Free Software Foundation, either version 3 of
#   the License, or (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
__all__ = ['Text', 'Block', 'Disc', 'ImageStimulus']

import os, sys
import pygame  # ; pygame.init()
import logging
import VisionEgg.Core
import VisionEgg.Text
import VisionEgg.WrappedText

try:
    from BCI2000PythonApplication import BciGenericRenderer, BciStimulus    # development copy
except:
    from BCPy2000.GenericApplication import BciGenericRenderer, BciStimulus  # installed copy

#################################################################
#################################################################

def delegate_getattr(self, v, key):
    p = getattr(v, 'parameters', None)
    if p != None and hasattr(p, key): return True, getattr(p, key)
    if v != None and hasattr(v, key): return True, getattr(v, key)
    return False, None

#################################################################

def delegate_setattr(self, v, key, value, restrict_to=None):
    p = getattr(v, 'parameters', None)
    if p != None and hasattr(p, key):
        if restrict_to != None and not 'parameters.' + key in restrict_to:
            raise AttributeError, "the '%s' attribute is read-only" % key
        setattr(p, key, value)
        return True
    if v != None and hasattr(v, key):
        if restrict_to != None and not key in restrict_to:
            raise AttributeError, "the '%s' attribute is read-only" % key
        setattr(v, key, value)  # fixed: this branch handles v; the original set the attribute on p here
        return True
    return False

#################################################################
#################################################################

class VisionEggRenderer(BciGenericRenderer):
    """
    This is a subclass of BciGenericRenderer that renders stimuli via the
    VisionEgg package (which is based on pygame and PyOpenGL) and polls for
    mouse and keyboard events via pygame.

    The VisionEggRenderer is our default implementation, but you can
    implement other renderers (see the documentation for the
    BciGenericRenderer class).

    The object wraps a number of VisionEgg instances including a Screen and
    a Viewport, but it behaves most like a Screen --- indeed any attributes
    of the underlying VisionEgg.Core.Screen instance, and its .parameters,
    are also accessible directly as if they were attributes of this wrapper
    object. In particular, the following attributes (only accessible after
    the window has opened) are most useful:

        .size (read-only) contains the window's (width,height) in pixels.
        .bgcolor is used to get and set the background colour of the window.
        .color is an alias for bgcolor.
    """###

    #############################################################
    def __init__(self):
        self.__dict__['_frame_timer'] = None
        self.__dict__['_viewport'] = None
        self.__dict__['_screen'] = None
        self.__dict__['monofont'] = self.findfont(('lucida console', 'monaco', 'monospace', 'courier new', 'courier'))

        # default config settings (can be changed in self.Preflight):
        VisionEgg.config.VISIONEGG_MAX_PRIORITY = 0
        VisionEgg.config.VISIONEGG_HIDE_MOUSE = 0
        VisionEgg.config.VISIONEGG_GUI_INIT = 0
        VisionEgg.config.VISIONEGG_FULLSCREEN = 0
        VisionEgg.config.VISIONEGG_FRAMELESS_WINDOW = 1
        VisionEgg.config.VISIONEGG_LOG_FILE = None
        # VisionEgg.start_default_logging()

    #############################################################
    def use_frame_timer(self, setting=True, renew=False):
        if setting:
            if renew or not self._frame_timer:
                self._frame_timer = VisionEgg.Core.FrameTimer()
        else:
            self._frame_timer = None

    #############################################################
    def findfont(self, fontnames):
        """
        Tries to find a system font file corresponding to one of the
        supplied list of names. Returns None if no match is found.
        """###
        def matchfont(fontname):
            bold = italic = False
            for i in range(0, 1):
                if fontname.lower().endswith(' italic'): italic = True; fontname = fontname[:-len(' italic')]
                if fontname.lower().endswith(' bold'): bold = True; fontname = fontname[:-len(' bold')]
            return pygame.font.match_font(fontname, bold=int(bold), italic=int(italic))

        if not isinstance(fontnames, (list, tuple)): fontnames = [fontnames]
        fontnames = [f for f in fontnames if f != None]
        f = (filter(None, map(matchfont, fontnames)) + [None])[0]
        if f == None and sys.platform == 'darwin':  # pygame on OSX doesn't seem even to try to find fonts...
            f = (filter(os.path.isfile, map(lambda x: os.path.realpath('/Library/Fonts/%s.ttf' % x), fontnames)) + [None])[0]
        return f

    #############################################################
    def setup(self, left=None, top=None, width=None, height=None, changemode=None, framerate=None, bitdepth=None, **kwargs):
        """
        Call this to set certain commonly-defined parameters for the screen
        during BciApplication.Preflight(). The renderer object will read
        these parameters in order to initialize the stimulus window, before
        BciApplication.Initialize() is called.
        """###
        BciGenericRenderer.setup(self, left=left, top=top, width=width, height=height, changemode=changemode, framerate=framerate, bitdepth=bitdepth, **kwargs)
        pos = os.environ.get('SDL_VIDEO_WINDOW_POS', '').split(',')
        if len(pos) == 2: prevleft, prevtop = int(pos[0]), int(pos[1])
        else: prevleft, prevtop = 160, 120
        if left != None and top == None: top = prevtop
        if top != None and left == None: left = prevleft
        if left != None and top != None:
            if sys.platform != 'darwin':  # yup, yet another thing that's broken in pygame under OSX
                os.environ['SDL_VIDEO_WINDOW_POS'] = '%d,%d' % (int(left), int(top))
        if width != None: VisionEgg.config.VISIONEGG_SCREEN_W = int(width)
        if height != None: VisionEgg.config.VISIONEGG_SCREEN_H = int(height)
        if changemode != None: VisionEgg.config.VISIONEGG_FULLSCREEN = int(changemode)
        if framerate != None: VisionEgg.config.VISIONEGG_MONITOR_REFRESH_HZ = float(framerate)
        if bitdepth != None: VisionEgg.config.VISIONEGG_PREFERRED_BPP = int(bitdepth)
        for k, v in kwargs.items():
            kk = (k, k.upper(), 'VISIONEGG_' + k.upper())
            for k in kk:
                if hasattr(VisionEgg.config, k):
                    setattr(VisionEgg.config, k, v)
                    # print "VisionEgg.config.%s = %s" % (k, repr(v))
                    break
            else:
                raise AttributeError, "VisionEgg.config has no attribute '%s'" % kk[0]

    #############################################################
    def GetDefaultFont(self):
        d = VisionEgg.Text.Text.constant_parameters_and_defaults
        return d['font_name'][0], d['font_size'][0]

    #############################################################
    def SetDefaultFont(self, name=None, size=None):
        """
        Set the name and/or size of the font that will be used by default
        for Text stimuli. Returns True if the named font can be found,
        False if not.
        """###
        dd = [
            VisionEgg.Text.Text.constant_parameters_and_defaults,
            VisionEgg.WrappedText.WrappedText.constant_parameters_and_defaults,
        ]
        if name != None:
            if os.path.isabs(name) and os.path.isfile(name): font = name
            else: font = self.findfont(name)
            if font == None: return False
            for d in dd: d['font_name'] = (font,) + d['font_name'][1:]
        if size != None:
            for d in dd: d['font_size'] = (size,) + d['font_size'][1:]
        return True

    #############################################################
    def Initialize(self, bci):
        self.__dict__['_bci'] = bci  # this is a mutual reference loop, but what the hell: self and bci only die when the process dies
        logging.raiseExceptions = 0  # suppresses the "No handlers could be found" chatter
        pygame.quit(); pygame.init()
        self._screen = VisionEgg.Core.get_default_screen()
        self._viewport = VisionEgg.Core.Viewport(screen=self._screen)
        self.use_frame_timer(self._frame_timer != None, renew=True)

    #############################################################
    def GetFrameRate(self):
        if sys.platform == 'darwin':
            import platform
            if platform.architecture()[0].startswith('64'):
                print "query_refresh_rate is broken under darwin on 64bit architectures"
                return float(VisionEgg.config.VISIONEGG_MONITOR_REFRESH_HZ)
        try:
            return float(self._screen.query_refresh_rate())
        except:
            print "VisionEgg failed to query refresh rate"
            return float(VisionEgg.config.VISIONEGG_MONITOR_REFRESH_HZ)

    #############################################################
    def RaiseWindow(self):
        try:
            import ctypes  # !! Windows-specific code.
            stimwin = ctypes.windll.user32.FindWindowA(0, "Vision Egg")
            self._bci._raise_window(stimwin)
        except:
            pass

    #############################################################
    def GetEvents(self):
        return pygame.event.get()

    #############################################################
    def DefaultEventHandler(self, event):
        return (event.type == pygame.locals.QUIT) or (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_ESCAPE)

    #############################################################
    def StartFrame(self, objlist):
        if self._bci: self._bci.ftdb(label='screen.clear')    #--------------------
        self._screen.clear()
        if self._bci: self._bci.ftdb(label='viewport.draw')   #--------------------
        self._viewport.parameters.stimuli = objlist
        self._viewport.draw()

    #############################################################
    def FinishFrame(self):
        if self._bci: self._bci.ftdb(label='swap_buffers')    #--------------------
        VisionEgg.Core.swap_buffers()
        if self._bci: self._bci.ftdb(label='glFlush')         #--------------------
        VisionEgg.GL.glFlush()
        if self._frame_timer: self._frame_timer.tick()

    #############################################################
    def Cleanup(self):
        if self._frame_timer:
            self._frame_timer.log_histogram()
            self._frame_timer = True
        self._viewport = None
        self._screen.close()
        self._screen = None
        VisionEgg.Text._font_objects = {}
        # VisionEgg 1.1 allowed these cached pygame.font.Font objects to persist even
        # after pygame quits or is reloaded: this causes a crash the second time around.
        # VisionEgg 1.0 didn't cache, so we never ran across the problem under Python 2.4.
        # Andrew fixed it in VE 1.1.1.
        pygame.quit()

    #############################################################
    def __getattr__(self, key):
        if key == 'color': key = 'bgcolor'
        v = self.__dict__.get('_screen')
        if v == None: raise AttributeError, "a Screen object has not yet been instantiated inside this object"
        gotit, value = self.__delegate_getattr__(v, key)
        if not gotit: raise AttributeError, "'%s' object has no attribute or parameter '%s'" % (self.__class__.__name__, key)
        return value

    #############################################################
    def __setattr__(self, key, value):
        if key in self.__dict__:
            self.__dict__[key] = value
        else:
            if key == 'color': key = 'bgcolor'
            v = self.__dict__.get('_screen')
            if v == None: raise AttributeError, "a Screen object has not yet been instantiated inside this object"
            if not self.__delegate_setattr__(v, key, value, restrict_to=['parameters.bgcolor']):
                raise AttributeError, "'%s' object has no attribute or parameter '%s'" % (self.__class__.__name__, key)

    #############################################################
    def _getAttributeNames(self):
        v = self.__dict__.get('_screen')
        if v == None: return ()
        return ('color', 'bgcolor', 'size', 'parameters')

    #############################################################
    __delegate_setattr__ = delegate_setattr
    __delegate_getattr__ = delegate_getattr

#################################################################
#################################################################

def GetVEParameterNames(self):
    p = getattr(self.__dict__.get('obj'), 'parameters', None)
    if p == None: return ()
    return p.__dict__.keys()

BciStimulus._getAttributeNames = GetVEParameterNames
BciStimulus.__delegate_setattr__ = delegate_setattr
BciStimulus.__delegate_getattr__ = delegate_getattr

import VisionEgg.Textures, VisionEgg.GL

class ImageStimulus(VisionEgg.Textures.TextureStimulus):
    """
    A subclass of VisionEgg.Textures.TextureStimulus
    """###
    def __init__(self, **kwargs):
        if 'texture' in kwargs and not isinstance(kwargs['texture'], VisionEgg.Textures.Texture):
            kwargs['texture'] = VisionEgg.Textures.Texture(kwargs['texture'])
        kwargs['mipmaps_enabled'] = kwargs.get('mipmaps_enabled', 0)
        kwargs['internal_format'] = kwargs.get('internal_format', VisionEgg.GL.GL_RGBA)
        kwargs['texture_min_filter'] = kwargs.get('texture_min_filter', VisionEgg.GL.GL_LINEAR)
        VisionEgg.Textures.TextureStimulus.__init__(self, **kwargs)

from VisionEgg.Text import Text
from VisionEgg.MoreStimuli import Target2D as Block
from VisionEgg.MoreStimuli import FilledCircle as Disc

BciGenericRenderer.subclass = VisionEggRenderer

#################################################################
#################################################################
4ccf4f0a65743ccb7d85345fa763d577d726f775
2b2de1801e45582e0ba67fbd451df84c298dd0fe
/Basic Python/14. dictionary.py
fd65d0f6e670ddd8043de709d3277a5b43fba9be
[]
no_license
Geca981020/Undergraduate-Study
99c92e439058fed88658bf6b0535c0009a0bc762
78d82a453e05a3695fc42d34c95eae2fc16123ce
refs/heads/main
2023-05-25T07:17:26.025753
2021-06-05T07:36:12
2021-06-05T07:36:12
337,929,295
0
0
null
null
null
null
UTF-8
Python
false
false
275
py
# dictionary Declaration&insert
people = {'name': 'hong gil dong'}
people['phone'] = '010-1234-5678'
people['birthday'] = 1122

# dictionary Searching
print(people['phone'])

# dictionary remove
del people['phone']
print(people)

people.clear()
print(people)
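Indexing a missing key raises KeyError, which is why the order of operations above matters; dict.get returns a default instead. An illustrative addition, not part of the original exercise:

# after 'del people['phone']', people['phone'] would raise KeyError;
# get() returns a fallback value instead
print(people.get('phone', 'no phone on file'))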
0aff2ea3edae0934d10f6d423ca02ad299969547
596b852bb8428a6db4dc1153f68f6d3e0da6efec
/findNextLargestString.py
88ebc60cfeca04d01f820e661809dfe014416ce5
[]
no_license
bsofcs/interviewPrep
3da14bf509bb169b0adb185bd3e1f08c371f6d72
ace8ac2d3b47652efeb7052f93557e753796a677
refs/heads/master
2022-11-16T15:33:44.780238
2020-07-03T19:35:43
2020-07-03T19:35:43
272,368,723
0
1
null
null
null
null
UTF-8
Python
false
false
379
py
def findNextLargestString(arr, val):
    if arr is None or val is None:
        return None
    # wrap around: anything at or beyond the last element maps to the first
    if val >= arr[-1] or val < arr[0]:
        return arr[0]
    low, high = 0, len(arr) - 1
    while low < high:
        mid = low + (high - low) // 2
        if arr[mid] <= val and arr[mid + 1] > val:
            return arr[mid + 1]
        if arr[mid] > val:
            high = mid - 1
        else:
            low = mid + 1


arr = ['b', 'c', 'g', 'h']
val = 'g'
print(findNextLargestString(arr, val))
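A few illustrative checks of the intended contract, inferred from the guard clauses (assumptions for illustration, not tests from the original repo):

assert findNextLargestString(['b', 'c', 'g', 'h'], 'a') == 'b'  # below the range
assert findNextLargestString(['b', 'c', 'g', 'h'], 'h') == 'b'  # wraps around
assert findNextLargestString(['b', 'c', 'g', 'h'], 'c') == 'g'  # strictly greater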
fab9a78af75beca94afbca19f6f69d7997087601
02ee91a60e80629fcce24d511ada9204d1f360a1
/constrained_network.py
97a4afba2ba9abd3f19e15e1c06595f3db34f6a7
[]
no_license
tjwldnjss13/ANN-PNU
7b73b93971cac5e671dbeaf1172d8cd25b6ab442
380596eb39a1834dce5c6b1a0a1942ff2246b2a4
refs/heads/master
2022-11-11T20:06:55.547303
2020-06-22T02:23:03
2020-06-22T02:23:03
265,425,814
0
0
null
null
null
null
UTF-8
Python
false
false
14059
py
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import os
from filters import *


class ConstrainedNet:
    epoch_list = []
    train_loss_list = []
    train_acc_list = []
    valid_loss_list = []
    valid_acc_list = []
    test_loss = 0
    test_acc = 0
    lr_list = []

    def __init__(self, lr=.001, lr_decay=None, lr_decay_term=None, epochs=10):
        self.lr = lr
        self.lr_decay = lr_decay
        self.lr_decay_term = lr_decay_term
        self.lr_b = lr * 2
        self.epochs = epochs

        self.W_ItoH1 = 2 * np.random.random((3, 3)) - 1
        self.W_H1toH2 = 2 * np.random.random((2, 5, 5)) - 1
        self.W_H2toO = 2 * np.random.random((16, 10)) - 1

    def exec_all(self, fp, filter=None):
        images, labels = self.dataset(fp, filter)
        train_images, train_labels = images[:300], labels[:300]
        valid_images, valid_labels = images[300:400], labels[300:400]
        test_images, test_labels = images[400:], labels[400:]

        self.train([train_images, train_labels], [valid_images, valid_labels])
        self.test([test_images, test_labels])
        self.visualize()

    def dataset(self, fp, filter):
        images = []
        labels = []
        for n in range(50):
            for label in range(10):
                fn = str(label) + '.' + str(n) + '.png'
                image_path = os.path.join(fp, fn)
                image = Image.open(image_path).convert('L')
                image = self.preprocessed_image(image, filter)
                images.append(image)
                labels.append(label)
        images = np.array(images)
        labels = np.array(labels)

        labels_one_hot = []
        for i in range(len(labels)):
            arr = np.zeros(10)
            arr[labels[i]] = 1
            labels_one_hot.append(arr)
        labels = np.array(labels_one_hot)

        return images, labels

    @staticmethod
    def preprocessed_image(image_file, filter):
        image = np.array(image_file)
        if filter is None:
            image_padded = np.zeros((17, 17))
            image = image.astype('float32') / 255
            image_padded[:16, :16] = image
            return image_padded
        else:
            f_size = len(filter)
            # if_size = 16 - f_size + 1
            image_filtered = np.zeros((17, 17))
            image_padded = np.zeros((16 + f_size, 16 + f_size))
            for i in range(16 + f_size):
                for j in range(16 + f_size):
                    image_padded[i, j] = 255
            image_padded[int(f_size / 2):int(f_size / 2) + 16, int(f_size / 2):int(f_size / 2) + 16] = image
            for i in range(17):
                for j in range(17):
                    image_filtered[i, j] = np.sum(image_padded[i:i + f_size, j:j + f_size] * filter)
            image_filtered = image_filtered.astype('float32') / 255
            # image_padded[1:1 + if_size, 1:1 + if_size] = image_filtered
            return image_filtered

    # @staticmethod
    # def preprocessed_image(image_file, filter):
    #     image = np.array(image_file)
    #     image_padded = np.zeros((17, 17))
    #     if filter is None:
    #         image = image.astype('float32') / 255
    #         image_padded[:17, :17] = image
    #     else:
    #         f_size = len(filter)
    #         if_size = 16 - f_size + 1
    #         image_filtered = np.zeros((if_size, if_size))
    #         for i in range(if_size):
    #             for j in range(if_size):
    #                 image_filtered[i, j] = np.sum(image[i:i + f_size, j:j + f_size] * filter)
    #         image_filtered = image_filtered.astype('float32') / 255
    #         image_padded[1:1 + if_size, 1:1 + if_size] = image_filtered
    #
    #     return image_padded

    def feedforward(self, image):
        pre_H1 = np.zeros((2, 8, 8))
        for k in range(2):
            for i in range(8):
                for j in range(8):
                    pre_H1[k, i, j] = np.sum(image[2 * i:2 * i + 3, 2 * j:2 * j + 3] * self.W_ItoH1)
        post_H1 = ConstrainedNet.relu(pre_H1)

        pre_H2 = np.zeros((4, 4))
        for k in range(2):
            for i in range(4):
                for j in range(4):
                    pre_H2[i, j] += np.sum(post_H1[k, i:i + 5, j:j + 5] * self.W_H1toH2[k])
        post_H2 = ConstrainedNet.relu(pre_H2)

        post_H2_flattened = np.reshape(post_H2, 16)
        pre_O = np.matmul(post_H2_flattened, self.W_H2toO)
        post_O = ConstrainedNet.softmax(pre_O)

        return post_O

    def train(self, train_dataset, valid_dataset):
        train_images, train_labels = train_dataset[0], train_dataset[1]
        valid_images, valid_labels = valid_dataset[0], valid_dataset[1]

        for epoch in range(self.epochs):
            if self.lr_decay is not None and self.lr_decay_term is not None:
                if epoch != 0 and epoch % self.lr_decay_term == 0:
                    self.lr *= self.lr_decay
            self.epoch_list.append(epoch)
            self.lr_list.append(self.lr)
            print('[{}/{} epoch]'.format(epoch + 1, self.epochs), end=' ')

            train_acc = 0
            train_loss = 0

            # Feedforward
            for train_idx in range(len(train_images)):
                image = train_images[train_idx]
                label = train_labels[train_idx]

                pre_H1 = np.zeros((2, 8, 8))
                for k in range(2):
                    for i in range(8):
                        for j in range(8):
                            pre_H1[k, i, j] = np.sum(image[2 * i:2 * i + 3, 2 * j:2 * j + 3] * self.W_ItoH1)
                post_H1 = ConstrainedNet.relu(pre_H1)

                pre_H2 = np.zeros((4, 4))
                for k in range(2):
                    for i in range(4):
                        for j in range(4):
                            pre_H2[i, j] += np.sum(post_H1[k, i:i + 5, j:j + 5] * self.W_H1toH2[k])
                post_H2 = ConstrainedNet.relu(pre_H2)

                post_H2_flattened = np.reshape(post_H2, 16)
                pre_O = np.matmul(post_H2_flattened, self.W_H2toO)
                post_O = ConstrainedNet.softmax(pre_O)

                softmax_ce = ConstrainedNet.softmax_cross_entropy(label, post_O)
                if np.argmax(post_O) == np.argmax(label):
                    train_acc += 1
                train_loss += softmax_ce

                # Backpropagate
                # O
                D_post_O = post_O - label
                softmax_derv_m = ConstrainedNet.softmax_derv(pre_O)
                D_pre_O = np.zeros(10)
                for i in range(10):
                    for j in range(10):
                        D_pre_O[i] += D_post_O[j] * softmax_derv_m[i, j]

                # Weight (H2 -- O)
                W_H2toO_old = self.W_H2toO
                for i in range(16):
                    for j in range(10):
                        self.W_H2toO[i, j] -= self.lr * D_pre_O[j] * post_H2_flattened[i]

                # H2
                D_post_H2_flattened = np.zeros(16)
                for i in range(16):
                    for j in range(10):
                        D_post_H2_flattened[i] += W_H2toO_old[i, j] * D_pre_O[j]
                D_post_H2 = np.reshape(D_post_H2_flattened, (4, 4))
                D_pre_H2 = np.zeros((4, 4))
                for i in range(4):
                    for j in range(4):
                        D_pre_H2[i, j] = D_post_H2[i, j] * ConstrainedNet.relu_derv(pre_H2[i, j])

                # Weight (H1 -- H2)
                W_H1toH2_old = self.W_H1toH2
                for k in range(2):
                    for i in range(5):
                        for j in range(5):
                            self.W_H1toH2[k, i, j] -= self.lr * np.sum(post_H1[k, i:i + 4, j:j + 4] * D_pre_H2)

                # H1
                W_H1toH2_old_inv = []
                for k in range(2):
                    W_H1toH2_old_inv.append(np.flip(W_H1toH2_old[k]))
                W_H1toH2_old_inv = np.array(W_H1toH2_old_inv)
                D_pre_H2_padded = np.zeros((12, 12))
                D_pre_H2_padded[4:8, 4:8] = D_pre_H2
                # for i in range(4, 8):
                #     for j in range(4, 8):
                #         D_pre_H2_padded[i,j] = D_pre_H2[i-4, j-4]
                D_post_H1 = np.zeros((2, 8, 8))
                D_pre_H1 = np.zeros((2, 8, 8))
                for k in range(2):
                    for i in range(8):
                        for j in range(8):
                            D_post_H1[k, i, j] = np.sum(D_pre_H2_padded[i:i + 5, j:j + 5] * W_H1toH2_old_inv[k])
                            D_pre_H1[k, i, j] = D_post_H1[k, i, j] * ConstrainedNet.relu_derv(pre_H1[k, i, j])

                # Weight (I -- H1)
                for k in range(2):
                    for i in range(3):
                        for j in range(3):
                            self.W_ItoH1[i, j] -= self.lr * np.sum(image[i:i + 15:2, j:j + 15:2] * D_pre_H1[k])

            train_acc /= len(train_images)
            train_loss /= len(train_images)
            print('(Train) Accuracy : {:.4f}, Loss : {:.5f}'.format(train_acc, train_loss), end=' ')
            self.train_loss_list.append(train_loss)
            self.train_acc_list.append(train_acc)

            # Validation
            valid_acc = 0
            valid_loss = 0
            for valid_idx in range(len(valid_images)):
                image = valid_images[valid_idx]
                label = valid_labels[valid_idx]
                valid_O = self.feedforward(image)
                if np.argmax(valid_O) == np.argmax(label):
                    valid_acc += 1
                valid_loss += ConstrainedNet.softmax_cross_entropy(label, valid_O)
            valid_acc /= len(valid_images)
            valid_loss /= len(valid_images)
            print('(Valid) Accuracy : {:.4f}, Loss : {:.5f}'.format(valid_acc, valid_loss))
            self.valid_loss_list.append(valid_loss)
            self.valid_acc_list.append(valid_acc)

    def test(self, test_dataset):
        test_images, test_labels = test_dataset[0], test_dataset[1]
        test_acc = 0
        test_loss = 0
        for test_idx in range(len(test_images)):
            image, label = test_images[test_idx], test_labels[test_idx]
            test_O = self.feedforward(image)
            if np.argmax(test_O) == np.argmax(label):
                test_acc += 1
            test_loss += ConstrainedNet.softmax_cross_entropy(label, test_O)
        test_acc /= len(test_images)
        test_loss /= len(test_images)
        self.test_acc = test_acc
        self.test_loss = test_loss
        print(' (Test) Accuracy : {:.4f}, Loss : {:.5f}'.format(test_acc, test_loss))

    def visualize(self):
        epochs = np.array(self.epoch_list)
        train_losses = np.array(self.train_loss_list)
        valid_losses = np.array(self.valid_loss_list)
        train_accs = np.array(self.train_acc_list)
        valid_accs = np.array(self.valid_acc_list)
        lrs = np.array(self.lr_list)

        plt.figure(0)
        plt.plot(epochs, train_losses, 'r-', label='Train loss')
        plt.plot(epochs, valid_losses, 'b:', label='Valid loss')
        plt.title('Train/Validation Loss')
        plt.legend()

        plt.figure(1)
        plt.plot(epochs, train_accs, 'r-', label='Train acc')
        plt.plot(epochs, valid_accs, 'b:', label='Valid acc')
        plt.title('Train/Validation Acc')
        plt.legend()

        if self.lr_decay is not None:
            plt.figure(2)
            plt.plot(epochs, lrs, 'g-', label='Learning curve')
            plt.title('Learning Curve')

        plt.show()

    @staticmethod
    def bit_l2_cost_function(target, output):
        return .5 * np.square(target - output)

    @staticmethod
    def l2_cost_function(target, output):
        return np.mean(ConstrainedNet.bit_l2_cost_function(target, output))

    @staticmethod
    def bit_cost_function(target, output):
        return -target * np.log(output) - (1 - target) * np.log(1 - output)

    @staticmethod
    def cost_function(target, output):
        return np.mean(ConstrainedNet.bit_cost_function(target, output))

    @staticmethod
    def softmax_cross_entropy(target, softmax_class):
        losses = 0
        for i in range(len(target)):
            if softmax_class[i] == 0:
                losses += target[i] * np.log(.00001)
            else:
                losses += target[i] * np.log(softmax_class[i])
        return -losses

    @staticmethod
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    @staticmethod
    def sigmoid_derv(x):
        return ConstrainedNet.sigmoid(x) * (1 - ConstrainedNet.sigmoid(x))

    @staticmethod
    def softmax_derv(x):
        softmax_x = ConstrainedNet.softmax(x)
        jacobian_m = np.diag(softmax_x)
        for i in range(len(jacobian_m)):
            for j in range(len(jacobian_m)):
                if i == j:
                    jacobian_m[i, j] = softmax_x[i] * (1 - softmax_x[i])
                else:
                    jacobian_m[i, j] = -softmax_x[i] * softmax_x[j]
        return jacobian_m

    @staticmethod
    def leaky_relu(x):
        return np.maximum(.01 * x, x)

    @staticmethod
    def leaky_relu_derv(x):
        if x > 0:
            return 1
        else:
            return .01

    @staticmethod
    def relu(x):
        return np.maximum(0, x)

    @staticmethod
    def relu_derv(x):
        # fixed: the original tested 'x is 0' (identity, not value) and returned
        # 1 only in that case, which zeroed the ReLU gradient almost everywhere
        if x > 0:
            return 1
        else:
            return 0

    @staticmethod
    def softmax(x):
        # numerically stable version; a duplicate, unstable softmax definition
        # in the original class body was shadowed by this one and removed here
        exps = np.exp(x - np.max(x))
        return exps / np.sum(exps)


if __name__ == '__main__':
    # SOBEL_X : .0047985 (.55)
    # SOBEL_Y : .00425 (.50)
    # PREWITT_X : .004855 (.55)
    # PREWITT_Y : .00425 (.51)
    # LAPLACIAN : .00708 (.45)
    # LOG : .007 (.33)
    cn1 = ConstrainedNet(lr=.006, epochs=500)
    cn1.exec_all('./digit data', LOG)
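Because every gradient in the training loop is hand-derived, a finite-difference spot check is a cheap way to validate the backward pass. A generic central-difference sketch (illustrative, not part of the original script; f is any zero-argument loss closure over the weight array w):

import numpy as np

def numerical_grad(f, w, eps=1e-5):
    # central-difference estimate of d f / d w for each element of w
    g = np.zeros_like(w)
    it = np.nditer(w, flags=['multi_index'])
    for _ in it:
        idx = it.multi_index
        orig = w[idx]
        w[idx] = orig + eps
        f_plus = f()
        w[idx] = orig - eps
        f_minus = f()
        w[idx] = orig  # restore the weight before moving on
        g[idx] = (f_plus - f_minus) / (2 * eps)
    return g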
4a5cebd758b9f2a3c1588a39c9bb4cbcd4a2080f
0b7008ebe62448d929f5159f1fde7af11ef278ee
/2016/06-1.py
70459eef1c2870cf9e35a6458cbd24554d8c03db
[]
no_license
TheMiles/aoc
6db7cd1fbafbde0de0e532cc60f1da45020367d2
51d4940c53bcef266a93de5593db071e08285031
refs/heads/master
2020-06-12T18:28:02.180745
2019-12-19T18:18:15
2019-12-19T18:18:15
75,777,935
0
0
null
null
null
null
UTF-8
Python
false
false
710
py
#!/usr/bin/env python3

import argparse
import hashlib

parser = argparse.ArgumentParser()
parser.add_argument('file', type=argparse.FileType('r'), help='the input file')
args = parser.parse_args()

number_of_lines = 0
histogram = []
for l in [x.strip() for x in args.file]:
    number_of_lines += 1
    if len(histogram) < len(l):
        histogram.extend([{} for _ in range(len(l) - len(histogram))])
    for i, c in enumerate(l):
        d = histogram[i]
        d[c] = d.get(c, 0) + 1

cleartext = ''
for d in histogram:
    min_number = number_of_lines
    min_char = '-'
    for key, value in d.items():
        if value < min_number:
            min_number = value
            min_char = key
    cleartext += min_char

print(cleartext)
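The per-column least-common-character logic can also be expressed with collections.Counter; an alternative sketch, not the author's code:

from collections import Counter

def least_common_message(lines):
    # zip(*...) transposes the lines into columns of characters
    columns = zip(*(line.strip() for line in lines))
    # most_common() sorts by count descending, so [-1] is the rarest character
    return ''.join(Counter(col).most_common()[-1][0] for col in columns)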
f138b1a144eedf0c0d0871e5d677b9735bb782e0
7665b25d1f5ec432b976537a50c5bc73858be6c6
/stack.py
23544462324610e87e7a313a23b23dff4cb76a98
[]
no_license
arindam7development/Python-Factory
14b2ea3a073eb40dda23a36f52dd7a07df4f87a0
7cd517e3f6f0ed178ea6c959bf4b73a8600ea6dc
refs/heads/master
2021-01-22T01:33:49.212537
2015-04-21T05:57:20
2015-04-21T05:57:20
33,924,534
0
0
null
null
null
null
UTF-8
Python
false
false
427
py
# The list methods make it very easy to use a list as a stack, where the last
# element added is the first element retrieved ("last-in, first-out").
# To add an item to the top of the stack, use append().
# To retrieve an item from the top of the stack, use pop() without an explicit index.

stack = [3, 4, 5]
stack.append(6)
stack.append(7)
stack.append(10)
print(stack)   # [3, 4, 5, 6, 7, 10]

stack.pop()    # removes and returns 10, the most recently added item
print(stack)   # [3, 4, 5, 6, 7]

stack.pop()    # removes and returns 7
print(stack)   # [3, 4, 5, 6]
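For contrast, a short sketch (not in the original snippet) of the FIFO counterpart: popping from the front of a list is O(n), so the usual choice for a queue is collections.deque:

from collections import deque

queue = deque([3, 4, 5])
queue.append(6)         # enqueue at the right
print(queue.popleft())  # 3 -- first in, first out
print(queue)            # deque([4, 5, 6])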
ff5b5983c22a847685d889c11aa61868c0103063
e5ed648d069cca47531c178ca4f7fc6447f09dfa
/micropython/nucleo-f767zi/tcp_htget_u.py
161e48c389f7358ec3bd554472c560717baef7c4
[ "MIT" ]
permissive
bokunimowakaru/iot
fef89df949121be46494dcfc604085edb22ca756
07c42bdcd273812e54465638f74acc641bf346b9
refs/heads/master
2023-07-19T10:18:33.286104
2023-07-10T13:39:39
2023-07-10T13:39:39
163,511,968
7
0
null
null
null
null
UTF-8
Python
false
false
2,802
py
# coding: utf-8
# IoT integration basics: HTTP GET (micro) for MicroPython
# (uses usocket, which saves more memory than higher-level clients)
# Copyright (c) 2019 Wataru KUNINO

import pyb                                      # board support (LEDs); needed for pyb.LED below
import network                                  # network communication library
import usocket                                  # socket communication library
import ujson                                    # JSON conversion library
from sys import exit                            # import exit from the sys library

host_s = 'bokunimo.net'                         # host name to access
path_s = '/iot/cq/test.json'                    # file path to access

pyb.LED(1).on()                                 # turn the (green) LED on
eth = network.Ethernet()                        # create an Ethernet instance eth
try:                                            # start watching for exceptions
    eth.active(True)                            # bring up Ethernet
    eth.ifconfig('dhcp')                        # configure the DHCP client
except Exception as e:                          # when an exception occurs
    print(e)                                    # print the error details
    exit()

try:                                            # start watching for exceptions
    addr = usocket.getaddrinfo(host_s, 80)[0][-1]
    sock = usocket.socket()
    sock.connect(addr)
    req = 'GET ' + path_s + ' HTTP/1.0\r\n'
    req += 'Host: ' + host_s + '\r\n\r\n'
    sock.send(bytes(req, 'UTF-8'))
except Exception as e:                          # when an exception occurs
    print(e)                                    # print the error details
    sock.close()
    exit()

# read the response headers until the blank line that precedes the body
body = '<head>'
while True:
    res = str(sock.readline(), 'UTF-8')
    print(res.strip())
    if len(res) <= 0:
        break
    if res == '\n' or res == '\r\n':
        body = '<body>'
        break
if body != '<body>':
    print('no body data')
    sock.close()
    exit()

# read the body line by line
body = ''
while True:
    res = str(sock.readline(), 'UTF-8').strip()
    if len(res) <= 0:
        break
    body += res
print(body)

res_dict = ujson.loads(body)                    # parse the received data into res_dict
print('--------------------------------------')
print('title :', res_dict.get('title'))         # get and print the 'title' item
print('descr :', res_dict.get('descr'))         # get and print the 'descr' item
print('state :', res_dict.get('state'))         # get and print the 'state' item
print('url :', res_dict.get('url'))             # get and print the 'url' item
print('date :', res_dict.get('date'))           # get and print the 'date' item
sock.close()                                    # close the socket
pyb.LED(1).off()                                # turn the (green) LED off
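To try the same request on a desktop before flashing the board, a rough CPython equivalent may help; this is my sketch, assuming `usocket`/`ujson` map onto the stdlib `socket`/`json` and that the response body is plain JSON:

import socket
import json

host, path = 'bokunimo.net', '/iot/cq/test.json'
sock = socket.create_connection((host, 80))
sock.sendall('GET {} HTTP/1.0\r\nHost: {}\r\n\r\n'.format(path, host).encode())
raw = b''
while True:                       # HTTP/1.0: server closes when done
    chunk = sock.recv(1024)
    if not chunk:
        break
    raw += chunk
sock.close()
headers, _, body = raw.partition(b'\r\n\r\n')   # split headers from the body
print(json.loads(body.decode()))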
de4f669765dab0e5633ca48adaee9cd29c083726
f281f7ca2843fa51f87e87e7a9cb721ef0a938e7
/src/svs/inacademia_server.py
5c51e7315be97932f95cca5102cec4fdacd9dab4
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
SvHu/svs
c45407decdc215fd5f5782f65184d26d8f031bf1
6a62fc11373ced691ce4f6bbe75ef7790dee0ef2
refs/heads/master
2020-12-25T20:42:51.697729
2016-07-10T08:37:48
2016-07-10T08:37:48
63,401,231
0
0
null
2016-07-15T07:29:33
2016-07-15T07:29:32
Python
UTF-8
Python
false
false
14,754
py
import json
import logging.config
import os
import urllib

import cherrypy
from oic.utils.keyio import KeyBundle
from oic.utils.webfinger import WebFinger, OIC_ISSUER
from saml2.response import DecryptionFailed
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT

from svs.cherrypy_util import PathDispatcher, response_to_cherrypy
from svs.client_db import ClientDB
from svs.message_utils import abort_with_client_error, abort_with_enduser_error, \
    negative_transaction_response
from svs.oidc import InAcademiaOpenIDConnectFrontend
from svs.saml import InAcademiaSAMLBackend
from svs.user_interaction import ConsentPage, EndUserErrorResponse
from svs.i18n_tool import ugettext as _
from svs.log_utils import log_transaction_start, log_internal
from svs.utils import deconstruct_state, construct_state

logger = logging.getLogger(__name__)


def setup_logging(config_dict=None, env_key="LOG_CFG", config_file="conf/logging_conf.json",
                  level=logging.INFO):
    """Setup logging configuration.

    The configuration is fetched in order from:
        1. Supplied configuration dictionary
        2. Configuration file specified in environment variable 'LOG_CFG'
        3. Configuration file specified as parameter
        4. Basic config, configured with log level 'INFO'
    """
    if config_dict is not None:
        logging.config.dictConfig(config_dict)
    else:
        env_conf = os.getenv(env_key, None)
        if env_conf:
            config_file = env_conf
        if os.path.exists(config_file):
            with open(config_file, 'r') as f:
                config = json.load(f)
            logging.config.dictConfig(config)
        else:
            logging.basicConfig(level=level)


def main():
    import argparse
    import pkg_resources

    parser = argparse.ArgumentParser()
    parser.add_argument("--mdx", dest="mdx", required=True, type=str,
                        help="base url to the MDX server")
    parser.add_argument("--cdb", dest="cdb", required=True, type=str,
                        help="path to the client metadata file")
    parser.add_argument("--disco", dest="disco_url", type=str,
                        help="base url to the discovery server")
    parser.add_argument("-b", dest="base", required=True, type=str,
                        help="base url for the service")
    parser.add_argument("-H", dest="host", default="0.0.0.0", type=str,
                        help="host for the service")
    parser.add_argument("-p", dest="port", default=8087, type=int,
                        help="port for the service to listen on")
    args = parser.parse_args()

    # Force base url to end with '/'
    base_url = args.base
    if not base_url.endswith("/"):
        base_url += "/"

    setup_logging()

    # add directory to PATH environment variable to find xmlsec
    os.environ["PATH"] += os.pathsep + '/usr/local/bin'

    # ============== SAML ===============
    SP = InAcademiaSAMLBackend(base_url, args.mdx, args.disco_url)

    # ============== OIDC ===============
    client_db = ClientDB(args.cdb)
    client_db.update()
    OP = InAcademiaOpenIDConnectFrontend(base_url, client_db)

    # ============== Web server ===============
    inacademia = InAcademiaMediator(base_url, OP, SP)

    cherrypy.config.update({
        # "request.error_response": _send_418,
        "tools.I18nTool.on": True,
        "tools.I18nTool.default": "en",
        "tools.I18nTool.mo_dir": pkg_resources.resource_filename("svs", "data/i18n/locales"),
        "tools.I18nTool.domain": "messages",
    })
    cherrypy.config.update({'engine.autoreload.on': False})

    cherrypy.server.socket_host = args.host
    cherrypy.server.socket_port = args.port

    cherrypy.tree.mount(inacademia, "/", config={
        "/static": {
            "tools.staticdir.on": True,
            "tools.staticdir.dir": os.path.join(os.getcwd(), "static"),
        },
        "/robots.txt": {
            "tools.staticfile.on": True,
            "tools.staticfile.filename": pkg_resources.resource_filename("svs",
                                                                         "site/static/robots.txt"),
        },
        "/webroot": {
            "tools.staticdir.on": True,
            "tools.staticdir.dir": pkg_resources.resource_filename("svs", "site/static/")
        }
    })
    cherrypy.tree.mount(None, "/.well-known", config={
        "/": {
            "request.dispatch": PathDispatcher({
                "/webfinger": inacademia.webfinger,
                "/openid-configuration": inacademia.openid_configuration,
            })
        }
    })
    cherrypy.tree.mount(None, "/acs", config={
        "/": {
            "request.dispatch": PathDispatcher({
                "/post": inacademia.acs_post,
                "/redirect": inacademia.acs_redirect,
            })
        }
    })
    cherrypy.tree.mount(None, "/consent", config={
        "/": {
            "request.dispatch": PathDispatcher({
                "/": inacademia.consent_index,
                "/allow": inacademia.consent_allow,
                "/deny": inacademia.consent_deny
            })
        }
    })

    cherrypy.engine.signal_handler.set_handler("SIGTERM",
                                               cherrypy.engine.signal_handler.bus.exit)
    cherrypy.engine.signal_handler.set_handler("SIGUSR1", client_db.update)

    cherrypy.engine.start()
    cherrypy.engine.block()


class InAcademiaMediator(object):
    """The main CherryPy application, with all exposed endpoints.

    This app mediates between an OpenID Connect provider front-end, which uses SAML as the
    back-end for authenticating users.
    """

    def __init__(self, base_url, op, sp):
        self.base_url = base_url
        self.op = op
        self.sp = sp

        # Setup key for encrypting/decrypting the state (passed in the SAML RelayState).
        source = "file://symkey.json"
        self.key_bundle = KeyBundle(source=source, fileformat="jwk")
        for key in self.key_bundle.keys():
            key.deserialize()

    @cherrypy.expose
    def index(self):
        raise cherrypy.HTTPRedirect("http://www.inacademia.org")

    @cherrypy.expose
    def status(self):
        return

    @cherrypy.expose
    def authorization(self, *args, **kwargs):
        """Where the OP Authentication Request arrives.
        """
        transaction_session = self.op.verify_authn_request(cherrypy.request.query_string)
        state = self._encode_state(transaction_session)
        log_transaction_start(logger, cherrypy.request, state, transaction_session["client_id"],
                              transaction_session["scope"], transaction_session["redirect_uri"])
        return self.sp.redirect_to_auth(state, transaction_session["scope"])

    @cherrypy.expose
    def disco(self, state=None, entityID=None, **kwargs):
        """Where the SAML Discovery Service response arrives.
        """
        if state is None:
            raise cherrypy.HTTPError(404, _('Page not found.'))

        transaction_session = self._decode_state(state)
        if "error" in kwargs:
            abort_with_client_error(state, transaction_session, cherrypy.request, logger,
                                    "Discovery service error: '{}'.".format(kwargs["error"]))
        elif entityID is None or entityID == "":
            abort_with_client_error(state, transaction_session, cherrypy.request, logger,
                                    "No entity id returned from discovery server.")

        return self.sp.disco(entityID, state, transaction_session)

    @cherrypy.expose
    def error(self, lang=None, error=None):
        """Where the i18n of the error page is handled.
        """
        if error is None:
            raise cherrypy.HTTPError(404, _("Page not found."))

        self._set_language(lang)
        error = json.loads(urllib.unquote_plus(error))
        raise EndUserErrorResponse(**error)

    def webfinger(self, rel=None, resource=None):
        """Where the WebFinger request arrives.

        This function is mapped explicitly using PathDispatcher.
        """
        try:
            assert rel == OIC_ISSUER
            assert resource is not None
        except AssertionError as e:
            raise cherrypy.HTTPError(400, "Missing or incorrect parameter in webfinger request.")

        cherrypy.response.headers["Content-Type"] = "application/jrd+json"
        return WebFinger().response(resource, self.op.OP.baseurl)

    def openid_configuration(self):
        """Where the OP configuration request arrives.

        This function is mapped explicitly using PathDispatcher.
        """
        cherrypy.response.headers["Content-Type"] = "application/json"
        cherrypy.response.headers["Cache-Control"] = "no-store"
        return self.op.OP.capabilities.to_json()

    def consent_allow(self, state=None, released_claims=None):
        """Where the approved consent arrives.

        This function is mapped explicitly using PathDispatcher.
        """
        if state is None or released_claims is None:
            raise cherrypy.HTTPError(404, _("Page not found."))

        state = json.loads(urllib.unquote_plus(state))
        released_claims = json.loads(urllib.unquote_plus(released_claims))
        transaction_session = self._decode_state(state["state"])

        log_internal(logger, "consented claims: {}".format(json.dumps(released_claims)),
                     cherrypy.request, state["state"], transaction_session["client_id"])
        return self.op.id_token(released_claims, state["idp_entity_id"], state["state"],
                                transaction_session)

    def consent_deny(self, state=None, released_claims=None):
        """Where the denied consent arrives.

        This function is mapped explicitly using PathDispatcher.
        """
        if state is None:
            raise cherrypy.HTTPError(404, _("Page not found."))

        state = json.loads(urllib.unquote_plus(state))
        transaction_session = self._decode_state(state["state"])
        negative_transaction_response(state["state"], transaction_session, cherrypy.request,
                                      logger, "User did not give consent.",
                                      state["idp_entity_id"])

    def consent_index(self, lang=None, state=None, released_claims=None):
        """Where the i18n of the consent page arrives.

        This function is mapped explicitly using PathDispatcher.
        """
        if state is None or released_claims is None:
            raise cherrypy.HTTPError(404, _("Page not found."))

        self._set_language(lang)
        state = json.loads(urllib.unquote_plus(state))
        rp_client_id = self._decode_state(state["state"])["client_id"]
        released_claims = json.loads(urllib.unquote_plus(released_claims))
        client_name = self._get_client_name(rp_client_id)
        return ConsentPage.render(client_name, state["idp_entity_id"], released_claims,
                                  state["state"])

    def acs_post(self, SAMLResponse=None, RelayState=None, **kwargs):
        """Where the SAML Authentication Response arrives.

        This function is mapped explicitly using PathDispatcher.
        """
        return self._acs(SAMLResponse, RelayState, BINDING_HTTP_POST)

    def acs_redirect(self, SAMLResponse=None, RelayState=None):
        """Where the SAML Authentication Response arrives.
        """
        return self._acs(SAMLResponse, RelayState, BINDING_HTTP_REDIRECT)

    def _acs(self, SAMLResponse, RelayState, binding):
        """Handle the SAMLResponse from the IdP and produce the consent page.

        :return: HTML of the OP consent page.
        """
        transaction_session = self._decode_state(RelayState)
        user_id, affiliation, identity, auth_time, idp_entity_id = self.sp.acs(
            SAMLResponse, binding, RelayState, transaction_session)

        # if we have passed all checks, ask the user for consent before finalizing
        released_claims = self.op.get_claims_to_release(user_id, affiliation, identity,
                                                        auth_time, idp_entity_id,
                                                        self.sp.metadata, transaction_session)
        log_internal(logger, "claims to consent: {}".format(json.dumps(released_claims)),
                     cherrypy.request, RelayState, transaction_session["client_id"])
        client_name = self._get_client_name(transaction_session["client_id"])
        return ConsentPage.render(client_name, idp_entity_id, released_claims, RelayState)

    def _set_language(self, lang):
        """Set the language.
        """
        if lang is None:
            lang = "en"

        # Modify the Accept-Language header and use the CherryPy i18n tool for translation
        cherrypy.request.headers["Accept-Language"] = lang
        i18n_args = {
            "default": cherrypy.config["tools.I18nTool.default"],
            "mo_dir": cherrypy.config["tools.I18nTool.mo_dir"],
            "domain": cherrypy.config["tools.I18nTool.domain"]
        }
        cherrypy.tools.I18nTool.callable(**i18n_args)

    def _decode_state(self, state):
        """Decode the transaction data.

        If the state can not be decoded, the transaction will fail with an error page for the
        user. We can't notify the client since the transaction state now is unknown.
        """
        try:
            return deconstruct_state(state, self.key_bundle.keys())
        except DecryptionFailed as e:
            abort_with_enduser_error(state, "-", cherrypy.request, logger,
                                     _("We could not complete your validation because an error "
                                       "occurred while handling your request. Please return to "
                                       "the service which initiated the validation request and "
                                       "try again."),
                                     "Transaction state missing or broken in incoming response.")

    def _encode_state(self, payload):
        """Encode the transaction data.
        """
        _kids = self.key_bundle.kids()
        _kids.sort()
        return construct_state(payload, self.key_bundle.get_key_with_kid(_kids[-1]))

    def _get_client_name(self, client_id):
        """Get the display name for the client.

        :return: the client's display name, or client_id if no display name is known.
        """
        try:
            client_info = self.op.OP.cdb[client_id]
            return client_info.get("display_name", client_id)
        except KeyError as e:
            return client_id


if __name__ == '__main__':
    main()
ad082d6a706f4aaef8bf00a35b031d026601dbd4
e8487b1670fc06852af90fd5f00dd4d45e51c8a2
/TexGenerator/year2014/sem1_39_kr2.py
f1fa0085e50c4eac9d1e96d3d4153f3f3cf5fc49
[]
no_license
AntipovDen/Matan
f235e4f2ac8f2effeb42170a3d3c265a53bc860a
1823a4c605227103cd838fc98be8cf0e223239fc
refs/heads/master
2021-09-08T19:53:05.372844
2021-08-27T13:33:11
2021-08-27T13:33:11
68,631,208
0
0
null
null
null
null
UTF-8
Python
false
false
3,679
py
__author__ = 'Den'

from random import randint

# The prompt strings below are Russian LaTeX snippets:
# p1: "Differentiate f(x)" (and "... 50 times")
# p2: "Compute the limit using L'Hopital's rule"
# p3: "Expand by Taylor's formula with the given remainder"
# p4: "Compute the limit using Taylor's formula"
p1 = ['Продифференцируйте f(x):\\tabularnewline\r\n$f(x) = x^\\frac{2}{\\ln x} - 2x^{\\log_x e} e^{1+\\ln x} + e^{1+\\frac{2}{\\log_x e}}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n',
      'Продифференцируйте f(x) 50 раз:\\tabularnewline\r\n$f(x) = (x^2 - 1)(4 \\sin^3 x + \\sin 3x)$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n']
p2 = ['Посчитайте предел, пользуясь правилом Лопиталя:\\tabularnewline\r\n$\\lim\\limits_{x \\to 0} \\sin x \\ln \\cot x$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n',
      'Посчитайте предел, пользуясь правилом Лопиталя:\\tabularnewline\r\n$\\lim\\limits_{x \\to +\\infty} (\\pi - 2\\arctan\\sqrt{x})\\sqrt{x}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n']
p3 = ['Разложите по формуле Тейлора с остатком $o((x-1)^{2n+1})$:\\tabularnewline\r\n$f(x) = (3x^2 - 6x + 4)e^{2x^2-4x+5}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n',
      'Разложите по формуле Тейлора с остатком $o((x-1)^{2n})$:\\tabularnewline\r\n$f(x) = \\frac{x^2-2x+1}{\\sqrt[3]{x(2 - x)}}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n',
      'Разложите по формуле Тейлора с остатком $o((x - 1)^n)$:\\tabularnewline\r\n$f(x) = \\ln \\sqrt[4]{\\frac{x - 2}{5 - x}}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n']
p4 = ['Посчитайте предел, пользуясь формулой Тейлора:\\tabularnewline\r\n$\\lim\\limits_{x \\to 0} (\\sqrt{1 + 2 \\tan x} + \\ln(1 - x))^\\frac{1}{x^2}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n',
      'Посчитайте предел, пользуясь формулой Тейлора:\\tabularnewline\r\n$\\lim\\limits_{x \\to 0} \\left(\\frac{x \\sin x}{2 \\cosh x - 2}\\right)^\\frac{1}{\\sin^2 x}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n',
      'Посчитайте предел, пользуясь формулой Тейлора:\\tabularnewline\r\n$\\lim\\limits_{x \\to 0} \\frac{\\ln (1 + x) + \\frac{1}{2}\\sinh (x^2) - x}{\\sqrt{1 + \\tan x} - \\sqrt{1 + \\sin x}}$\\tabularnewline\r\n\\noalign{\\vskip4mm}\r\n']

varNames = ['Rick Grimes', 'Carl Grimes', 'Lori Grimes', 'Shane', 'Glenn', 'Carol', 'Daryl',
            'Merle', 'Andrea', 'Meggie', 'Beth', 'Hershel', 'Michonne', 'The Governor',
            'Tyreese', 'Sasha', 'Bob', 'Tara']
varNames.sort()
varNumber = 0


def genVariant():
    # pick one problem from each pool (p1/p2 have 2 options, p3/p4 have 3)
    return [randint(0, 1), randint(0, 1), randint(0, 2), randint(0, 2)]


def printVariant():
    global varNumber
    v = genVariant()
    print('\\begin{tabular}{l}')
    print('Вариант', varNames[varNumber], '\\tabularnewline')  # "Variant <name>"
    varNumber += 1
    print(p1[v[0]])
    print(p2[v[1]])
    print(p3[v[2]])
    print(p4[v[3]])
    print('\\end{tabular}', end='')


# 18 variants over three tabular blocks of 4, 4 and 1 rows, two variants per row
for rows_on_page in (4, 4, 1):
    print('\\begin{tabular}{cc}')
    for i in range(rows_on_page):
        printVariant()
        print('& %')
        printVariant()
        print('\\tabularnewline')
        print('\\noalign{\\vskip4mm}')
    print('\\end{tabular}')
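genVariant draws independently, so with 18 sheets from only 2·2·3·3 = 36 possible combinations, duplicate variants are fairly likely. If distinct sheets were wanted, a sketch (my suggestion, not part of the original generator):

from itertools import product
from random import sample

all_variants = list(product(range(2), range(2), range(3), range(3)))  # all 36 combinations
distinct_variants = sample(all_variants, len(varNames))  # 18 unique draws, one per name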
594a6854aa036bb05a7b70e39cf7494fa72f71a0
395f0f1faa1ba05b1dcd026b5c4c8ae6f49a931e
/states.py
4a4a9399414bca3d40096145970df92b68a655e4
[]
no_license
ddthj/Rogue-Neurons
f9b6c8b1a49f8acc0f730111e9f1fe7256f0bfc9
da849b9614cc20adf8121c84a4ca80b1b6dae9d6
refs/heads/master
2020-05-16T22:31:44.384901
2019-04-25T02:23:32
2019-04-25T02:23:32
183,338,907
1
0
null
null
null
null
UTF-8
Python
false
false
4,377
py
import math

from objects import *
from util import *

'''
states.py - contains every state in the form of classes plus the controller,
which takes state output and converts it to something we can return to the framework
'''


class state:  # all states inherit from this
    def __init__(self):
        self.expired = False

    def reset(self):
        self.expired = False

    def execute(self, agent):
        pass


class atba(state):  # always towards ball agent
    def __init__(self):
        super().__init__()

    def execute(self, agent):
        # all states produce a target and target speed; control expects a Target wrapper
        target = Target(agent.ball.location)
        speed = 2300
        return control(agent, target, speed)


class shoot(state):  # takes shot on opponent goal
    def __init__(self):
        super().__init__()

    def execute(self, agent):
        goal = Vector3(0, 5100 * -side(agent.team), 0)
        distance = (agent.ball.location - agent.me.location).magnitude() / 2.5
        goal_to_ball = (agent.ball.location - goal).normalize()
        target = agent.ball.location + distance * goal_to_ball
        perp = goal_to_ball.cross(Vector3(0, 0, 1))
        adjustment = perp * cap(perp.dot(agent.ball.velocity), -distance / 2.3, distance / 2.3)
        target += adjustment
        speed = 2300
        if distance > 2050:
            # it's bad to call this at close distances
            target, retarget = retarget_boost(agent, target)
        return control(agent, Target(target), speed, False)


class contest(state):  # hits the ball asap, dodges into it
    def __init__(self):
        super().__init__()

    def execute(self, agent):
        target, retarget = retarget_boost(agent, agent.ball.location)
        speed = 2300
        return control(agent, Target(target, agent.ball.velocity), speed, not retarget)


class clear(state):  # hits ball to side of field
    def __init__(self):
        super().__init__()

    def execute(self, agent):
        distance = (agent.me.location - agent.ball.location).flatten().magnitude()
        goal_vector = Vector3(-sign(agent.ball.location[0], False), 0, 0)
        target = agent.ball.location + (goal_vector * (40 + (distance / 5)))
        target += Vector3(0, 25 * side(agent.team), 0)
        speed = 2300 * cap(distance / (-180 + agent.ball.location[2] * 2), 0.1, 1)
        return control(agent, Target(target), speed, False)


class retreat(state):  # returns to goal and stops
    def __init__(self):
        super().__init__()

    def execute(self, agent):
        goal = Vector3(0, 5100 * side(agent.team), 70)
        if (agent.me.location - goal).magnitude() < 500:
            speed = 30
            target = agent.ball.location
        else:
            speed = 1800
            target, retarget = retarget_boost(agent, goal)
        return control(agent, Target(target), speed, False)


class recover(state):  # tries to land facing in the direction it's moving
    def __init__(self):
        super().__init__()

    def execute(self, agent):
        target = agent.me.location + agent.me.velocity.flatten()
        speed = 30
        return control(agent, Target(target), speed, False)


def control(agent, target, target_speed, f=False):
    # turns targets and speeds into controller outputs
    c = agent.refresh()
    local_target = agent.me.matrix.dot(target.location - agent.me.location)
    local_velocity = agent.me.matrix.dot(agent.me.velocity)[0]
    turn_radius = radius(local_velocity)
    turn_center = Vector3(0, sign(local_target[1]) * (turn_radius + 70), 0)
    # scale the requested speed down when the target sits inside our turning circle
    slowdown = (turn_center - local_target.flatten()).magnitude() / cap(turn_radius * 1.5, 1, 1200)
    target_speed = cap(target_speed * slowdown, -abs(target_speed), abs(target_speed))
    c.handbrake = slowdown < 0.44
    c.steer, c.yaw, c.pitch, c.roll, angle_to_target = defaultPD(agent, local_target, True)
    c.throttle, c.boost = throttle(target_speed, local_velocity, 1)
    if agent.me.airborn and (angle_to_target > 0.2 or (agent.me.location - target.location).magnitude() > 800):
        c.boost = False
    closing_vel = cap((target.location - agent.me.location).normalize().dot(agent.me.velocity - target.velocity), 0.01, 2300)
    if agent.sinceJump < 1.5 or (f and (target.location - agent.me.location).magnitude() / closing_vel < 0.38 and abs(angle_to_target) < 0.21):
        flip(agent, c, local_target, angle_to_target)
    return c
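A small worked check of the slowdown scaling in control(), standalone with hypothetical numbers; `cap` is restated here under the assumption that util.cap is a plain clamp:

def cap(x, low, high):
    # assumed semantics of util.cap: clamp x into [low, high]
    return max(low, min(high, x))

# hypothetical numbers: target sits 600 uu from the turn circle's centre,
# and the current speed gives an 800 uu turning radius
turn_radius = 800.0
dist_from_turn_center = 600.0
slowdown = dist_from_turn_center / cap(turn_radius * 1.5, 1, 1200)
print(slowdown)                            # 0.5 -> above the 0.44 handbrake threshold
print(cap(2300 * slowdown, -2300, 2300))   # 1150.0 -> requested speed halved for the turn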
37dcd51d3214a03cb4d9bd5ef3121c0b9a56103a
a85d1d6a54c8c143d0d64f02b80c54aba78b3a84
/1116/클라이언트 타임.py
078160e9ce1284f47a7a314e32060291650920da
[]
no_license
w51w/python
30007548ba19076285954099125f42bc63a3d204
bc556a520ad0a9d99b5445fc92113c4afa83b4c2
refs/heads/master
2023-01-28T17:37:15.344106
2020-12-06T14:56:47
2020-12-06T15:42:40
308,628,844
0
0
null
null
null
null
UTF-8
Python
false
false
176
py
# client_time
import socket

sock = socket.socket()          # arguments can be omitted (defaults: IPv4, TCP)
address = ('localhost', 4444)
sock.connect(address)           # connect() takes a single (host, port) tuple
print("Current time:", sock.recv(1024).decode())
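A minimal matching server for this client, assuming the intended behaviour is "send the current time and close" (my sketch, not from the original repo):

# server_time (illustrative counterpart)
import socket
from datetime import datetime

srv = socket.socket()
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(('localhost', 4444))
srv.listen(1)
conn, addr = srv.accept()                        # blocks until the client connects
conn.send(datetime.now().isoformat().encode())   # one-shot time reply
conn.close()
srv.close()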
554a381d585961861f2b683247f2d7acdb9d391e
602bdbd1d8ef4d36ccfdcae5756bc8e448d30584
/share/ecommerce/voucher/basic.py
5f14cce3af33ec80ad9e80dd70fb9a3ead072d59
[]
no_license
timparkin/timparkingallery
1136027bf9cfbad31319958f20771a6fdc9f5fc4
6e6c02684a701817a2efae27e21b77765daa2c33
refs/heads/master
2016-09-06T00:28:16.965416
2008-11-25T21:15:45
2008-11-25T21:15:45
12,716
1
1
null
null
null
null
UTF-8
Python
false
false
3,182
py
import re

import formal

from ecommerce.voucher import base


class Voucher(object):

    def getCreator(self):
        return BasicVoucherDefinitionCreator()

    def getEditor(self):
        return BasicVoucherDefinitionEditor()

    def getType(self):
        return BasicVoucherDefinition

    def getUpdateSQL(self):
        sql = """
            update %(table)s
            set start_date=%%(start_date)s, end_date=%%(end_date)s, amount=%%(amount)s
            where voucher_definition_id = %%(voucher_definition_id)s"""
        return sql


class BasicVoucherDefinition(base.BaseVoucherDefinition):
    _attrs = ('voucher_definition_id', 'code', 'count', 'multiuse', 'start_date',
              'end_date', 'amount')


# Matches either a plain amount (e.g. "10" or "10.50") or a percentage (e.g. "15%").
AMOUNT_RE = re.compile(r'^\d+(\.\d+)?%?$')


def addFields(form, forCreate=False):
    if forCreate:
        codeField = formal.String(required=True, strip=True)
        countField = formal.Integer()
        multiuseField = formal.Boolean()
    else:
        codeField = formal.String(immutable=True)
        countField = formal.Integer(immutable=True)
        multiuseField = formal.Boolean(immutable=True)

    form.add(formal.Field('code', codeField))
    form.add(formal.Field('count', countField))
    form.add(formal.Field('multiuse', multiuseField))
    form.add(formal.Field('start_date', formal.Date()))
    form.add(formal.Field('end_date', formal.Date()))
    form.add(formal.Field('amount', formal.String(required=True, strip=True),
                          description="Either an amount or a '%'"))
    return form


class BasicVoucherDefinitionCreator(object):

    def addFields(self, form):
        addFields(form, forCreate=True)

    def create(self, ctx, form, data):
        if not data['multiuse'] and not data['count']:
            raise formal.FormError("One of 'multiuse' and 'count' must be specified")
        if data['multiuse'] and data['count']:
            raise formal.FormError("Only one of 'multiuse' and 'count' must be specified")
        if not AMOUNT_RE.match(data['amount']):
            raise formal.FieldError("Unrecognised format", 'amount')

        voucherDefinition = BasicVoucherDefinition(**data)
        if data['multiuse']:
            voucher = base.Voucher(code=data['code'])
            voucherDefinition.addVoucher(voucher)
        else:
            codes = base.generateCodes(data['code'], data['count'])
            for code in codes:
                voucher = base.Voucher(code=code)
                voucherDefinition.addVoucher(voucher)
        return voucherDefinition


class BasicVoucherDefinitionEditor(object):

    def addFieldsAndData(self, form, voucherDefinition):
        addFields(form)
        form.data = voucherDefinition.getDataDict()

    def update(self, voucherDefinition, data):
        if not AMOUNT_RE.match(data['amount']):
            raise formal.FieldError("Unrecognised format", 'amount')

        voucherDefinition.start_date = data['start_date']
        voucherDefinition.end_date = data['end_date']
        voucherDefinition.amount = data['amount']
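What AMOUNT_RE accepts, checked directly (illustrative):

import re

AMOUNT_RE = re.compile(r'^\d+(\.\d+)?%?$')
for candidate in ('10', '10.50', '15%', '12.5%', '-3', '10.', '%'):
    print(candidate, bool(AMOUNT_RE.match(candidate)))
# 10 True / 10.50 True / 15% True / 12.5% True / -3 False / 10. False / % False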
d9410fc95bb27e72892fdb3ed8a98dd71d4aad6d
db45f73a9b2a2cd8e867c577e68e9b3e6f0244f9
/ScanWatch/storage/ScanDataBase.py
2e91241f31ea76a964bbdd9b536be9e3d898ea69
[ "MIT" ]
permissive
qihangwang/ScanWatch
753d6c431df1e9e0468fd0195ac8fc8aedea0b6b
97f60cd3ad394dc0bfb50e846bbfaa1eeb9cc197
refs/heads/master
2023-06-30T16:57:56.414960
2021-07-17T10:14:53
2021-07-17T10:14:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,003
py
from typing import Dict, List

from ScanWatch.storage.DataBase import DataBase
from ScanWatch.storage.tables import get_transaction_table
from ScanWatch.utils.enums import TRANSACTION, NETWORK


class ScanDataBase(DataBase):
    """
    Handles the recording of the address transactions in a local database
    """

    def __init__(self, name: str = 'scan_db'):
        """
        Initialise a Scan database instance

        :param name: name of the database
        :type name: str
        """
        super().__init__(name)

    def add_transactions(self, address: str, nt_type: NETWORK, tr_type: TRANSACTION,
                         transactions: List[Dict]):
        """
        Add a list of transactions to the database

        :param address: address involved in the transactions
        :type address: str
        :param nt_type: type of network
        :type nt_type: NETWORK
        :param tr_type: type of the transactions to record
        :type tr_type: TRANSACTION
        :param transactions: list of the transactions to record
        :type transactions: List[Dict]
        :return: None
        :rtype: None
        """
        table = get_transaction_table(address, nt_type, tr_type)
        for transaction in transactions:
            row = table.dict_to_tuple(transaction)
            self.add_row(table, row, auto_commit=False)
        self.commit()

    def get_transactions(self, address: str, nt_type: NETWORK, tr_type: TRANSACTION) -> List[Dict]:
        """
        Return the list of the transactions recorded in the database

        :param address: address involved in the transactions
        :type address: str
        :param nt_type: type of network
        :type nt_type: NETWORK
        :param tr_type: type of the transactions to fetch
        :type tr_type: TRANSACTION
        :return: list of the transactions recorded
        :rtype: List[Dict]
        """
        table = get_transaction_table(address, nt_type, tr_type)
        rows = self.get_all_rows(table)
        return [table.tuple_to_dict(row) for row in rows]

    def get_last_block_number(self, address: str, nt_type: NETWORK, tr_type: TRANSACTION) -> int:
        """
        Return the last block number seen in recorded transactions (per address, type of
        transaction and network). If none are found, return 0.

        :param address: address involved in the transactions
        :type address: str
        :param nt_type: type of network
        :type nt_type: NETWORK
        :param tr_type: type of the transactions to fetch
        :type tr_type: TRANSACTION
        :return: last block number
        :rtype: int
        """
        table = get_transaction_table(address, nt_type, tr_type)
        selection = f"MAX({table.blockNumber})"
        result = self.get_conditions_rows(table, selection=selection)
        default = 0
        try:
            result = result[0][0]
        except IndexError:
            return default
        if result is None:  # MAX() over an empty table yields SQL NULL
            return default
        return int(result)
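A hedged usage sketch: the enum members and the transaction dict layout below are my assumptions, not confirmed from the ScanWatch source.

# Illustrative only: NETWORK/TRANSACTION members and the row shape are assumed.
db = ScanDataBase("demo_db")
address = "0x0000000000000000000000000000000000000000"                 # placeholder address
txs = [{"blockNumber": 1234567, "hash": "0xabc", "value": "0"}]        # assumed dict shape
db.add_transactions(address, NETWORK.ETHER, TRANSACTION.NORMAL, txs)   # assumed enum members
start = db.get_last_block_number(address, NETWORK.ETHER, TRANSACTION.NORMAL)
print(start)  # 1234567 -> on the next scan, fetch only blocks after this one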