blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (list) | license_type (string) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (string) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (string) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string) | src_encoding (string) | language (string) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (string) | content (string) | authors (list) | author (string) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87d2c895c4c15dd4c38376f740c302e94c9d1a8d | cbe1a042417584da2ad2ebdeb046f438953ddf4a | /web_crawler/naver_review.py | e24acd0c893897c1b68f8bd11e67ff2211129ab5 | []
| no_license | sjknight79/Police_study_200810 | 36f13276e6379deeb3972192fec1a5320924dc8d | 94329b820f854696dcefa3798ccdb4c469822adf | refs/heads/master | 2022-11-30T13:00:53.272355 | 2020-08-12T00:28:37 | 2020-08-12T00:28:37 | 285,436,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | import requests
from bs4 import BeautifulSoup
url = "https://movie.naver.com/movie/bi/mi/review.nhn?code=188909"
res = requests.get(url)
# print(res.text)
soup = BeautifulSoup(res.content, 'html.parser')
# print(soup.prettify())
ul = soup.find("ul", class_="rvw_list_area")
# print(ul)
lis = ul.find_all("li")
for i, li in enumerate(lis):
print(i, "="*50)
print(li.a.text)
print(li.span.em.text)
| [
"[email protected]"
]
| |
3cda43de60849b7e6f49a8d3582fccee1df59256 | 0b65ab6c42179965bfd76c2c55943cb83e3c34d4 | /runserver.py | 2d3f8de484ccb862a0bc488821d4e4103f1fe730 | []
| no_license | ojixzzz/WebAnalytics-Flask | 2c3bed3cf2dbf910bf466535367bed3dd9c5e675 | f9d0de4fc71f639b0e0779664c2614a3545e58c6 | refs/heads/master | 2020-12-22T05:08:55.158147 | 2016-06-15T06:19:20 | 2016-06-15T06:19:20 | 60,417,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from analytic import app
app.debug = True
if __name__ == '__main__':
app.run(host='0.0.0.0') | [
"[email protected]"
]
| |
33f4608198892596efdf57c1d8dfe31a4a950ab0 | 32c00255e58a17ea97c27f932c7e9871019f216b | /eeg_processing/config.py | 3c42a10fa560445ca29c50237a8f7c959b5c8a65 | []
| no_license | saintvictoria/crowdeeg-viewer | 9d080dac23f0bb59f0bd79a8eba164aa6ae8cc45 | 681bb37a7af55460c556584d0dc7423421f9c2fa | refs/heads/master | 2021-01-23T02:48:06.691225 | 2015-10-05T17:10:46 | 2015-10-05T17:10:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from collections import OrderedDict
# EDF profile - signal channel numbers to the names we wish to use
edf_profiles = {
'DREAMS': OrderedDict([
('Time', 'time'),
('8', 'FP1-A1'),
('7', 'C3-A1'), # one channel is enough for MTURK!
('1', 'O1-A1'),
])
}
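# A minimal sketch of how a profile might be consumed (hypothetical `raw_signals`
# dict keyed by the EDF channel numbers above; illustrative only):
#
#   profile = edf_profiles['DREAMS']
#   renamed = {name: raw_signals[ch] for ch, name in profile.items() if ch in raw_signals}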
# EDF File Defaults
EDF_FILE_PATH = '.'
# Filter Defaults
EEG_LOW_BOUND_FREQUENCY = 0.3
EEG_HIGH_BOUND_FREQUENCY = 35
EMG_LOW_BOUND_FREQUENCY = 10
EMG_HIGH_BOUND_FREQUENCY = 100
NOTCH_FREQUENCY = 60
NOTCH_FREQUENCY_BANDWIDTH = 2 | [
"[email protected]"
]
| |
5901e5381b54af17773dc3e7c1520e28cf0de3f4 | 2cb507ecd6629b9ff457a36e462f987913d94c1a | /webspider/3.数据提取/3.3bs4模块/07-bs4案例.py | e0ba2dc15a95fa18c7b4907a3e13a505e0e05098 | [
"Apache-2.0"
]
| permissive | youaresherlock/PythonPractice | 6869e0a5949675198826e5a07552237a636d6f5b | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | refs/heads/master | 2021-08-16T03:09:44.203035 | 2021-08-02T07:40:00 | 2021-08-02T07:40:00 | 146,625,560 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #!usr/bin/python
# -*- coding:utf8 -*-
"""
https://mil.news.sina.com.cn/roll/index.d.html
"""
import json
import requests
from bs4 import BeautifulSoup
url = 'https://mil.news.sina.com.cn/roll/index.d.html'
headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content.decode(), 'html.parser')
# Hierarchical (descendant) CSS selector
news_list = soup.select('.linkNews li a')
news_results = []
for news in news_list:
new_dict = dict()
new_dict['title'] = news.get_text()
new_dict['url'] = news.get('href')
news_results.append(new_dict)
print(news_results)
with open('news.json', 'w') as f:
content = json.dumps(news_results, ensure_ascii=False, indent=1)
f.write(content)
| [
"[email protected]"
]
| |
6517fec4d77570a3b3058385a4b8b69313b8a877 | 6ed5b3ef5c24d9b24bff329d7d69a54908386014 | /ALDS1_9_A.py | ed191daedd9af5fedc5e2c2a200be721b6352afa | []
| no_license | python-practicing/Aizu_Online_Judge | cb954b806329fc3332916b9120eb8d77965b896c | 364e205c0f345c2387019bee269fb1075ffdf761 | refs/heads/main | 2023-04-27T07:56:28.485550 | 2021-05-16T07:45:53 | 2021-05-16T07:45:53 | 356,594,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | import math
n = int(input())
heap_elements = list(map(int, input().split()))
for i in range(1, n+1):
key = heap_elements[i-1]
if i == 1:
left_key = heap_elements[1]
right_key = heap_elements[2]
print(f'node {i}: key = {key}, left key = {left_key}, right key = {right_key}, ')
else:
parent_key = heap_elements[math.floor(i / 2) - 1]
if 2*i <= n-1:
left_key = heap_elements[2*i-1]
right_key = heap_elements[2*i]
print(f'node {i}: key = {key}, parent key = {parent_key}, left key = {left_key}, right key = {right_key}, ')
elif 2*i == n:
left_key = heap_elements[2*i-1]
print(f'node {i}: key = {key}, parent key = {parent_key}, left key = {left_key}, ')
else:
print(f'node {i}: key = {key}, parent key = {parent_key}, ')
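# Index arithmetic used above for a 1-based binary heap stored in a 0-based list:
#   parent(i) = floor(i / 2), left(i) = 2*i, right(i) = 2*i + 1
# e.g. node i = 3 has parent 1, left child 6 and right child 7, which are read
# from heap_elements[5] and heap_elements[6].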
| [
"[email protected]"
]
| |
3f9c5087daf02fa4d3f63eed410bf3cac7690a7a | 5936b0f025944d265cc64d31ef93bc578d5ae6a2 | /home/migrations/0002_load_initial_data.py | aa10fce4bdfc1b079fe4363502f83665c2758cfe | []
| no_license | crowdbotics-apps/smiley-18358 | b4e91ddeaf525aedf990ec1df65d65fb583f4b7c | 7935dd2fad196a7b573c1126905af5fcf93110b0 | refs/heads/master | 2022-11-06T07:02:33.512245 | 2020-06-23T20:08:09 | 2020-06-23T20:08:09 | 274,497,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "smiley"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">smiley</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "smiley-18358.botics.co"
site_params = {
"name": "smiley",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
]
| |
b8f3fa45da63fbbe34737aa9fbd34da6475d1715 | edb140c6b4cf5042480f2dd8adca6222dc0f7fa6 | /ec2/urls.py | 5faee05666c950b8a542c519bd358f746c5eafc9 | []
| no_license | chuong-phan/ec2 | bc4afb6e4b515ac8104cc9027226bbf42e5a9ddc | 748c04b177f1597131e7e506dcbf3cb64f3c7ffd | refs/heads/master | 2021-07-16T04:18:06.538715 | 2017-03-13T16:16:31 | 2017-03-13T16:16:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | """ec2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
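# A sketch of the "Function views" recipe from the docstring above (assumes a
# hypothetical app `my_app` exposing `views.home`); kept commented out:
#
#   from my_app import views
#   urlpatterns += [url(r'^$', views.home, name='home')]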
| [
"[email protected]"
]
| |
3c41505b6df63f203fef498970b71945aa8ac5d8 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/situations/complex/looping_interaction_situation.py | a00207c4f468d786c117ad5289444841aeb74d90 | [
"Apache-2.0"
]
| permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,478 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\situations\complex\looping_interaction_situation.py
# Compiled at: 2020-10-01 19:10:21
# Size of source mod 2**32: 12943 bytes
from event_testing.test_events import TestEvent
from event_testing.tests import TunableTestSet
from interactions.base.interaction import InteractionFailureOptions
from interactions.context import InteractionContext, QueueInsertStrategy
from interactions.interaction_finisher import FinishingType
from interactions.priority import Priority
from sims4.tuning.tunable import Tunable, TunablePackSafeReference
from singletons import DEFAULT
from situations.situation import Situation
from situations.situation_complex import SituationComplexCommon, SituationStateData, CommonSituationState, TunableSituationJobAndRoleState
import interactions.aop, routing, services, situations, operator
OBJECT_TOKEN = 'object_id'
class RunInteractionState(CommonSituationState):
FACTORY_TUNABLES = {'max_retry_attempts': Tunable(description='\n The number of times the Sim can fail to successfully run the \n tuned interaction before giving up and moving on to the next \n object as a target.\n ',
tunable_type=int,
default=3)}
def __init__(self, *args, targets=None, interaction=None, max_retry_attempts=None, basic_extra=None, previous_si=None, **kwargs):
(super().__init__)(*args, **kwargs)
self.targets = targets
self.interaction = interaction
self._retry_count = 0
self._target = None
self.max_retry_attempts = max_retry_attempts
self.basic_extra = basic_extra
self._previous_si = previous_si
self._interaction_instance = None
def on_activate(self, reader=None):
if not self.find_target_and_push_interaction():
if not self.targets:
self.owner._self_destruct()
else:
self.retry_interaction()
return
self._test_event_register(TestEvent.InteractionStart)
self._test_event_register(TestEvent.InteractionExitedPipeline)
def handle_event(self, sim_info, event, resolver):
if event == TestEvent.InteractionStart:
if resolver.interaction is self._interaction_instance:
if self._additional_tests(sim_info, event, resolver):
self._on_interaction_of_interest_start()
return
elif event == TestEvent.InteractionExitedPipeline:
if resolver.interaction is self._interaction_instance and self._additional_tests(sim_info, event, resolver):
if resolver.interaction.has_been_user_canceled:
self.cancel_interaction()
return
if not resolver.interaction.is_finishing_naturally:
self._on_interaction_of_interest_failure()
return
def _on_interaction_of_interest_start(self):
self.owner.advance_to_next_object((self.targets), previous_si=(self._interaction_instance))
def _on_interaction_of_interest_failure(self):
self.retry_interaction()
def _additional_tests(self, sim_info, event, resolver):
return self.owner.is_sim_in_situation(sim_info.get_sim_instance())
def cancel_interaction(self):
self.owner._self_destruct()
def timer_expired(self):
self.owner.advance_to_next_object(previous_si=(self._interaction_instance))
def find_target_and_push_interaction(self):
if self.targets is None:
self.owner._self_destruct()
return
sim = self.owner.initiating_sim_info.get_sim_instance()
failed_connectivity_obj = None
for obj in sorted((self.targets), key=(operator.attrgetter('part_group_index'))):
if self._previous_si is not None:
context = self._previous_si.context.clone_for_continuation(self._previous_si)
else:
context = InteractionContext(sim, (InteractionContext.SOURCE_SCRIPT),
(Priority.High),
insert_strategy=(QueueInsertStrategy.FIRST))
resolver = self.interaction.get_resolver(target=obj, context=context)
if not self.owner.tests.run_tests(resolver):
self.targets.remove(obj)
continue
if not obj.is_connected(sim):
failed_connectivity_obj = obj
self.targets.remove(obj)
continue
self.targets.remove(obj)
self._target = obj
return self.push_interaction(context=context)
if failed_connectivity_obj is not None:
route_fail_context = InteractionContext(sim, (InteractionContext.SOURCE_SCRIPT),
(Priority.High),
insert_strategy=(QueueInsertStrategy.NEXT))
result = sim.push_super_affordance((InteractionFailureOptions.ROUTE_FAILURE_AFFORDANCE), failed_connectivity_obj,
route_fail_context, interaction_name=self.interaction.get_name(target=failed_connectivity_obj, context=route_fail_context),
interaction_icon_info=self.interaction.get_icon_info(target=failed_connectivity_obj, context=route_fail_context))
return False
def push_interaction(self, context=DEFAULT):
for sim in self.owner.all_sims_in_situation_gen():
if context is DEFAULT:
context = InteractionContext(sim, (InteractionContext.SOURCE_SCRIPT), (Priority.High),
insert_strategy=(QueueInsertStrategy.NEXT))
aop = interactions.aop.AffordanceObjectPair(self.interaction, self._target, self.interaction, None)
test_result, execute_result = aop.test_and_execute(context)
self._interaction_instance = execute_result[1]
if self.basic_extra:
if self._interaction_instance is not None:
self._interaction_instance.add_additional_instance_basic_extra(self.basic_extra)
return test_result
def retry_interaction(self):
self._retry_count += 1
if self._retry_count < self.max_retry_attempts:
self.push_interaction()
else:
self._retry_count = 0
self.owner.advance_to_next_object((self.targets), previous_si=(self._interaction_instance))
class LoopingInteractionSituation(situations.situation_complex.SituationComplexCommon):
INSTANCE_TUNABLES = {'tendor_job_and_role_state':TunableSituationJobAndRoleState(description='\n Job and Role State for the Sim in this situation.\n '),
'interaction':TunablePackSafeReference(description='\n The interaction that the Sim will run in looping succession on\n the object(s) specified if the tests pass.\n ',
manager=services.affordance_manager()),
'tests':TunableTestSet(description='\n The tests that muss pass for the Sim to run the tuned interaction\n with the object as the target.\n '),
'run_interaction_state':RunInteractionState.TunableFactory(description='\n Situation State used to run the tuned interaction on a specific\n object.\n ',
tuning_group=SituationComplexCommon.SITUATION_STATE_GROUP)}
REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
reader = self._seed.custom_init_params_reader
self.targets = None
self._retry_count = 0
self.interaction_override = self._seed.extra_kwargs.get('interaction', None)
self.basic_extra = self._seed.extra_kwargs.get('basic_extra', ())
if reader is None:
self._target_id = self._seed.extra_kwargs.get('default_target_id', None)
else:
self._target_id = reader.read_uint64(OBJECT_TOKEN, None)
if self._target_id is not None:
target = services.object_manager().get(self._target_id)
if target.parts:
self.targets = set(target.parts)
else:
self.targets = set((target,))
@classmethod
def default_job(cls):
pass
@classmethod
def _states(cls):
return (SituationStateData(1, RunInteractionState, factory=(cls.run_interaction_state)),)
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.tendor_job_and_role_state.job, cls.tendor_job_and_role_state.role_state)]
def _on_set_sim_job(self, sim, job_type):
super()._on_set_sim_job(sim, job_type)
self._change_state(self.run_interaction_state(targets=(self.targets), interaction=(self.looping_interaction),
basic_extra=(self.basic_extra)))
def advance_to_next_object(self, targets, previous_si=None):
self._change_state(self.run_interaction_state(targets=targets, interaction=(self.looping_interaction),
basic_extra=(self.basic_extra),
previous_si=previous_si))
@property
def looping_interaction(self):
if self.interaction_override is not None:
return self.interaction_override
return self.interaction | [
"[email protected]"
]
| |
75434b093211de8bd09ddd5d42a9bf15f06d16c6 | 77116b044adb3f28c5ea53d17fc69c29fd9bee55 | /modules/influxdb_wrapper.py | 683fcb41dd50d91836b1b24a3421205c11cc4a99 | [
"MIT"
]
| permissive | manav1403/stopstalk-deployment | 63a5c22f20cf1dbe81024ba63b33c1c986ae8ada | 667f6d89b24ce04595e2c70e02aa44aa3d836c42 | refs/heads/master | 2023-03-22T18:39:37.371341 | 2021-03-20T15:40:20 | 2021-03-20T15:40:20 | 290,265,152 | 0 | 0 | MIT | 2020-08-25T16:22:59 | 2020-08-25T16:22:58 | null | UTF-8 | Python | false | false | 2,960 | py | """
Copyright (c) 2015-2020 Raj Patel([email protected]), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from influxdb import SeriesHelper
from gluon import current
series_helper_classes = {}
# ------------------------------------------------------------------------------
def get_series_helper(measurement_name,
measurement_fields,
measurement_tags):
if measurement_name in series_helper_classes:
return series_helper_classes[measurement_name]
else:
series_helper_classes[measurement_name] = series_helper_class_wrapper(
measurement_name,
measurement_fields,
measurement_tags
)
return series_helper_classes[measurement_name]
# ------------------------------------------------------------------------------
def series_helper_class_wrapper(measurement_name,
measurement_fields,
measurement_tags):
class StopStalkSeriesHelper(SeriesHelper):
"""Instantiate SeriesHelper to write points to the backend."""
class Meta:
"""Meta class stores time series helper configuration."""
# The client should be an instance of InfluxDBClient.
client = current.INFLUXDB_CLIENT
# The series name must be a string. Add dependent fields/tags
# in curly brackets.
series_name = measurement_name
# Defines all the fields in this time series.
fields = measurement_fields
# Defines all the tags for the series.
tags = measurement_tags
# Defines the number of data points to store prior to writing
# on the wire.
bulk_size = 5
# autocommit must be set to True when using bulk_size
autocommit = True
return StopStalkSeriesHelper | [
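# A minimal usage sketch (hypothetical measurement, fields and tags; assumes
# current.INFLUXDB_CLIENT has been configured elsewhere in the app):
#
#   Helper = get_series_helper('request_latency',
#                              measurement_fields=['value'],
#                              measurement_tags=['endpoint'])
#   Helper(endpoint='/submissions', value=42)  # buffered; flushed every bulk_size points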
"[email protected]"
]
| |
6e7c618700c92cd8df767a24b9ce804ec40da416 | 90cc82215e9e67cc548e11b0ee1dc76ba8c2d4c8 | /lab4.py | a072dc19d00f28f63f81863f3d5543e7f9cd2bc9 | []
| no_license | greplist/mzi | e76c6bf9295ca786fbc68efe3b93f97b279e06d6 | 00c1769d3e8479f08a7914a4e4f689a7d24e318e | refs/heads/master | 2021-07-05T17:00:34.200440 | 2017-09-28T10:57:07 | 2017-09-28T10:57:07 | 103,229,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | from Crypto.Util.number import getStrongPrime
from Crypto.Random import random
from math import log
def egcd(a, b):
if (a == 0):
x, y = 0, 1
return b, x, y
g, x, y = egcd(b % a, a)
x, y = y - (b / a) * x, x
return g, x, y
def it_egcd(a, b):
states = []
while a != 0:
states.append((a, b))
a,b = b%a, a
g, x, y = b, 0, 1
for a,b in reversed(states):
x,y = y-(b/a)*x, x
return g, x, y
def mul_by_mod(a, b, mod):
return (a % mod * b % mod + mod) % mod
def div_by_mod(a, b, mod):
g, x, y = it_egcd(b, mod)
if (g == 1):
x = (x % mod + mod) % mod
return mul_by_mod(a, x, mod)
def generate_keys(size):
a = getStrongPrime(size)
b = getStrongPrime(size)
n = a*b
fi = (a - 1)*(b - 1)
while(True):
e = 2**(2**random.choice(range(5, int(log(size, 2) - 2)))) + 1
if egcd(fi, e)[0] == 1:
break
d = div_by_mod(1, e, fi)
return (e, n), (d, n)
def encrypt(public_key, open_text):
e, n = public_key
return pow(open_text, e, n)
def decrypt(private_key, cipher):
d, n = private_key
return pow(cipher, d, n)
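# Why decrypt inverts encrypt: generate_keys picks d = e^(-1) mod fi, so
# e*d = 1 (mod fi) with fi = (a-1)*(b-1), and by Euler's theorem
#   pow(pow(msg, e, n), d, n) == msg   for msg < n,
# which is exactly what the __main__ check below verifies.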
if __name__ == '__main__':
msg = 11111
public_key, private_key = generate_keys(2048)
ciphertext = encrypt(public_key, msg)
decrypted = decrypt(private_key, ciphertext)
print decrypted == msg
| [
"[email protected]"
]
| |
e0620bad78e3d37d6cae6bbb4aad089d4272be4c | d1548848a67d8b4bca7f3253eb5c08dcb031b132 | /NetworkTEST.py | 18145c1f719a41f5452ae17d043247372617afa7 | []
| no_license | LeslieK/MaxFlow | 3e7bcfdacbfef248d24ba4efcbed016de16320e0 | b2360e927508fcc2b977032e50c479b878b35cb3 | refs/heads/master | 2016-09-11T02:01:04.852145 | 2013-11-26T18:13:54 | 2013-11-26T18:13:54 | 13,695,868 | 2 | 1 | null | 2013-10-21T16:15:02 | 2013-10-19T05:09:26 | Python | UTF-8 | Python | false | false | 3,407 | py | '''
Performs network flow analysis on a network of V points
Each point is a bike station in the citibike network
Pairs of points are connected if they are a distance of _DISTANCE km
from each other.
Capacity on an edge is the sum of the bike docks at each edge vertex.
'''
import json
import argparse
import matplotlib.pyplot as plt
import numpy as np
import random
import MaxFlowUtils
from MaxFlowLib import FlowEdge, FlowNetwork
from FordFulkerson import FF
import ConnectedComponent
from collections import defaultdict
_DISTANCE = .4 # km
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="filename of json network", type=str)
args = parser.parse_args()
# read network data
with open(args.filename) as f:
input = json.load(f)
num_stations = len(input["stationBeanList"])
stations = np.random.random((num_stations, 2)) # 2 cols for latitude, longitude
totalDocks = np.zeros((num_stations,), dtype=np.uint8)
names = {} # maps vertex number to stationName
vertex = {} # maps stationName to vertex number
# store data
row = 0
for station in input["stationBeanList"]:
# store data in numpy arrays
totalDocks[row] = station["totalDocks"]
stations[row][0] = station["latitude"]
stations[row][1] = station["longitude"]
vertex[station["stationName"]] = row
names[row] = station["stationName"]
row += 1
# strategy:
start_station = "1 Ave & E 15 St"
end_station = "Broadway & W 51 St"
# build a digraph from start to end
source = vertex[start_station]
target = vertex[end_station]
# connect stations
flow_edges = MaxFlowUtils.findFlowEdges(stations, totalDocks, source, target)
# flow_edges = []
# for v in range(num_stations):
# vlat = stations[v][0]
# vlong = stations[v][1]
# for w in range(num_stations):
# if w > v:
# wlat = stations[w][0]
# wlong = stations[w][1]
# d = MaxFlowUtils.distance(vlat, vlong, wlat, wlong)
# if d < _DISTANCE:
# capacity = totalDocks[v] + totalDocks[w]
# flow_edges.append(FlowEdge(v, w, capacity))
# #flow_edges.append(FlowEdge(w, v, capacity))
# flow_edges.append(FlowEdge(329, 330, totalDocks[329] + totalDocks[330]))
#draw scatter plot
plt.xlabel('latitude')
plt.ylabel('longitude')
plt.title('citibike stations in NYC')
plt.plot(stations[:, 0], stations[:, 1], 'ro')
# draw lines
lat_array = np.arange(len(flow_edges) * 2)
long_array = np.arange(len(flow_edges) * 2)
for e in range(len(flow_edges)):
f = flow_edges[e]
v = f.source()
w = f.sink()
plt.plot([stations[v][0],stations[w][0]], [stations[v][1], stations[w][1]], 'b-')
plt.show()
# build flow network
# number of vertices = num_stations
flownet = FlowNetwork(num_stations)
for e in flow_edges:
flownet.addEdge(e)
# # find connected components in flow network
# cc = ConnectedComponent.CC(flownet)
# print 'Number of connected components: {}'.format(cc.count())
# counts = [0] * cc.count()
# vertices = defaultdict(list)
# for v in range(num_stations):
# counts[cc.id(v)] += 1
# vertices[cc.id(v)].append(v)
# # run Ford-Fulkerson algorithm over graph to find max flow
# start = vertex["1 Ave & E 15 St"]
# end = vertex["Broadway & W 51 St"]
# # check if connected
# if cc.id(start) == cc.id(end):
# list_of_vertices = vertices[cc.id(start)]
# maxflow = FF(flownet, start, end, list_of_vertices)
# else:
# print '{} is not connected to {}'.format(names[start], names[end])
| [
"[email protected]"
]
| |
5c28907d436b434c6086d6e55952dad43ef34844 | 5a0db8775f29934648c384771a9518a810d9a55f | /读写文件.py | 82057f386fa5b80206666d2a37aa301cdc30d4c2 | []
| no_license | GhostClock/PythonStudy | c3634d8a6ab4e4c34fef080f7fcdc8d114d8575c | c4fb35f0ca2d6f8d49d65fb7cb571012a1fc5be2 | refs/heads/master | 2021-06-26T17:32:08.162956 | 2017-09-10T11:45:51 | 2017-09-10T11:45:51 | 88,620,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | from sys import argv
script,fileName = argv
print "We will going to erase %r " % fileName
print "If you dont want that ,Hit CTRL-C"
print "If you do want that ,Hit RETURN"
raw_input("?")
print "Opening the file..."
target = open(fileName,'w')
print "Truncating the file ,Good Bye"
target.truncate()
print "Now Im going to ask you for three lines"
line1 = raw_input("line 1:")
line2 = raw_input("line 2:")
line3 = raw_input("line 3:")
print "Im going to write there to the file"
target.write(line1 + "\n")
target.write(line2 + "\n")
target.write(line3 + "\n")
print "And finally, we close it"
target.close()
try:
pass
except :
pass | [
"[email protected]"
]
| |
9e0a93f3988ad9bafaa30d4e1f01ee3091a6d91d | 3fa79c1c450676a6ad07cceef6a311ff3e18e366 | /util/old/downsample2x_velocity.py | 93057a87dc8932f1759f38353893e4af55bdfa7c | []
| no_license | Alan19922015/cism-data | 7858bfd834e00c1f4cba865bf6a6574f757d9ee0 | f7a48a37bdb102bd929bb07626bf9236166d5d37 | refs/heads/master | 2020-03-19T06:29:54.002451 | 2017-04-04T17:39:26 | 2017-04-04T17:39:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py |
from numpy import *
from struct import unpack
from netCDF4 import Dataset
def downsample( a , f ) :
if (f == 1) :
return a
nx = a.shape[0] / f
ny = a.shape[1] / f
a1 = a[:a.shape[0],:ny]
for j in range(ny) :
a1[:,j] = a[:,j*f:(j+1)*f].mean(1)
a2 = a1[:nx,:ny]
for i in range(nx) :
a2[i,:] = a1[i*f:(i+1)*f,:].mean(0)
return a2
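# downsample() block-averages a 2-D array: with f = 2 each output cell is the
# mean of a 2x2 block of the input, e.g.
#   downsample(array([[1., 1., 2., 2.],
#                     [1., 1., 2., 2.],
#                     [3., 3., 4., 4.],
#                     [3., 3., 4., 4.]]), 2)  ->  [[1., 2.], [3., 4.]]
# Note the intermediate results are written through views of the input, so the
# input array is partially overwritten; pass a copy if it is still needed.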
nc500 = Dataset('greenland_vel_mosaic500_bambergrid.nc', 'r', format='NETCDF4')
nc1km = Dataset('greenland_vel_mosaic1km_bambergrid.nc', 'w', format='NETCDF4')
x500 = nc500.variables['x'][:]
y500 = nc500.variables['y'][:]
vx500 = nc500.variables['vx'][:]
vy500 = nc500.variables['vy'][:]
ex500 = nc500.variables['ex'][:]
ey500 = nc500.variables['ey'][:]
nx500 = x500.shape[0]
ny500 = y500.shape[0]
nx1km = nx500/2
ny1km = ny500/2
x1km = ndarray((nx1km))
y1km = ndarray((ny1km))
vx1km = ndarray((ny1km,nx1km))
vy1km = ndarray((ny1km,nx1km))
ex1km = ndarray((ny1km,nx1km))
ey1km = ndarray((ny1km,nx1km))
for i in range(nx1km) :
x1km[i] = ( x500[2*i] + x500[2*i+1] ) / 2.
for j in range(ny1km) :
y1km[j] = ( y500[2*j] + y500[2*j+1] ) / 2.
print(y500)
print(y1km)
vx1km = downsample( vx500 , 2 )
vy1km = downsample( vy500 , 2 )
ex1km = downsample( ex500 , 2 )
ey1km = downsample( ey500 , 2 )
x_d = nc1km.createDimension('x', nx1km)
y_d = nc1km.createDimension('y', ny1km)
x_v = nc1km.createVariable('x' ,'f4',('x',))
y_v = nc1km.createVariable('y' ,'f4',('y',))
vx_v = nc1km.createVariable('vx','f4',('y','x',))
vy_v = nc1km.createVariable('vy','f4',('y','x',))
ex_v = nc1km.createVariable('ex','f4',('y','x',))
ey_v = nc1km.createVariable('ey','f4',('y','x',))
vx_v.missing_value = -2.e9
vy_v.missing_value = -2.e9
ex_v.missing_value = -2.e9
ey_v.missing_value = -2.e9
x_v[:] = x1km
y_v[:] = y1km
vx_v[:,:] = vx1km
vy_v[:,:] = vy1km
ex_v[:,:] = ex1km
ey_v[:,:] = ey1km
nc1km.close()
nc500.close()
| [
"[email protected]"
]
| |
7d529134fc3ef9143ade22ef95e546615b1e24e1 | c1c1a60defe660a12bb568339925a0b85d3de918 | /utils.py | b5874ef9bdaf2b94a86ad97140262e7a426868c3 | []
| no_license | mysunk/dacon-celestialtype-classification | dbf9afad251d93542d37bad12d914cb4819438cb | 872e3f06b3faa7c013856d9cf02b6b990c875ec6 | refs/heads/master | 2023-03-25T20:38:13.788811 | 2021-03-25T06:00:09 | 2021-03-25T06:00:09 | 351,323,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | import pandas as pd
import glob, os
import csv
try:
import cPickle as pickle
except BaseException:
import pickle
def to_number(x, dic):
return dic[x]
def load_dataset(n_stack): # stacking에 따라 다른 dataset load -- not implemented
# load dataset
train = pd.read_csv('data_raw/train.csv', index_col=0)
sample_submission = pd.read_csv('data_raw/sample_submission.csv', index_col=0)
# make label
column_number = {}
for i, column in enumerate(sample_submission.columns):
column_number[column] = i
train['type_num'] = train['type'].apply(lambda x: to_number(x, column_number))
train['fiberID'] = train['fiberID'].astype(int)
train_label = train['type_num']
train = train.drop(columns=['type', 'type_num'], axis=1)
return train, train_label
def save_obj(obj, name):
    # Create the trials directory on first use, then write the pickle.
    if not os.path.isdir('trials'):
        os.mkdir('trials')
    with open('trials/' + name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open('trials/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def change_param(cv_result):
cv_best_param = cv_result.best_trial['result']['param']['cv_params']
nfold = cv_best_param['nfold']
del cv_best_param['nfold']
tree_best_param = cv_result.best_trial['result']['param']['tree_params']
tree_best_param['n_estimators'] = cv_best_param.pop('num_boost_round') # change name
tree_best_param['seed'] = cv_best_param.pop('seed')
cv_best_param['eval_metric'] = tree_best_param.pop('eval_metric')
param = {'fit_params':cv_best_param, 'tree_params':tree_best_param}
return param, nfold | [
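# change_param() turns the best hyperopt CV trial back into constructor-style
# parameters: num_boost_round and seed move from the CV params into the tree
# params (num_boost_round renamed to n_estimators), eval_metric moves the other
# way, and the fold count is returned separately, e.g.
#   param, nfold = change_param(cv_result)
#   model = SomeBooster(**param['tree_params'])   # hypothetical model class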
"[email protected]"
]
| |
ae671c59180030aef4bf58a5dbb0cd5abbc89454 | e5dc2651bcce866d89537014221cd345b4e2d103 | /codeitsuisse/routes/parasite.py | 3fad245cec992636ce7bf3cdfa33825b513ce067 | []
| no_license | borische1322/CodeITSuisse | f748aa6fb2adaff9e8eca01998757e945d0d80f4 | 76f0a117656975652132eaa0da8aa1b58629d198 | refs/heads/master | 2023-08-14T22:31:48.765960 | 2021-09-25T10:27:08 | 2021-09-25T10:27:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,957 | py | import logging
import json
from flask import request, jsonify
from codeitsuisse import app
global p1ANS
global test
p2Ans=0
again1=1
again2=1
test=0
logger = logging.getLogger(__name__)
@app.route('/parasite', methods=['POST'])
def evaluate_parasite():
    global test
data = request.get_json()
logging.info("data sent for evaluation {}".format(data))
#inputValue = data.get("input")
result = []
for test_case in data:
result.append(main(test_case["room"],test_case["grid"],test_case["interestedIndividuals"]))
return jsonify(result)
def target(inputTarget):
targry_array=[]
for i in inputTarget:
x=i.rsplit(',')
targry_array.append(x)
return targry_array
def locationParasite(x):
#x=[[0, 3],[0, 1]]
parasite=[]
for i in range(len(x)):
for j in range(len(x[i])):
if x[i][j]==3:
parasite.append(i)
parasite.append(j)
return parasite
def noHealth(inputGrid):
i=0
j=0
while i<len(inputGrid):
while j<len(inputGrid[i]):
if inputGrid[i][j]==1:
return False
else:
j+=1
j=0
i+=1
return True
def make_stepForP2(m):
global again1
for i in range(len(m)):
for j in range(len(m[i])):
if m[i][j] == 3:
if i>0 and m[i-1][j] == 1:
m[i-1][j] = 3
again1+=1
if j>0 and m[i][j-1] == 1 :
m[i][j-1] = 3
again1+=1
if i<len(m)-1 and m[i+1][j] == 1 :
m[i+1][j] = 3
again1+=1
if j<len(m[i])-1 and m[i][j+1] == 1 :
m[i][j+1] = 3
again1+=1
def isolatedForp2(m):
for i in range(len(m)-1):
for j in range(len(m[i])-1):
if m[i][j]==1:
if i==0 and j==0:
if m[i][j+1]==0 and m[i+1][j]==0:
return True
elif i==len(m)-1 and j==0:
if m[i][j+1]==0 and m[i-1][j]==0:
return True
elif i==0 and j==len(m[i])-1:
if m[i][j-1]==0 and m[i+1][j]==0:
return True
elif i==len(m)-1 and j==len(m[i])-1:
return True
else:
if (m[i-1][j] ==0 and i>0) and (m[i][j-1]==0 and j>0) and (m[i+1][j]==0 and i<len(m[1])-1) and (m[i][j+1]==0 and j<len(m[1])-1) :
return True
return False
def p2(grid):
global p2Ans
global again1
if isolatedForp2(grid)==True:
p2Ans=-1
else:
while noHealth(grid)==False and again1>0:
make_stepForP2(grid)
p2Ans+=1
again1-=1
return p2Ans
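# p2() is a step-counting infection simulation: on every step each parasite
# cell (3) converts the healthy cells (1) directly above/below/left/right of
# it, and the answer is the number of steps until no healthy cell remains, or
# -1 when isolatedForp2() finds a healthy cell walled off on all sides.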
def p1(inputGrid, inputTarget):
global p1_value
p1 = target(inputTarget)
#p1_ans[len(p1)]
p1_ans=[]
parasite = locationParasite(inputGrid)
x = parasite[0]
y = parasite[1]
#print(x)
#print(y)
i=0
while i<len(p1):
p1_value = 0
if solve(inputGrid,x,y,int(p1[i][0]),int(p1[i][1])) == True:
#p1_ans[i] = p1_value
p1_ans.append(p1_value)
i+=1
else:
p1_ans.append(-1)
i+=1
p1_finalAns=dict(zip(inputTarget,p1_ans))
return p1_finalAns
def solve(m,x,y,p1_x,p1_y):
global p1_value
global test
#print(p1_value)
#Base case
if y > len(m)-1 or x > len(m[0])-1:
p1_value -= 1
return False
if x == p1_x and y == p1_y :
return True
if m[x][y] != 1 and test != 0:
p1_value -= 1
return False
test+=1
#print("fuck")
#recursive case
if solve(m,x,y+1,p1_x,p1_y) == True : #right
p1_value += 1
return True
if solve(m,x+1,y,p1_x,p1_y) == True : #down
p1_value += 1
return True
if solve(m,x,y-1,p1_x,p1_y) == True : #left
p1_value += 1
return True
if solve(m,x-1,y,p1_x,p1_y) == True : #up
p1_value += 1
return True
#Backtracking
return False
def p1test(grid,inputTarget):
global p1ANS
global again1
    p1target=target(inputTarget)
p1ANS=[]
for i in p1target:
steps=0
if isolatedForp1(grid,int(i[0]),int(i[1]))==True:
print(-1)
else:
            while noHealth(grid)==False and again1>0:
if grid[int(i[0])][int(i[1])]!=3:
make_stepForP2(grid)
steps+=1
again1-=1
else:
break
p1ANS.append(steps)
p1_finalAns=dict(zip(inputTarget,p1ANS))
    return p1_finalAns
def isolatedForp1(m,i,j):
if m[i][j]==1:
if i==0 and j==0:
if m[i][j+1]==0 and m[i+1][j]==0:
return True
elif i==len(m)-1 and j==0:
if m[i][j+1]==0 and m[i-1][j]==0:
return True
elif i==0 and j==len(m[i])-1:
if m[i][j-1]==0 and m[i+1][j]==0:
return True
elif i==len(m)-1 and j==len(m[i])-1:
return True
else:
if (m[i-1][j] ==0 and i>0) and (m[i][j-1]==0 and j>0) and (m[i+1][j]==0 and i<len(m[1])-1) and (m[i][j+1]==0 and j<len(m[1])-1) :
return True
elif m[i][j]==0:
return True
else:
return False
def make_stepForP1(m):
global again1
for i in range(len(m)):
for j in range(len(m[i])):
if m[i][j] == 3:
if i>0 and m[i-1][j] == 1:
m[i-1][j] = 3
again1+=1
if j>0 and m[i][j-1] == 1 :
m[i][j-1] = 3
again1+=1
if i<len(m)-1 and m[i+1][j] == 1 :
m[i+1][j] = 3
again1+=1
if j<len(m[i])-1 and m[i][j+1] == 1 :
m[i][j+1] = 3
again1+=1
def main(inputRoom,inputGrid,inputTarget):
p1_ans=p1(inputGrid, inputTarget)
p2_ans=p2(inputGrid)
p3_ans=-1
p4_ans=1
finalAns={'room':inputRoom,'p1':p1_ans,'p2':p2_ans,'p3':p3_ans,'p4':p4_ans}
return finalAns
| [
"[email protected]"
]
| |
6d3e6d6192178fdbd567a66120eb0aeb0b1077a1 | a281d09ed91914b134028c3a9f11f0beb69a9089 | /contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_stellar_address.py | e90bce385fe104a9ad05fb1b06683e925a02a2db | [
"Apache-2.0"
]
| permissive | CarstenFrommhold/great_expectations | 4e67bbf43d21bc414f56d576704259a4eca283a5 | 23d61c5ed26689d6ff9cec647cc35712ad744559 | refs/heads/develop | 2023-01-08T10:01:12.074165 | 2022-11-29T18:50:18 | 2022-11-29T18:50:18 | 311,708,429 | 0 | 0 | Apache-2.0 | 2020-11-10T15:52:05 | 2020-11-10T15:52:04 | null | UTF-8 | Python | false | false | 6,125 | py | """
This is a template for creating custom ColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations
"""
import json
from typing import Optional
import coinaddrvalidator
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_stellar_address(addr: str) -> bool:
try:
res = coinaddrvalidator.validate("xlm", addr).valid
return res
except Exception as e:
return False
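# Quick sanity check of the wrapper (addresses taken from the examples below;
# illustrative only, so left commented out):
#
#   is_valid_stellar_address("GA7YNBW5CBTJZ3ZZOWX3ZNBKD6OE7A7IHUQVWMY62W2ZBG2SGZVOOPVH")  # expected True
#   is_valid_stellar_address("1BoatSLRHtKNngkdXEeobR76b53LETtpyT")                        # expected False (Bitcoin address)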
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidStellarAddress(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_stellar_address"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_stellar_address(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidStellarAddress(ColumnMapExpectation):
"""Expect column values to be valid Stellar address"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"GA7YNBW5CBTJZ3ZZOWX3ZNBKD6OE7A7IHUQVWMY62W2ZBG2SGZVOOPVH",
"GBTA54J4LY5BAQWA4KECII66TPTU3V6DXPBPNVXIPMHN5W6QFATWRXY5",
"GCINDD6LNZSYPND4WRQL6NRFGOAXMAMK7M3QP2JXWC5634BY4DSZ4YG2",
"GDKRCHSD2YUW3X6FXRAVOOZZ2IOMWSGM6SH6I56VCX6V2DTPG7FO626W",
],
"some_other": [
"1BoatSLRHtKNngkdXEeobR76b53LETtpyT",
"n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ",
"3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC",
"bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_stellar_address"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
return True
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["coinaddrvalidator"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidStellarAddress().print_diagnostic_checklist()
| [
"[email protected]"
]
| |
74d08d6dbd8515e44b6a02e7c4576cb9cf0fd0b8 | d0a0989e2f965e482efa41456449c87cba6ea19d | /quote/dash_quote.py | 4eb3c6682fefb4c829115aeafe0f542b6ae2ff28 | []
| no_license | dsjclp/dsjclp.eu.pythonanywhere.com | cdc50a1975e4a55d7970e2f35c8a2c4140ac753f | 81491d86883ff38f9712da2725abaadc71533d26 | refs/heads/main | 2023-01-09T14:00:31.761476 | 2020-11-15T20:15:52 | 2020-11-15T20:15:52 | 273,483,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,853 | py | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_table as dct
import dash_html_components as html
import pandas as pd
from django_plotly_dash import DjangoDash
from dash.dependencies import Input, Output, State
from dash_table.Format import Format, Group, Scheme, Symbol
import datetime
from dateutil.relativedelta import relativedelta
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from core.models import Customer
from core.models import Contract
from core.models import Schedule
from core.models import Step
from django.shortcuts import get_object_or_404
import dash_daq as daq
startdate = datetime.datetime.now()
# Create figure with secondary y-axis
fig = make_subplots(specs=[[{"secondary_y": True}]])
BS = "../../../core/staticfiles/css/bootstrap.css"
app = DjangoDash("QuoteApp", external_stylesheets=[BS])
graphcolors = {
'background': '#222',
'text': '#fff'
}
app.layout = html.Div(
[
html.Div(id="output-one", className='d-sm-flex align-items-center justify-content-between mb-4',
children=[
html.Div('Lease quote', className='h3 mb-0'),
dbc.Button("Save quote", id="save_quote_button", className="d-none d-md-block btn btn-sm btn-primary"),
]
),
dbc.CardDeck(
[
dbc.Card(
[
dbc.CardHeader("Your input", className="card-title font-weight-bold bg-primary"),
dbc.CardBody(
[
dbc.FormGroup(id='wrapper_amount',
children=[
dbc.Label("Financed amount", html_for="amountInput"),
dbc.InputGroup(
[
dbc.InputGroupAddon("€", addon_type="prepend"),
dbc.Input(id="amountInput", type="text", min=10000, max=100000, step=1000,value=10000, className='bg-secondary')
]
),
dcc.Slider(id='amountSlider',min=10000,max=100000,value=10000,step=1000,updatemode='drag',
marks={10000: {'label': '10K'},20000: {'label': '20K'},40000: {'label': '40K'},50000: {'label': '50K'},60000: {'label': '60K'},80000: {'label': '80K'},100000: {'label': '100K'}
}
),
]
),
dbc.FormGroup(id='wrapper_rv',
children=[
dbc.Label("Residual value", html_for="rvInput"),
dbc.InputGroup(
[
dbc.InputGroupAddon("€", addon_type="prepend"),
dbc.Input(id="rvInput", type="text", min=0, max=30000, step=1000,value=0, className='bg-secondary')
],
),
dcc.Slider(id='rvSlider',min=0,max=30000,value=0,step=1000,updatemode='drag',
marks={00000: {'label': '0K'},10000: {'label': '10K'},20000: {'label': '20K'},30000: {'label': '30K'}
}
),
]
),
dbc.FormGroup(id='wrapper_duration',
children=[
dbc.Label("Duration", html_for="durationInput"),
dbc.InputGroup(
[
dbc.InputGroupAddon("Months", addon_type="prepend"),
dbc.Input(id="durationInput", type="text", min=0, max=30000, step=1000,value=0, className='bg-secondary')
],
),
dcc.Slider(id='durationSlider',min=12,max=84,value=24,step=1,updatemode='drag',
marks={12: {'label': '12M'},24: {'label': '24M'},36: {'label': '36M'},48: {'label': '48M'},60: {'label': '60M'},72: {'label': '72M'},84: {'label': '84M'}
}
),
]
),
dbc.FormGroup(id='wrapper_rate',
children=[
dbc.Label("Annual rate", html_for="rateInput"),
dbc.InputGroup(
[
dbc.InputGroupAddon("%", addon_type="prepend"),
dbc.Input(id="rateInput", type="text", min=0, max=500, step=10,value=200, className='bg-secondary')
],
className="mb-2",
),
dcc.Slider(id='rateSlider',min=0,max=500,value=200,step=10,updatemode='drag',
marks={
0: {'label': '0%'},100: {'label': '1%'},200: {'label': '2%'},300: {'label': '3%'}, 400: {'label': '4%'}, 500: {'label': '5%'}
},
),
]
),
], className = 'bg-body'
),
],
className='card border-light mb-3'
),
dbc.Card(
[
dbc.CardHeader("Your monthly rent", className="card-title font-weight-bold bg-primary"),
dbc.CardBody(
[
dbc.Alert(
[
html.Div(id='result', className='h2 text-center text-dark my-auto font-weight-bold'),
],
color="warning",
className = 'mb-4 mt-2'
),
dbc.Alert(
[
html.Img(src="../staticfiles/img/advance.png", alt='formula', className='img-fluid text-center my-auto')
],
color='light',
id='formula',
className = 'mb-4'
),
dbc.Alert(
[
dbc.Label("Payments in:", className='font-weight-bold'),
dbc.RadioItems(
options=[
{"label": "Advance", "value": '01'},
{"label": "Arrears", "value": '02'},
],
value='01',
id="mode",
inline=True,
),
],
color="light",
className='text-dark mb-4'
),
dbc.Alert(
[
dbc.Label("With manual rents:", className='font-weight-bold'),
dbc.RadioItems(
options=[
{"label": "No", "value": '02'},
{"label": "Yes", "value": '01'},
],
value='02',
id="manual",
inline=True,
),
],
color="light",
className='text-dark d-none d-md-block'
),
], className = 'bg-body'
),
],
className='card border-warning mb-3'
),
]
),
html.Div(id='table-container',
children=[
dbc.Card(
[
dbc.CardHeader("Your manual rents", className="card-title font-weight-bold text-white bg-primary"),
dbc.CardBody(
[
dct.DataTable(
id='manual_rents',
columns=(
[{'name': ['Starting'], 'id': 'year'}] +
[{'name': [(startdate + relativedelta(months=i)).strftime('%b')], 'id': str(i+1), 'type': 'numeric',
'format': Format(scheme=Scheme.fixed, precision=0,group=Group.yes,groups=3,group_delimiter='.',decimal_delimiter=',',symbol=Symbol.yes, symbol_prefix=u'€')}
for i in range (12)
]
),
data=[],
editable= True,
style_cell={
'backgroundColor': '#888'
},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': '#444'
},
],
style_header={
'backgroundColor': '#375a7f',
'fontWeight': 'bold'
},
style_table={'font-size': '1.2rem'}
),
]
),
],
className='d-none d-md-block card border-light mb-3 text-dark',
),
]
),
dbc.Card(
[
dbc.CardHeader("Your graph", className="card-title font-weight-bold bg-primary"),
dbc.CardBody(
[
dcc.Graph(id='graph',figure=fig)
],
),
],
className='d-none d-md-block card border-light mb-3 bg-body'
),
dbc.Card(
[
dbc.CardHeader("Your schedule", className="card-title font-weight-bold bg-primary"),
dbc.CardBody(
[
dct.DataTable(id='schedule',
data=[],
columns=[
{'id': 'date', 'name': 'Date', 'type': 'datetime'},
{'id': 'rent', 'name': 'Rent', 'type': 'numeric', 'format': Format(scheme=Scheme.fixed, precision=1,group=Group.yes,groups=3,group_delimiter='.',decimal_delimiter=',',symbol=Symbol.yes, symbol_prefix=u'€')},
{'id': 'balance', 'name': 'Balance', 'type': 'numeric', 'format': Format(scheme=Scheme.fixed, precision=1,group=Group.yes,groups=3,group_delimiter='.',decimal_delimiter=',',symbol=Symbol.yes, symbol_prefix=u'€')},
],
page_size=12,
export_format="csv",
style_cell={
'backgroundColor': '#888'
},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': '#444'
},
],
style_header={
'backgroundColor': '#375a7f',
'fontWeight': 'bold'
},
style_table={'font-size': '1.2rem'}
)
],
className = 'mb-4 bg-body'
),
],
className='d-none d-md-block card border-light mb-3 bg-body'
),
]
)
# Keep the amount input box and slider in sync
@app.expanded_callback(
Output('wrapper_amount', 'children'),
[Input('amountInput', 'value'), Input('amountSlider', "value")]
)
def amount_update(valueInput, valueSlider, **kwargs):
ctx = dash.callback_context
if not ctx.triggered:
trigger_id = "amountSlider.value"
else:
trigger_id = ctx.triggered[0]['prop_id']
if trigger_id == "amountSlider.value":
valueForinput = "{:0,.0f}".format(valueSlider)
valueForslider = valueSlider
if trigger_id == "amountInput.value":
valueInput = valueInput.replace(',','')
valueInput = valueInput.replace('.','')
valueForinput = "{:0,.0f}".format(int(valueInput))
valueForslider = int(valueInput)
return[
dbc.Label("Financed amount", html_for="amountInput", className='mb-2'),
dbc.InputGroup(
[
dbc.InputGroupAddon("€", addon_type="prepend"),
dbc.Input(id="amountInput", type="text", min=10000, max=100000, step=1000,value=valueForinput, debounce=True, className='form-control bg-secondary text-white font-weight-bold'),
],
className="mb-2",
),
dcc.Slider(id='amountSlider',min=10000,max=100000,value=valueForslider,step=1000,updatemode='drag',
marks={10000: {'label': '10K'},20000: {'label': '20K'},40000: {'label': '40K'},50000: {'label': '50K'},60000: {'label': '60K'},80000: {'label': '80K'},100000: {'label': '100K'}
},
tooltip = 'always_visible',
),
]
return dash.no_update
# Keep the residual value input box and slider in sync
@app.expanded_callback(
Output('wrapper_rv', 'children'),
[Input('rvInput', 'value'), Input('rvSlider', "value")]
)
def rv_update(valueInput, valueSlider, **kwargs):
ctx = dash.callback_context
if not ctx.triggered:
trigger_id = "rvSlider.value"
else:
trigger_id = ctx.triggered[0]['prop_id']
if trigger_id == "rvSlider.value":
valueForinput = "{:0,.0f}".format(valueSlider)
valueForslider = valueSlider
if trigger_id == "rvInput.value":
valueInput = valueInput.replace(',','')
valueInput = valueInput.replace('.','')
valueForinput = "{:0,.0f}".format(int(valueInput))
valueForslider = int(valueInput)
return [
dbc.Label("Residual value", html_for="rvInput", className='mb-2'),
dbc.InputGroup(
[
dbc.InputGroupAddon("€", addon_type="prepend"),
dbc.Input(id="rvInput", type="text", min=0, max=30000, step=1000,value=valueForinput, debounce=True, className='form-control bg-secondary text-white font-weight-bold'),
],
className="mb-2",
),
dcc.Slider(id='rvSlider',min=0,max=30000,value=valueForslider,step=1000,updatemode='drag',
marks={
00000: {'label': '0K'},10000: {'label': '10K'},20000: {'label': '20K'},30000: {'label': '30K'}
},
tooltip = 'always_visible',
),
]
return dash.no_update
# Keep the duration input box and slider in sync
@app.expanded_callback(
Output('wrapper_duration', 'children'),
[Input('durationInput', 'value'), Input('durationSlider', "value")]
)
def duration_update(valueInput, valueSlider, **kwargs):
ctx = dash.callback_context
if not ctx.triggered:
trigger_id = "durationSlider.value"
else:
trigger_id = ctx.triggered[0]['prop_id']
if trigger_id == "durationSlider.value":
valueForinput = valueSlider
valueForslider = valueSlider
if trigger_id == "durationInput.value":
valueForinput = valueInput
valueForslider = valueInput
return [
dbc.Label("Duration", html_for="durationInput", className='mb-2'),
dbc.InputGroup(
[
dbc.InputGroupAddon("Months", addon_type="prepend"),
dbc.Input(id="durationInput", type="text", min=0, max=30000, step=1000,value=valueForinput, debounce=True, className='form-control bg-secondary text-white font-weight-bold'),
],
className="mb-2",
),
dcc.Slider(id='durationSlider',min=12,max=84,value=valueForslider,step=1,updatemode='drag',
marks={
12: {'label': '12M'},24: {'label': '24M'},36: {'label': '36M'},48: {'label': '48M'},60: {'label': '60M'},72: {'label': '72M'},84: {'label': '84M'}
},
tooltip = 'always_visible',
),
]
return dash.no_update
# Keep the rate input box and slider in sync
@app.expanded_callback(
Output('wrapper_rate', 'children'),
[Input('rateInput', 'value'), Input('rateSlider', "value")]
)
def rate_update(valueInput, valueSlider, **kwargs):
ctx = dash.callback_context
if not ctx.triggered:
trigger_id = "rateSlider.value"
else:
trigger_id = ctx.triggered[0]['prop_id']
if trigger_id == "rateSlider.value":
valueForinput = "{:0,.2f}".format(valueSlider/100)
valueForslider = valueSlider
if trigger_id == "rateInput.value":
valueInput = valueInput.replace('.','')
valueInput = valueInput.replace(',','')
valueForinput = "{:0,.2f}".format(int(valueInput)/100)
valueForslider = valueInput
return [
dbc.Label("Annual rate", html_for="rateInput"),
dbc.InputGroup(
[
dbc.InputGroupAddon("%", addon_type="prepend"),
dbc.Input(id="rateInput", type="text", min=0, max=500, step=10,value=valueForinput, debounce=True, className='form-control bg-secondary text-white font-weight-bold'),
],
className="mb-2",
),
dcc.Slider(id='rateSlider',min=0,max=500,value=valueForslider,step=10,updatemode='drag',
marks={
0: {'label': '0%'},100: {'label': '1%'},200: {'label': '2%'},300: {'label': '3%'}, 400: {'label': '4%'}, 500: {'label': '5%'}
},
tooltip = 'always_visible',
),
]
return dash.no_update
# Show or hide the manual rents table
@app.expanded_callback(
Output('table-container', 'style'),
[Input('manual', 'value')])
def show_manual(manualValue, **kwargs):
if manualValue !='01':
return {'display': 'none'}
else:
return {'display': 'block'}
# Populate the manual rents table according to the chosen duration
@app.expanded_callback(
Output('manual_rents', 'data'),
[Input('durationSlider', 'value'),
Input('manual', 'value')],
[State('manual_rents', 'data')]
)
def create_manual(durationValue, manualValue, rows, **kwargs):
yearref = datetime.datetime.now().year
durationvalue = int(durationValue)
    # Number of rows in the manual rents table: one per full 12-month slice of the chosen duration, plus one for any remainder
nblig = int(durationvalue/12)
d = []
year = yearref
for p in range(nblig):
d.append([year,None, None, None, None, None, None, None, None, None, None, None, None])
year=year+1
if nblig != durationvalue/12:
dec = durationvalue - nblig*12
d.append([year,None, None, None, None, None, None, None, None, None, None, None, None])
df= pd.DataFrame(d, columns=['year',"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"])
    # Block entries beyond the chosen duration
if nblig != durationvalue/12:
dec = durationvalue - nblig*12
for y in range(nblig+1):
if y==nblig:
for x in range(12, dec, -1):
df[str(x)][y]= 'N/A'
y=y+1
return df.to_dict('rows')
# Compute the unknown rent and build the rent schedule
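# The unknown rent R is the value that makes the financed amount equal to the
# present value of all flows at the monthly rate r = rateValue / 120000
# (sketch, payments in advance; the discount exponents shift from k to k+1 in
# arrears mode):
#   amount = sum(manual_k / (1+r)**k) + R * sum(1 / (1+r)**k) + RV / (1+r)**N
# so R = (amount - NPV(manual rents) - NPV(RV)) / NPV(unit coefficients),
# which is the npvfin / npvcoeff computation inside compute_schedule below.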
@app.expanded_callback(
Output('schedule', 'data'),
[
Input('durationSlider', 'value'),
Input('amountSlider', 'value'),
Input('rvSlider', 'value'),
Input('manual_rents', 'data'),
Input('mode', 'value'),
Input('rateSlider', 'value'),
]
)
def compute_schedule(durationValue, amountValue, rvValue, rows, modeValue, rateValue, **kwargs):
    # Build the rent list from the manual rents table
rent = []
j=1
amountvalue = int(amountValue)
rvvalue = int(rvValue)
durationValue = int(durationValue)
rateValue = int(rateValue)
for row in rows:
for k in range(1,13):
if(j<= durationValue):
rent.append(row[str(k)])
j=j+1
    # Present values of the fixed (manual) rents and of the unknown-rent coefficients
rate = rateValue/120000
npvvalue = 0
npvcoeff = 0
d = []
crdo= []
rento = []
k=0
j=0
    # Payments in advance
if modeValue=='01':
for p in rent:
val = 0
coeff = 0
if rent[k] != None and str(rent[k]).isnumeric():
val = (int(rent[k]) / pow((1+rate),k))
else:
coeff = 1 / pow((1+rate),k)
npvvalue = npvvalue + val
npvcoeff = npvcoeff + coeff
k=k+1
npvrv = rvvalue / pow((1+rate),durationValue)
npvfin = amountvalue - npvvalue - npvrv
rent_calculated = float(npvfin / npvcoeff)
crd = amountvalue
for q in rent:
rentschedule = rent_calculated
if rent[j] != None and str(rent[j]).isnumeric():
rentschedule = rent[j]
crd = crd - rentschedule
crd = crd *(1+rate)
rento.append(rentschedule)
crdo.append(crd)
j=j+1
    # Arrear mode: rents fall due at the end of each period
else:
for p in rent:
val = 0
coeff = 0
if (rent[k] != None) and str(rent[k]).isnumeric():
val = (int(rent[k]) / pow((1+rate),k+1))
else:
coeff = 1 / pow((1+rate),k+1)
npvvalue = npvvalue + val
npvcoeff = npvcoeff + coeff
k=k+1
npvrv = rvvalue / pow((1+rate),durationValue)
npvfin = amountvalue - npvvalue - npvrv
rent_calculated = float(npvfin / npvcoeff)
crd = amountvalue
for q in rent:
rentschedule = rent_calculated
if rent[j] != None and str(rent[j]).isnumeric():
rentschedule = rent[j]
crd = crd *(1+rate) - rentschedule
rento.append(rentschedule)
crdo.append(crd)
j=j+1
    # Fill in the schedule rows
i=0
if (modeValue=='01') :
for p in rent:
d.append([(startdate + relativedelta(months=i)).strftime('%b %Y'), rento[i], crdo[i]])
i=i+1
if (modeValue!='01') :
for p in rent:
d.append([(startdate + relativedelta(months=i+1)).strftime('%b %Y'), rento[i], crdo[i]])
i=i+1
df= pd.DataFrame(d, columns=["date", "rent", "balance"])
return df.to_dict('rows')
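# Standalone illustration (added for clarity, not used by any callback) of the
# present-value solve performed above: the constant rent is whatever makes the
# discounted known rents, the discounted unknown rents and the discounted
# residual value add up to the financed amount. All figures below are made up.
def _example_constant_rent(amount=10000.0, rv=1000.0, monthly_rate=0.005,
                           n_months=12, known_rents=None, advance=True):
    known_rents = known_rents or {0: 500.0, 1: 500.0}  # month index -> fixed rent
    npv_known, coeff_sum = 0.0, 0.0
    for k in range(n_months):
        discount = pow(1 + monthly_rate, k if advance else k + 1)
        if k in known_rents:
            npv_known += known_rents[k] / discount
        else:
            coeff_sum += 1 / discount
    npv_rv = rv / pow(1 + monthly_rate, n_months)
    return (amount - npv_known - npv_rv) / coeff_sum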
# Feed the schedule data into the graph
@app.expanded_callback(
Output('graph', 'figure'),
[Input('schedule', 'data')])
def update_graph(rows, **kwargs):
i=0
rentx = []
renty = []
crdx = []
crdy = []
for row in rows:
crdx.append(row['date'])
crdy.append(row['balance'])
rentx.append(row['date'])
renty.append(row['rent'])
i=i+1
# Create figure with secondary y-axis
fig = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig.add_trace(
go.Scatter(x=rentx, y=renty, name="Rent", marker_color='#f6c23e', mode='markers', marker_size=12),
secondary_y=True,
)
fig.add_trace(
go.Bar(x=crdx, y=crdy, name="Balance", marker_color='#858796'),
secondary_y=False,
)
fig.update_layout(
title_text="Balance and rent amounts",
plot_bgcolor= graphcolors['background'],
paper_bgcolor = graphcolors['background'],
font = {'color': graphcolors['text']}
),
# Set y-axes titles
fig.update_yaxes(title_text="<b>Rent</b>", secondary_y=True)
fig.update_yaxes(title_text="<b>Balance</b>", secondary_y=False)
return fig
# Save the quote to the database
@app.expanded_callback(
Output('output-one','children'),
[
Input('save_quote_button', 'n_clicks'),
Input('durationSlider', 'value'),
Input('amountSlider', 'value'),
Input('rvSlider', 'value'),
Input('schedule', 'data'),
Input('mode', 'value'),
Input('rateSlider', 'value')
]
)
def callback_c(n, durationValue, amountValue, rvValue, scheduleRows, modeValue, rateValue, **kwargs):
user = kwargs['user']
if n is None:
return [
html.Div('Lease quote', className='h3 mb-0'),
dbc.Button("Save quote", id="save_quote_button", className="d-none d-md-block btn btn-sm btn-primary"),
]
else:
if n > 1:
return [
html.Div('Lease quote', className='h3 mb-0'),
]
customer = get_object_or_404(Customer, pk=1)
contract = Contract()
contract.customer = customer
contract.user = user
contract.status = 'Created'
contract.creation_date = datetime.date.today()
contract.status_date = datetime.date.today()
contract.save()
schedule = Schedule()
schedule.contract = contract
schedule.mode = modeValue
schedule.rv = rvValue
schedule.amount = amountValue
schedule.start_date = startdate
schedule.rate = rateValue/120000
schedule.save()
i=0
for scheduleRow in scheduleRows:
if (modeValue=='01') :
date = startdate + relativedelta(months=i)
else :
date = startdate + relativedelta(months=i+1)
i=i+1
step = Step()
step.schedule = schedule
step.rent = scheduleRow['rent']
step.balance = scheduleRow['balance']
step.date = date
step.save()
return [
html.Div('Lease quote', className='h3 mb-0 text-gray-800'),
html.Div('Quote saved !', className='h3 mb-0 text-gray-800'),
]
# Populate the formula card
@app.expanded_callback(
Output('formula', 'children'),
[
Input('mode', 'value'),
]
)
def update_formula(modeValue, **kwargs):
formule = '../staticfiles/img/advance.png'
modeValue = int(modeValue)
if modeValue == 2:
formule = '../staticfiles/img/arrear.png'
return [html.Img(src=formule, alt='formula', className='img-fluid text-center my-auto')]
# Populate the result card
@app.expanded_callback(
Output('result', 'children'),
[
Input('schedule', 'data'),
Input('manual_rents', 'data'),
Input('mode', 'value'),
]
)
def update_result(scheduleRows, manuals, modeValue, **kwargs):
a=0
i=1
months = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"]
    # Total of the manually entered rents
manualSum = 0
manualNb = 0
for manual in manuals:
for k in months:
if (manual[k] != None) and str(manual[k]).isnumeric():
manualSum = manualSum + manual[k]
manualNb = manualNb + 1
    # Total of all rents in the schedule
globalSum = 0
globalNb = 0
for scheduleRow in scheduleRows:
globalSum = globalSum + float(scheduleRow['rent'])
globalNb = globalNb + 1
    # Total of the non-manual (computed) rents
calcSum = globalSum - manualSum
calclNb = globalNb - manualNb
    # Compute and display the constant computed rent
return "€ {0:5.1f}".format(float(calcSum/calclNb))
# Enable the save button only for authenticated users
@app.expanded_callback(
Output('save_quote_button', 'disabled'),
[Input('amountInput', 'value')])
def show_button(on_off, **kwargs):
user = kwargs['user']
if user.is_authenticated:
return False
else:
return True | [
"[email protected]"
]
| |
ae46d3018ef730c3d8c886c21676d65c05981388 | 8cc2c6e674477b425f9d799da39ebf71a0403e4b | /QAf.py | fbe6e0ac68e9d258a54c997f07be8019b3d555bd | []
| no_license | Amrit-stack/Python-assignments-III | 85f2c427013c56a7e2e1529374aa8bb648619c2a | 92e19702a87255529a06ac73b9a092dfae558741 | refs/heads/master | 2022-11-21T11:44:02.867852 | 2020-07-12T12:53:34 | 2020-07-12T12:53:34 | 279,063,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | def binary_search (arr, left, right, x):
if right >= left:
mid = left + (right - left) // 2
if arr[mid] == x:
return mid
elif arr[mid] > x:
return binary_search(arr, left, mid-1, x)
else:
return binary_search(arr, mid + 1, right, x)
else:
return -1
arr = [ 2, 3, 4, 10, 40 ]
x = 40
result = binary_search(arr, 0, len(arr)-1, x)
if result != -1:
print ("Element is present at index % d" % result)
else:
print ("Element is not present in array")
| [
"[email protected]"
]
| |
8c22ff668872cc85bea12d1e8242af7b09b05524 | 3c60806513f6dfb3f17e76caf738cde530dae0fc | /demo.py | dbeff5d73c7752bc8862ea10a9b081e8b45625df | []
| no_license | ArnaudGardille/SinkhornEM | 258116b1963c1a75f3677af4604d4852592f1610 | 1a5471ac709242d6c8e428970b596777fb75254f | refs/heads/master | 2023-03-20T08:21:21.136156 | 2021-03-02T22:17:25 | 2021-03-02T22:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,058 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 14:16:24 2020
@author: Amin
"""
from visualizations import visualize_average_segmentation
from BayesianGMM.BayesianGMM import BayesianGMM
from DataHandling import DataHandler
import numpy as np
from BUtils import Simulator
config = {'random_init':True, # If True, locations and colors are initialzed randomly
'do_update_sigma': True, # If True, cell shapes get updated through iterations
'gt_sigma': False} # Setting the sigma to their ground truth value
# %% Load the statistical atlas and choose a subset of cells
file = 'atlas.mat'
atlas = DataHandler.load_atlas(file,'tail')
neurons = ['PDA', 'DVB', 'PHAL', 'ALNL', 'PLML']
indices = np.array([atlas['names'].index(neuron) for neuron in neurons])
atlas['names'] = neurons
atlas['mu'] = atlas['mu'][indices,:]
atlas['sigma'] = atlas['sigma'][:,:,indices]
atlas['mu'][:,:3] = atlas['mu'][:,:3] - atlas['mu'][:,:3].min(0) + 5
# %% Generative data hyperparameters
params = {'B':0, # Number of background components (always set to zero)
'N':5000, # Number of observed pixels
'Σ_loc':1, # Hyperparameter for generating neuronal shapes (controls mean size)
          'Σ_scale':.1 # Hyperparameter for generating neuronal shapes (controls the spread of the sizes)
}
gt = Simulator.simulate_gmm(atlas,N=params['N'],B=params['B'],Σ_loc=params['Σ_loc'],Σ_scale=params['Σ_scale'])
n_trials = 1 # Number of trials
n_iter = 1000 # Max number of iterations (each algorithm is run for 1 second)
# %% Run algorithms
sems = []
vems = []
oems = []
for trial in range(n_trials):
oem = BayesianGMM(atlas, gt['X'].numpy(), noise_props=np.empty(0),random_init=config['random_init'])
oem.do_sinkhorn = False
oem.do_update_pi = True
oem.do_update_sigma = config['do_update_sigma']
if config['gt_sigma']:
oem.sigma = gt['Σ'].permute((1,2,0)).numpy()
oacc = []
vem = BayesianGMM(atlas, gt['X'].numpy(), noise_props=np.empty(0),random_init=config['random_init'])
vem.do_sinkhorn = False
vem.do_update_pi = False
vem.do_update_sigma = config['do_update_sigma']
if config['gt_sigma']:
vem.sigma = gt['Σ'].permute((1,2,0)).numpy()
vacc = []
sem = BayesianGMM(atlas, gt['X'].numpy(), noise_props=np.empty(0),random_init=config['random_init'])
sem.do_sinkhorn = True
sem.do_update_pi = False
sem.do_update_sigma = config['do_update_sigma']
if config['gt_sigma']:
sem.sigma = gt['Σ'].permute((1,2,0)).numpy()
sacc = []
for iter in range(n_iter):
if oem.timing.sum() <= BayesianGMM.max_time:
oem.iterate()
oacc.append(oem.compute_accuracy(atlas['names'], gt['µ'].numpy()[:,:3], 1, radius=1))
if vem.timing.sum() <= BayesianGMM.max_time:
vem.iterate()
vacc.append(vem.compute_accuracy(atlas['names'], gt['µ'].numpy()[:,:3], 1, radius=1))
if sem.timing.sum() <= BayesianGMM.max_time:
sem.iterate()
sacc.append(sem.compute_accuracy(atlas['names'], gt['µ'].numpy()[:,:3], 1, radius=1))
if oem.timing.sum() > BayesianGMM.max_time and\
vem.timing.sum() > BayesianGMM.max_time and\
sem.timing.sum() > BayesianGMM.max_time:
break
oem.acc = oacc
vem.acc = vacc
sem.acc = sacc
oem.gt = {'mu':gt['µ'].data.numpy(), 'sigma':gt['Σ'].data.numpy(), 'pi':gt['π'].data.numpy(), 'Z':gt['Z'].data.numpy()}
vem.gt = {'mu':gt['µ'].data.numpy(), 'sigma':gt['Σ'].data.numpy(), 'pi':gt['π'].data.numpy(), 'Z':gt['Z'].data.numpy()}
sem.gt = {'mu':gt['µ'].data.numpy(), 'sigma':gt['Σ'].data.numpy(), 'pi':gt['π'].data.numpy(), 'Z':gt['Z'].data.numpy()}
oems.append(oem)
vems.append(vem)
sems.append(sem)
ems = [oems, vems, sems]
labels = ['oEM', 'vEM', 'sEM']
visualize_average_segmentation(gt['X'],gt['µ'],gt['Σ'],ems,labels,save=False)
| [
"[email protected]"
]
| |
da2846a18904cb304bdee4a3e21c7faab448b9b5 | ad681d7aebbf77a43c16950b2094cb0e93771146 | /Order Of Growth.py | c4867d66527000bef41f4c3b442001e4d7ce56a6 | [
"Apache-2.0"
]
| permissive | samli6479/PythonLearn | 1f00d3b83f3b0750f4bdca3aeda666b7c9590431 | 2ad78f62d58612132d3a3759aecec4a52381566f | refs/heads/master | 2021-01-12T11:02:01.837883 | 2016-11-04T00:00:20 | 2016-11-04T00:00:20 | 72,797,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # Order of Growth
# A method for bounding the resources used by a function by the "size" of a problem
# n: size of the problem
# R(n): measurement of some resource used(time or space)
# R(n) = Theata(f(n))
#Theata means that there exist k1 and k2 s.t. k1f(n)<=R(n)<=k2f(n) for all n larger than some minimum m.
from math import sqrt
# Choose k1 =1
def facotr(n):
sqrt_n = sqrt(n) #1 OW Statement outside while 4 or 5
k, total = 1, 0 #1 OW
while k < sqrt_n: #1 IW Statement inside while 3 or 4( include header)
if divides(k, n): #1 IW
total += 2 #1 IW while statemnt iterations: between sqrt(n)-1
k += 1 #1 IW and sqrt(n) ex n=48 or n=49
if k *k ==n: #1 OW
total += 1 #1 OW
return total #1 OW
# Minimum 4+3*(sqrt(n)-1)
# Maximum 5+4*sqrt(n)
# Maximum operations require per statement: some p
# Assumption: Every statment takes some fixed number of operations to execute
# m= 25 then we choose k2 = 5p
# Growth is sqrt(n)
| [
"[email protected]"
]
| |
29c1ae82f792f643a540f96f81573c324424623a | d305e47f7f760e3cb3cfcd101c34f4f2cf30fcbf | /python/opencv/src/opencv/luorixiaoguo.py | d5fa0332ca2a3db0747c1201d1fc439bb618fdaa | []
| no_license | wherego/python | 1fe2e46998bbd58f678ead829c1a3f2bbb843d22 | bdd5715b24f9f32fc223420ede9b1f28d1531385 | refs/heads/master | 2020-12-24T15:49:41.060934 | 2014-12-07T14:00:34 | 2014-12-07T14:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | #-*- coding: utf-8 -*-
import cv2
import numpy as np
fn="7.jpg"
if __name__ == '__main__':
print 'http://blog.csdn.net/myhaspl'
print '[email protected]'
print
print 'loading %s ...' % fn
    print 'Processing...',
img = cv2.imread(fn)
w=img.shape[1]
h=img.shape[0]
ii=0
    # Create a sunset effect
#b[:,:] = img[:,:,0]
#g[:,:] = img[:,:,1]
#r[:,:] = img[:,:,2]
for xi in xrange(0,w):
for xj in xrange (0,h):
img[xj,xi,0]= int(img[xj,xi,0]*0.7)
img[xj,xi,1]= int(img[xj,xi,1]*0.7)
if xi%10==0 :print '.',
cv2.namedWindow('img')
cv2.imshow('img', img)
cv2.waitKey()
cv2.destroyAllWindows() | [
"[email protected]"
]
| |
1c8d3cbc4869819831526f4d76d094711f9424ee | 6bf2425dc79bad40d5a7458dd49b9fbcc4f6a88c | /src/ml_project/pipelines/data_engineering/nodes.py | 520c75cb7a6509f09b1bcbd7889af4152ea1b734 | []
| no_license | smottahedi/ml-project | 6a6df8c665ec7fad59f416ec299a0b3cf2ee829c | 2477e3393740c8eee4ba4e483e84afb0bb0168b8 | refs/heads/main | 2023-07-16T07:04:36.912667 | 2021-09-02T03:25:32 | 2021-09-02T03:25:32 | 402,276,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a boilerplate pipeline 'data_engineering'
generated using Kedro 0.17.4
"""
from typing import Any, Dict
import pandas as pd
from sklearn.preprocessing import StandardScaler
def split_data(data: pd.DataFrame, example_test_data_ratio: float) -> Dict[str, Any]:
"""Node for splitting the classical Iris data set into training and test
sets, each split into features and labels.
The split ratio parameter is taken from conf/project/parameters.yml.
The data and the parameters will be loaded and provided to your function
automatically when the pipeline is executed and it is time to run this node.
"""
data.columns = [
"sepal_length",
"sepal_width",
"petal_length",
"petal_width",
"target",
]
# Shuffle all the data
data = data.sample(frac=1).reset_index(drop=True)
# Split to training and testing data
n = data.shape[0]
n_test = int(n * example_test_data_ratio)
training_data = data.iloc[n_test:, :].reset_index(drop=True)
test_data = data.iloc[:n_test, :].reset_index(drop=True)
# Split the data to features and labels
train_data_x = training_data.loc[:, "sepal_length":"petal_width"]
train_data_y = training_data["target"]
test_data_x = test_data.loc[:, "sepal_length":"petal_width"]
test_data_y = test_data["target"]
# When returning many variables, it is a good practice to give them names:
return [
train_data_x,
train_data_y,
test_data_x,
test_data_y,
]
def standardize_features(
train_x: pd.DataFrame,
test_x: pd.DataFrame,
) -> Dict[str, Any]:
scaler = StandardScaler()
scaler.fit(train_x)
train_x = pd.DataFrame(columns=train_x.columns, data=scaler.transform(train_x))
test_x = pd.DataFrame(columns=test_x.columns, data=scaler.transform(test_x))
return [
train_x,
test_x,
]
| [
"[email protected]"
]
| |
0cb7eed95e6805e9352b83c72c8aa7571facc3e4 | 16491ea88d51e13f806b83fbabd8b5543c80d617 | /recommender/recommender.py | c24846f8c1e9d81343593b393a3987d97bbd6ae0 | []
| no_license | spinor29/MachLearn | 98ce33c00c668d53c7e08143fbe2e058e008d0e1 | ea4f19a604a90b2b73eecd80211d616422e29771 | refs/heads/master | 2021-01-01T15:30:34.564099 | 2015-02-21T04:30:35 | 2015-02-21T04:30:35 | 21,888,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,000 | py |
import numpy as np
import scipy as sp
#from scipy import *
from sklearn import linear_model
from pylab import *
import time
def costFunction(params, *Z):
[ X, Y, R, nm, nu, nf, lamb, J_or_G ] = Z[:8]
X = params[:(nm*nf)].reshape(nm, nf)
Theta = params[(nm*nf):].reshape(nu,nf)
X_grad = np.zeros(X.shape)
Theta_grad = np.zeros(Theta.shape)
Diff = X.dot(Theta.T) - Y
J = 0.5 * np.sum(R * (Diff * Diff))
X_grad = (R * Diff).dot(Theta)
Theta_grad = (R * Diff).T.dot(X)
#Add regularization
reg1 = lamb / 2.0 * np.sum(Theta * Theta)
reg2 = lamb / 2.0 * np.sum(X * X)
J += reg1 + reg2
X_grad += lamb * X
Theta_grad += lamb * Theta
grad = np.concatenate((X_grad.flatten(), Theta_grad.flatten()))
if J_or_G == 'J':
return J
elif J_or_G == 'G':
return grad
def J_func(params, *Z):
Y = Z + ('J',)
J = costFunction(params, *Y)
return J
def G_func(params, *Z):
Y = Z + ('G',)
grad = costFunction(params, *Y)
return grad
def normalizeRatings(Y, R):
m = len(Y)
n = len(Y[0])
Ymean = np.zeros(m)
Ynorm = np.zeros((m,n))
for i in range(m):
#for j in range(n):
if long(np.sum(R[i,:])) > 0:
Ymean[i] = np.sum(Y[i,:]*R[i,:])/np.sum(R[i,:])
else:
Ymean[i] = 0.0
for j in range(n):
if int(R[i, j]) == 1:
Ynorm[i,j] = Y[i,j] - Ymean[i]
return Ynorm, Ymean
def loadMovieList():
with open('movie_ids.txt', 'r') as f:
data = f.readlines()
movies = []
for line in data:
movie = line.split(' ', 1)[1].rstrip()
movies.append(movie)
return movies
def loadMovieList2():
with open('movies.dat', 'r') as f:
data = f.readlines()
movies = {}
for line in data:
fields = line.split('::')
movies[long(fields[0])-1] = fields[1]
n = 10
print "\nThe first", n, "movies in data:"
for i in range(n):
print movies[i]
print "......"
return movies
def addNewRatings(Y,R,mlen):
# mlen is the number of all movies
#movieList = loadMovieList()
my_ratings = np.zeros(mlen)
my_R = np.zeros(mlen)
with open('personalRatings.txt', 'r') as f:
data = f.readlines()
f.close()
my_Y = np.zeros(mlen)
my_R = np.zeros(mlen)
for line in data:
fields = line.split('::')
iu, im, r, t = (long(fields[0]), long(fields[1])-1, float(fields[2]), long(fields[3]))
my_Y[im], my_R[im] = r, 1
Y = np.c_[ my_Y, Y ]
R = np.c_[ my_R, R ]
return my_Y, Y, R
def partition(input_data):
lines = input_data.split('\n')
lines.remove('')
np.random.shuffle(lines)
m1 = long(0.6*len(lines))
m2 = long(0.8*len(lines))
return lines[:m1], lines[m1:m2], lines[m2:]
def parseRatings(data, nm, nu):
#lines = input_data.split('\n')
#lines.remove('')
Y = np.zeros((nm,nu))
R = np.zeros((nm,nu))
for line in data:
fields = line.split('::')
iu, im, r, t = (long(fields[0])-1, long(fields[1])-1, float(fields[2]), long(fields[3]))
Y[im, iu] = r
R[im, iu] = 1
return Y, R
def solve_it(input_data, movieList):
#Y = input_data['Y']
#R = input_data['R']
nu = 6040
nm = 3952
data_train, data_cross, data_test = partition(input_data)
#print data_train[0]
Y_train, R_train = parseRatings(data_train, nm, nu)
Y_cross, R_cross = parseRatings(data_cross, nm, nu)
Y_test, R_test = parseRatings(data_test, nm, nu)
# Add ratings of a new user
#my_ratings, Y, R = addNewRatings(Y, R, len(movieList))
my_ratings, Y_train, R_train = addNewRatings(Y_train, R_train, nm)
nu += 1
Y_cross = np.c_[ np.zeros(nm), Y_cross ]
R_cross = np.c_[ np.zeros(nm), R_cross ]
Y_test = np.c_[ np.zeros(nm), Y_test ]
R_test = np.c_[ np.zeros(nm), R_test ]
print
print "Add new ratings:"
for i in range(len(my_ratings)):
if int(my_ratings[i]) > 0:
print my_ratings[i], movieList[i]
# Normalize ratings
Y = Y_train
R = R_train
nm = len(Y)
nu = len(Y[0])
nf = 10
Ynorm, Ymean = normalizeRatings(Y, R)
# Cross validation
# Tune lamb, find the best value
#lamb_values = [1.0, 5.0, 10.0, 20.0]
lamb_values = [10.0]
X_opt = []
Theta_opt = []
emin = 1e16
lamb_opt = 1.0
fw = open('crossValidate.log','w')
start = time.time()
for lamb in lamb_values:
#lamb = 10.0
X = np.random.randn(nm, nf)
Theta = np.random.randn(nu, nf)
initial_params = np.concatenate((X.flatten(), Theta.flatten()))
Z = (X, Ynorm, R, nm, nu, nf, lamb)
# Minimize the cost function
params, J = sp.optimize.fmin_l_bfgs_b(J_func, x0=initial_params, fprime=G_func, args=Z, disp=1, maxiter=100)[:2]
X = params[:nm*nf].reshape(nm, nf)
Theta = params[nm*nf:].reshape(nu, nf)
# Comparing predictions to cross validation set
p = X.dot(Theta.T)
for i in range(nu):
p[:,i] += Ymean # predictions
diff = (p - Y_cross) * R_cross
err_cross = np.sqrt(np.sum(diff * diff)/np.sum(R_cross))
diff = (p - Y_train) * R_train
err_train = np.sqrt(np.sum(diff * diff)/np.sum(R_train))
elog = ("lamba, err_train, err_cross = ", lamb, err_train, err_cross)
print elog
fw.write(str(elog))
if err_cross < emin:
emin, lamb_opt, X_opt, Theta_opt = err_cross, lamb, X, Theta
fw.close()
print "emin, lamb_opt = ", emin, lamb_opt
print "Recommender system learning completed.\n"
end = time.time()
# Predictions
X = X_opt
Theta = Theta_opt
p = X.dot(Theta.T)
my_predictions = p[:,0] + Ymean
id_sort = my_predictions.argsort()[::-1]
print "Top recommendations for you:"
ntop = 0
for i in range(len(R_train)):
if int(np.sum(R_train[id_sort[i],:])) > 5:
print my_predictions[id_sort[i]], movieList[id_sort[i]]
ntop += 1
if ntop == 10: break
print
print "Your original ratings:"
for i in range(len(my_ratings)):
if my_ratings[i] > 0:
print my_ratings[i], movieList[i]
print "Elapsed time:", end - start
import sys
if __name__ == "__main__":
if len(sys.argv) > 1:
file_location = sys.argv[1].strip()
print "Loading data..."
input_data_file = open(file_location, 'r')
ratingList = ''.join(input_data_file.readlines())
input_data_file.close()
# load matlab format file
#ratingList = io.loadmat(file_location)
#movieList = loadMovieList()
movieList = loadMovieList2()
print 'Solving: ', file_location
solve_it(ratingList, movieList)
else:
print 'This test requires an input file. Please select one from the directory. (i.e. python recommender.py ratings.dat)'
| [
"[email protected]"
]
| |
f5557d5ff6492966343a1b46c76dde955a03f5a7 | b15a9d9c7374c4a1fa5ec3ef63603a8c57e8681f | /Design-Patterns-Python/memento/caretaker.py | 6a143d567f4390a284f8bff63c8f3a579f175f00 | []
| no_license | gohils/zemr_notebook | 3f7490ef7a2559655746c3e2e0dbfb835a83891e | 00d53cea9970df44160c51e6ad2bdeadfae2c91f | refs/heads/master | 2023-08-04T14:32:35.428016 | 2023-07-20T11:51:08 | 2023-07-20T11:51:08 | 222,027,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | "The Save/Restore Game functionality"
class CareTaker():
"Guardian. Provides a narrow interface to the mementos"
def __init__(self, originator):
self._originator = originator
self._mementos = []
def save(self):
"Store a new Memento of the Characters current state"
print("CareTaker: Game Save")
memento = self._originator.memento
self._mementos.append(memento)
def restore(self, index):
"""
Replace the Characters current attributes with the state
stored in the saved Memento
"""
print("CareTaker: Restoring Characters attributes from Memento")
memento = self._mementos[index]
self._originator.memento = memento
| [
"[email protected]"
]
| |
b1850bb9f42ce970e1af828ad7e8073288267aa4 | 1929443c8e4ec6ccd79777f18d161546867e17ef | /methods/transformers/tests/test_pipelines_fill_mask.py | 2777bee42548e1da90a9efb1537e16c2cba4f6ce | [
"MIT",
"Apache-2.0"
]
| permissive | INK-USC/RiddleSense | 6f4b00546d7f4d5ada12db50929c1f0d7713d541 | a3d57eaf084da9cf6b77692c608e2cd2870fbd97 | refs/heads/main | 2023-08-14T19:01:01.478946 | 2021-07-05T04:06:01 | 2021-07-05T04:06:01 | 376,487,870 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,295 | py | import unittest
import pytest
from transformers import pipeline
from transformers.testing_utils import require_tf, require_torch, slow
from .test_pipelines_common import MonoInputPipelineCommonMixin
EXPECTED_FILL_MASK_RESULT = [
[
{"sequence": "<s>My name is John</s>", "score": 0.00782308354973793, "token": 610, "token_str": "ĠJohn"},
{"sequence": "<s>My name is Chris</s>", "score": 0.007475061342120171, "token": 1573, "token_str": "ĠChris"},
],
[
{"sequence": "<s>The largest city in France is Paris</s>", "score": 0.3185044229030609, "token": 2201},
{"sequence": "<s>The largest city in France is Lyon</s>", "score": 0.21112334728240967, "token": 12790},
],
]
EXPECTED_FILL_MASK_TARGET_RESULT = [
[
{
"sequence": "<s>My name is Patrick</s>",
"score": 0.004992353264242411,
"token": 3499,
"token_str": "ĠPatrick",
},
{
"sequence": "<s>My name is Clara</s>",
"score": 0.00019297805556561798,
"token": 13606,
"token_str": "ĠClara",
},
]
]
class FillMaskPipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
pipeline_task = "fill-mask"
pipeline_loading_kwargs = {"top_k": 2}
small_models = ["sshleifer/tiny-distilroberta-base"] # Models tested without the @slow decorator
large_models = ["distilroberta-base"] # Models tested with the @slow decorator
mandatory_keys = {"sequence", "score", "token"}
valid_inputs = [
"My name is <mask>",
"The largest city in France is <mask>",
]
invalid_inputs = [
"This is <mask> <mask>" # More than 1 mask_token in the input is not supported
"This is" # No mask_token is not supported
]
expected_check_keys = ["sequence"]
@require_torch
def test_torch_topk_deprecation(self):
# At pipeline initialization only it was not enabled at pipeline
# call site before
with pytest.warns(FutureWarning, match=r".*use `top_k`.*"):
pipeline(task="fill-mask", model=self.small_models[0], topk=1)
@require_torch
def test_torch_fill_mask(self):
valid_inputs = "My name is <mask>"
nlp = pipeline(task="fill-mask", model=self.small_models[0])
outputs = nlp(valid_inputs)
self.assertIsInstance(outputs, list)
# This passes
outputs = nlp(valid_inputs, targets=[" Patrick", " Clara"])
self.assertIsInstance(outputs, list)
# This used to fail with `cannot mix args and kwargs`
outputs = nlp(valid_inputs, something=False)
self.assertIsInstance(outputs, list)
@require_torch
def test_torch_fill_mask_with_targets(self):
valid_inputs = ["My name is <mask>"]
valid_targets = [[" Teven", " Patrick", " Clara"], [" Sam"]]
invalid_targets = [[], [""], ""]
for model_name in self.small_models:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="pt")
for targets in valid_targets:
outputs = nlp(valid_inputs, targets=targets)
self.assertIsInstance(outputs, list)
self.assertEqual(len(outputs), len(targets))
for targets in invalid_targets:
self.assertRaises(ValueError, nlp, valid_inputs, targets=targets)
@require_tf
def test_tf_fill_mask_with_targets(self):
valid_inputs = ["My name is <mask>"]
valid_targets = [[" Teven", " Patrick", " Clara"], [" Sam"]]
invalid_targets = [[], [""], ""]
for model_name in self.small_models:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="tf")
for targets in valid_targets:
outputs = nlp(valid_inputs, targets=targets)
self.assertIsInstance(outputs, list)
self.assertEqual(len(outputs), len(targets))
for targets in invalid_targets:
self.assertRaises(ValueError, nlp, valid_inputs, targets=targets)
@require_torch
@slow
def test_torch_fill_mask_results(self):
mandatory_keys = {"sequence", "score", "token"}
valid_inputs = [
"My name is <mask>",
"The largest city in France is <mask>",
]
valid_targets = [" Patrick", " Clara"]
for model_name in self.large_models:
nlp = pipeline(
task="fill-mask",
model=model_name,
tokenizer=model_name,
framework="pt",
top_k=2,
)
mono_result = nlp(valid_inputs[0], targets=valid_targets)
self.assertIsInstance(mono_result, list)
self.assertIsInstance(mono_result[0], dict)
for mandatory_key in mandatory_keys:
self.assertIn(mandatory_key, mono_result[0])
multi_result = [nlp(valid_input) for valid_input in valid_inputs]
self.assertIsInstance(multi_result, list)
self.assertIsInstance(multi_result[0], (dict, list))
for result, expected in zip(multi_result, EXPECTED_FILL_MASK_RESULT):
self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in result]))
if isinstance(multi_result[0], list):
multi_result = multi_result[0]
for result in multi_result:
for key in mandatory_keys:
self.assertIn(key, result)
self.assertRaises(Exception, nlp, [None])
valid_inputs = valid_inputs[:1]
mono_result = nlp(valid_inputs[0], targets=valid_targets)
self.assertIsInstance(mono_result, list)
self.assertIsInstance(mono_result[0], dict)
for mandatory_key in mandatory_keys:
self.assertIn(mandatory_key, mono_result[0])
multi_result = [nlp(valid_input) for valid_input in valid_inputs]
self.assertIsInstance(multi_result, list)
self.assertIsInstance(multi_result[0], (dict, list))
for result, expected in zip(multi_result, EXPECTED_FILL_MASK_TARGET_RESULT):
self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in result]))
if isinstance(multi_result[0], list):
multi_result = multi_result[0]
for result in multi_result:
for key in mandatory_keys:
self.assertIn(key, result)
self.assertRaises(Exception, nlp, [None])
@require_tf
@slow
def test_tf_fill_mask_results(self):
mandatory_keys = {"sequence", "score", "token"}
valid_inputs = [
"My name is <mask>",
"The largest city in France is <mask>",
]
valid_targets = [" Patrick", " Clara"]
for model_name in self.large_models:
nlp = pipeline(task="fill-mask", model=model_name, tokenizer=model_name, framework="tf", topk=2)
mono_result = nlp(valid_inputs[0], targets=valid_targets)
self.assertIsInstance(mono_result, list)
self.assertIsInstance(mono_result[0], dict)
for mandatory_key in mandatory_keys:
self.assertIn(mandatory_key, mono_result[0])
multi_result = [nlp(valid_input) for valid_input in valid_inputs]
self.assertIsInstance(multi_result, list)
self.assertIsInstance(multi_result[0], (dict, list))
for result, expected in zip(multi_result, EXPECTED_FILL_MASK_RESULT):
self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in result]))
if isinstance(multi_result[0], list):
multi_result = multi_result[0]
for result in multi_result:
for key in mandatory_keys:
self.assertIn(key, result)
self.assertRaises(Exception, nlp, [None])
valid_inputs = valid_inputs[:1]
mono_result = nlp(valid_inputs[0], targets=valid_targets)
self.assertIsInstance(mono_result, list)
self.assertIsInstance(mono_result[0], dict)
for mandatory_key in mandatory_keys:
self.assertIn(mandatory_key, mono_result[0])
multi_result = [nlp(valid_input) for valid_input in valid_inputs]
self.assertIsInstance(multi_result, list)
self.assertIsInstance(multi_result[0], (dict, list))
for result, expected in zip(multi_result, EXPECTED_FILL_MASK_TARGET_RESULT):
self.assertEqual(set([o["sequence"] for o in result]), set([o["sequence"] for o in result]))
if isinstance(multi_result[0], list):
multi_result = multi_result[0]
for result in multi_result:
for key in mandatory_keys:
self.assertIn(key, result)
self.assertRaises(Exception, nlp, [None])
| [
"[email protected]"
]
| |
1d561d2cb7059ab47fbbf55bf488968c40e1e094 | 9ff8e53cda466f8093481fd9343f1fc3d1849ddf | /replace_isoforms.py | 08604c1cea2be42b3fee388090e2f3da081be3ba | []
| no_license | masakistan/bioscripts | aefaed90be4985fc98bda04056a1490bfb679f0b | 8b86b3bda78a872d5ab6f71dea6179af5920165c | refs/heads/master | 2021-01-17T13:26:02.972142 | 2017-01-02T07:22:14 | 2017-01-02T07:22:14 | 56,871,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,843 | py | import sys, argparse
def parse_gene_seq_ids_file( file_path ):
seq_to_gene = dict()
with open( file_path ) as fh:
for line in fh:
line = line.strip().split()
seq_to_gene[ line[ 1 ] ] = line[ 0 ]
#print len( seq_to_gene )
return seq_to_gene
def parse_gene_seq_ids_files( file_path ):
all_seq_to_gene = dict()
with open( file_path ) as fh:
for line in fh:
base_name = line[ line.rfind( '/' ) + 1 : ].strip()
all_seq_to_gene[ base_name ] = parse_gene_seq_ids_file( line.strip() )
return all_seq_to_gene
def parse_longest_seqs_file( file_path ):
longest_ids = dict()
with open( file_path ) as fh:
for line in fh:
line = line.strip().split()
longest_ids[ line[ 0 ] ] = line[ 1 ]
#print len( longest_ids )
return longest_ids
def parse_longest_seqs_files( file_path ):
all_longest_ids = dict()
with open( file_path ) as fh:
for line in fh:
base_name = line[ line.rfind( '/' ) + 1 : ].strip()
all_longest_ids[ base_name ] = parse_longest_seqs_file( line.strip() )
return all_longest_ids
def filter_groups( orthomcl_path, gene_seq_ids, longest_seq_ids ):
groups = []
with open( orthomcl_path ) as fh:
for line in fh:
line = line.strip().split()
if len( line ) <= 0:
continue
group_specs = dict()
bad_group = False
for spec in line[ 1 : ]:
spec, seq_id = spec.split( '|' )
if spec in group_specs:
if gene_seq_ids[ spec ][ group_specs[ spec ] ] != gene_seq_ids[ spec ][ seq_id ]:
bad_group = True
break
group_specs[ spec ] = longest_seq_ids[ spec ][ gene_seq_ids[ spec ][ seq_id ] ]
if bad_group:
#print "Bad group"
continue
else:
group = []
if len( group_specs ) < 2:
continue
for spec, longest_seq_id in group_specs.iteritems():
#try:
#longest_iso = longest_seq_ids[ spec ][ gene ]
longest_iso = longest_seq_id
#except KeyError:
# print spec + "\t" + gene
group.append( spec + "|" + longest_iso )
#print line
groups.append( line[ 0 ] + " " + " ".join( group ) )
return groups
def main( args ):
gene_seq_ids = parse_gene_seq_ids_files( args.gene_list_path )
#print gene_seq_ids.keys()
#print "*" * 20
#sys.exit( "step 1" )
longest_seq_ids = parse_longest_seqs_files( args.longest_list_path )
#print longest_seq_ids.keys()
#print "*" * 20
#sys.exit( "step 2" )
filtered_groups = filter_groups( args.orthomcl_path, gene_seq_ids, longest_seq_ids )
for group in filtered_groups:
print group
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = "Replace sequences in OrthoMCL output with longest isoform. Ensure that all sequences in a group are isoforms of the same gene."
)
parser.add_argument( 'gene_list_path',
type = str,
help = "The path to a file containing a list of the files that contain the gene to sequence ids."
)
parser.add_argument( 'longest_list_path',
type = str,
help = "The path to a file containining a list of the files that contain the longest isoform ids."
)
parser.add_argument( 'orthomcl_path',
type = str,
help = "The path to the OrthoMCL output file."
)
args = parser.parse_args()
main( args )
| [
"[email protected]"
]
| |
0a84c7d2819c6909abef3aa8cf9c8e577efad451 | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /nn_ns/parsing/IterParser/ParseResultAST.py | 205e7e97ea49432cf019048a7b1198e730ce036c | []
| no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 346 | py |
from sand.types.NonMathTree import NonMathTree, LeafNode, OrientedNode, UnorientedNode
class ParseResultAST(NonMathTree):
class __UnboxedTypeID__:pass
class ConRuleNode(OrientedNode, ParseResultAST):pass
class AltRuleNode(UnorientedNode, ParseResultAST):pass
class TerminalNode(LeafNode, ParseResultAST):pass
| [
"[email protected]"
]
| |
82a7de1459e7e0d223566ae0c7cdbaa5df079c74 | 96ee7ef1baa5ebc042de71642c3c8712ef56cf06 | /venv/Scripts/pip3.7-script.py | 26a4d76aaf913f91c7fd32ae8a46878b2261134c | [
"Apache-2.0"
]
| permissive | dipsuji/Phython-Learning | 0439b4d53854e2b35dd05b2bef174d7a3db9b8e0 | 78689d3436a8573695b869a19457875ac77fcee4 | refs/heads/master | 2020-04-04T11:08:57.730233 | 2019-04-25T14:08:59 | 2019-04-25T14:08:59 | 155,879,955 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | #!"S:\Phython Learning\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
]
| |
1caf326fb364e9ce7b082c99ca141de64a131f52 | fc12826ab97ecd171fac1553b05dd56700ebc928 | /yapl_parser.py | 0358d3ceb5da9fcb761b18f5c8ab39fc8927aea6 | []
| no_license | m-jojo-s/YAPL | ff76c06f0be1508a2a2e1a1a3353a81405acbf95 | b337260483cb3bd9ab50c4f957ccd35788958e77 | refs/heads/main | 2023-04-10T02:01:18.888457 | 2021-04-24T19:54:06 | 2021-04-24T19:54:06 | 359,183,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,341 | py | import ply.yacc as yacc
from yapl_lexer import *
#sys.tracebacklimit = 0 # to prevent traceback debug output since it is not needed
# to resolve ambiguity, individual tokens assigned a precedence level and associativity.
# tokens ordered from lowest to highest precedence, rightmost terminal judged
precedence = (
('nonassoc', 'LESSER', 'GREATER', "LESSER_EQUAL", "GREATER_EQUAL", "IS_EQUAL", "NOT_EQUAL"),
('left', 'PLUS', 'MINUS'), # +, - same precedence, left associative
("left", "FWSLASH", "ASTERISK"),
("left", "MOD", "TOPI"),
("left", "L_ROUND", "R_ROUND"),
("left", "L_CURLY", "R_CURLY"),
("left", "L_SQR", "R_SQR")
)
start = 'S'
# multiple variables, assigning data from one variable to another
# after the lexing, start parsing
def p_start(p): # non-terminal, starting
"""
S : stmt S
"""
p[0] = [p[1]] + p[2] # list comprehension used to solve recursive grammar, added/appending as well
def p_start_empty(p):
"""
S :
"""
p[0] = []
def p_stmt_exp(p):
"""
stmt : exp SEMICOL
| SEMICOL
| L_ROUND R_ROUND
| L_CURLY R_CURLY
| L_SQR R_SQR
"""
p[0] = ("EXP", p[1])
def p_stmt_blk(p):
"""
stmt_blk : L_CURLY S R_CURLY
"""
p[0] = ("STMT_BLK", p[2])
def p_print_stmt(p):
"""
stmt : PRINT L_ROUND args R_ROUND SEMICOL
"""
p[0] = ('PRINT', p[3])
def p_args_exp(p):
"""
args : args COMMA args
| exp
|
"""
p[0] = ("EMPTY")
if len(p) > 1:
p[0] = (p[1])
if len(p) > 2:
p[0] = (",", p[1], p[3])
def p_if_stmt(p):
"""
stmt : if
"""
p[0] = p[1]
def p_if(p):
"""
if : IF L_ROUND exp R_ROUND stmt_blk else
"""
p[0] = ("IF", p[3], p[5], p[6])
def p_else(p):
"""
else : ELSE stmt_blk
| ELSE if
|
"""
if(len(p) > 2):
p[0] = (p[2])
def p_for_stmt(p):
"""
stmt : FOR L_ROUND stmt exp SEMICOL stmt R_ROUND stmt_blk
"""
p[0] = ("FOR", p[3], p[4], p[6], p[8])
def p_struct_stmt(p):
"""
stmt : STRUCT VAR_NAME dec_blk
"""
p[0] = ("STRUCT", p[2], p[3])
def p_struct_def(p):
"""
stmt : VAR_NAME VAR_NAME SEMICOL
"""
p[0] = ("DEC_STRUCT", p[1], p[2])
def p_dec_stmt(p):
"""
stmt : declare
| dec_blk
"""
p[0] = p[1]
def p_dec_blk(p):
"""
dec_blk : L_CURLY declare R_CURLY
"""
p[0] = p[2]
def p_declare(p):
"""
declare : dec declare
|
"""
p[0] = ("END") # mark end of chain
if len(p) > 2:
p[0] = ("DEC_CHAIN", p[1], p[2])
def p_dec(p):
"""
dec : DATA_TYPE VAR_NAME SEMICOL
| DATA_TYPE VAR_NAME ASSIGNMENT exp SEMICOL
"""
if(len(p) == 4):
p[0] = ("DECLARE", p[1], p[2])
elif(len(p) == 6):
p[0] = ("DEC_ASS", ("DECLARE", p[1], p[2]), ("ASSIGN", p[2], p[4]) )
def p_func_dec(p):
"""
stmt : FUNC_DEC VAR_NAME param_blk stmt_blk
"""
p[0] = ("FUNC_DEC", p[2], p[3], p[4])
def p_param_blk(p):
"""
param_blk : L_ROUND param R_ROUND
"""
p[0] = p[2]
def p_param(p):
"""
param : param COMMA param
| DATA_TYPE VAR_NAME
|
"""
if len(p) > 3:
p[0] = (",", p[1], p[3])
elif len(p) > 2:
p[0] = (",", (p[1], p[2]))
else:
p[0] = (",", ("string", "EMPTY"))
def p_return_stmt(p):
"""
stmt : RETURN exp SEMICOL
| RETURN SEMICOL
"""
if len(p) <= 3:
p[0] = ("RETURN", "EMPTY")
else:
p[0] = ("RETURN", p[2])
def p_assign_stmt(p):
"""
stmt : VAR_NAME ASSIGNMENT exp SEMICOL
| exp ASSIGNMENT exp SEMICOL
"""
p[0] = ("ASSIGN", p[1], p[3])
def p_exp_brackets(p):
"""
exp : L_ROUND exp R_ROUND
| L_CURLY exp R_CURLY
| L_SQR exp R_SQR
"""
p[0] = (p[2])
def p_exp_bin(p):
"""
exp : exp PLUS exp
| exp MINUS exp
| exp ASTERISK exp
| exp FWSLASH exp
| exp TOPI exp
| exp MOD exp
| exp LESSER exp
| exp GREATER exp
| exp LESSER_EQUAL exp
| exp GREATER_EQUAL exp
| exp IS_EQUAL exp
| exp NOT_EQUAL exp
| exp LOGICAL exp
| exp DOT VAR_NAME
"""
p[0] = (p[2], p[1], p[3])
def p_exp_call(p):
"""
exp : VAR_NAME L_ROUND args R_ROUND
"""
p[0] = ("FUNC_CALL", p[1], p[3])
def p_exp_neg(p):
"""
exp : MINUS exp
"""
p[0] = (p[1], ('NUM', 0), p[2])
def p_exp_not(p):
"""
exp : NOT exp
"""
p[0] = (p[1], p[2], p[2])
def p_exp_uni(p):
"""
exp : VAR_NAME PLUS_PLUS
| VAR_NAME MINUS_MINUS
"""
p[0] = ("ASSIGN", p[1], (p[2][0], ("ACCESS", p[1]), ("NUM", 1)))
def p_exp_var(p):
"""
exp : VAR_NAME
"""
p[0] = ("ACCESS", p[1])
def p_exp_num(p):
"""
exp : INT
| FLOAT
"""
p[0] = ('NUM', p[1])
def p_exp_str(p):
"""
exp : STRING
| CHAR
"""
p[0] = ("STR", p[1])
def p_exp_bool(p):
"""
exp : BOOL
"""
p[0] = ("BOOL", p[1])
def p_error(p):
if p == None:
print("Syntax error at end of file")
exit(1)
print("Syntax error at token", p.value, p.type, p.lexpos)
exit(1)
parser = yacc.yacc() # start parsing, yacc object created | [
"[email protected]"
]
| |
5dff5ce729438b3c534d2d4da22d067350b99165 | 4e32131f3c9cf8355378eadbb71158a3df77ec48 | /ocr/utils/builders.py | ceb0c49b8a3db9b0c38ebb1b30ac757c6c9b326f | []
| no_license | reymondzzzz/transformer_ocr | c6e227f8c05406b0b20d10ffe9ca6f6c8c5b9cd5 | 347dfc3512550a211185caec2bfaaa19ce021b43 | refs/heads/master | 2023-04-11T19:59:07.991523 | 2021-04-12T08:41:01 | 2021-04-12T08:41:01 | 279,027,780 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,880 | py | from typing import Tuple
import albumentations as Albumentations
import albumentations.pytorch as AlbumentationsPytorch
import pretrainedmodels
import pytorch_lightning.callbacks as LightningCallbacks
import torchmetrics as PLMetrics
# import pytorch_loss as PytorchExtraLosses
import timm
import torch
import torch.nn as TorchNNModules
import torch.optim as OptimizerLib
import torch.optim.lr_scheduler as LRSchedulerLib
import ocr.datasets as Datasets
import ocr.decoder as Decoders
import ocr.encoder as Encoders
import ocr.blocks as Blocks
import ocr.loss as Losses
import ocr.metrics as CustomMetrics
import ocr.modelling.backbones as Backbones
import ocr.modelling.heads as Heads
import ocr.transforms as Transforms
__all__ = [
'build_lightning_module',
'build_backbone_from_cfg',
'build_head_from_cfg',
'build_transform_from_cfg',
'build_dataset_from_cfg',
'build_loss_from_cfg',
'build_metric_from_cfg',
'build_optimizer_from_cfg',
'build_lr_scheduler_from_cfg',
'build_callbacks_from_cfg'
]
def _base_transform_from_cfg(config, modules_to_find):
assert isinstance(config, dict) and 'type' in config, f'Check config type validity: {config}'
args = config.copy()
obj_type_name = args.pop('type')
real_type = None
for module in modules_to_find:
if not hasattr(module, obj_type_name):
continue
real_type = getattr(module, obj_type_name)
if real_type:
break
assert real_type is not None, f'{obj_type_name} is not registered type in any modules: {modules_to_find}'
return real_type(**args)
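# Minimal illustration of the lookup convention above (not used by the library):
# the config's "type" names a class that is looked up in the supplied modules and
# the remaining keys become constructor kwargs. torch.nn.Linear is used here only
# because it is a class known to exist in TorchNNModules.
if __name__ == "__main__":
    example_layer = _base_transform_from_cfg(
        {"type": "Linear", "in_features": 4, "out_features": 2},
        [TorchNNModules],
    )
    print(example_layer)  # -> Linear(in_features=4, out_features=2, bias=True)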
def build_lightning_module(config):
import ocr.modules as Modules
return _base_transform_from_cfg(config, [Modules])
def build_backbone_from_cfg(config) -> Tuple[torch.nn.Module, int]:
args = config.copy()
backbone_type_name = args.pop('type')
if hasattr(Backbones, backbone_type_name):
backbone = getattr(Backbones, backbone_type_name)(**args)
output_channels = backbone.output_channels
elif backbone_type_name in pretrainedmodels.__dict__:
backbone = pretrainedmodels.__dict__[backbone_type_name](**args)
if 'squeezenet' in backbone_type_name:
backbone = backbone.features
output_channels = 512
else:
backbone.forward = backbone.features
output_channels = backbone.last_linear.in_features
elif backbone_type_name in timm.list_models():
backbone = timm.create_model(backbone_type_name, **args)
backbone.forward = backbone.forward_features
output_channels = backbone.classifier.in_features
else:
assert False, f'{backbone_type_name} not found in backbones factory'
return backbone, output_channels
def build_head_from_cfg(input_channels: int, config):
config['input_channels'] = input_channels
return _base_transform_from_cfg(config, [Heads])
def build_transform_from_cfg(config):
def _builder(cfg):
modules = [Albumentations, AlbumentationsPytorch, Transforms]
if 'transforms' in cfg:
cfg['transforms'] = [
_builder(transform_cfg) for transform_cfg in cfg['transforms']
]
return _base_transform_from_cfg(cfg, modules)
return _builder(config)
def build_dataset_from_cfg(transforms, config):
config['transforms'] = transforms
return _base_transform_from_cfg(config, [Datasets])
def build_loss_from_cfg(config):
type = config['type']
if hasattr(TorchNNModules, type):
loss_module = _base_transform_from_cfg(config, [TorchNNModules])
return Losses.pytorch_inject_loss(loss_module)
return _base_transform_from_cfg(config, [Losses])#, PytorchExtraLosses])
def build_encoder_from_cfg(input_channels: int, config):
config['input_channels'] = input_channels
return _base_transform_from_cfg(config, [Encoders])
def build_decoder_from_cfg(config):
return _base_transform_from_cfg(config, [Decoders])
def build_metric_from_cfg(config):
return _base_transform_from_cfg(config, [CustomMetrics, PLMetrics])
def build_optimizer_from_cfg(params, config):
modules = [OptimizerLib]
try:
import adabelief_pytorch
modules.append(adabelief_pytorch)
except ImportError:
pass
try:
import ranger_adabelief
modules.append(ranger_adabelief)
except ImportError:
pass
try:
import ranger
modules.append(ranger)
except ImportError:
pass
config['params'] = params
return _base_transform_from_cfg(config, modules)
def build_lr_scheduler_from_cfg(optimizer, config):
config['optimizer'] = optimizer
return _base_transform_from_cfg(config, [LRSchedulerLib])
def build_callbacks_from_cfg(config):
return _base_transform_from_cfg(config, [LightningCallbacks])
| [
"[email protected]"
]
| |
81aa159a3fffbe35c2f49ba73dfafe34c2c9ce02 | 3ba3a0e970644fb9a8f55bf47900fcc8bd940c71 | /euler102.py | 08a267640c5084532b130e24beae3e30b33298ee | []
| no_license | jacekkubacki/projectEulerInPython | be6f3e9cf814a5513d682685fc28a816aa0b3890 | a372e2f48507f3741b504eea892de49d7d630c85 | refs/heads/master | 2021-01-17T13:08:20.648105 | 2020-07-19T20:25:11 | 2020-07-19T20:25:11 | 22,376,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | #!/usr/bin/env python3
# Triangle containment
# Problem 102
#
# Three distinct points are plotted at random on a Cartesian plane, for which -1000 <= x, y <= 1000, such that a triangle is formed.
#
# Consider the following two triangles:
#
# A(-340,495), B(-153,-910), C(835,-947)
#
# X(-175,41), Y(-421,-714), Z(574,-645)
#
# It can be verified that triangle ABC contains the origin, whereas triangle XYZ does not.
#
# Using triangles.txt (right click and 'Save Link/Target As...'), a 27K text file containing the co-ordinates of one thousand "random" triangles, find the number of triangles for which the interior contains the origin.
#
# NOTE: The first two examples in the file represent the triangles in the example given above.
# Solution:
# The linear equation of a line passing through two points (x1,y1) and (x2,y2) is:
# (x2 - x1)*(y - y1) = (y2 - y1)*(x - x1) => (x2 - x1)*(y - y1) - (y2 - y1)*(x - x1) = 0
#
# Let f(x, y) = (x2 - x1)*(y - y1) - (y2 - y1)*(x - x1).
# f(x, y) = 0 only if (x,y) lies on the line.
# If it doesn't, then f(x,y) is either positive or negative depending on which side of the line (x,y) lies.
# Notice that (0,0) is inside the triangle ABC if:
# 1. A and (0,0) lie on the same side of the line passing through B and C
# 2. B and (0,0) lie on the same side of the line passing through A and C
# 3. C and (0,0) lie on the same side of the line passing through A and B
def sign(x):
"""Returns sign of x"""
return (x > 0) - (x < 0)
def line(x1, y1, x2, y2, x, y):
"""Substitutes x and y in the linear equation of a line that passes through two points: (x1,y1) and (x2,y2)"""
return (x2 - x1) * (y - y1) - (y2 - y1) * (x - x1)
result = 0
with open('p102_triangles.txt') as f:
for row in f:
ax, ay, bx, by, cx, cy = tuple(map(float, row.split(',')))
if sign(line(bx, by, cx, cy, ax, ay)) == sign(line(bx, by, cx, cy, 0, 0)) and\
sign(line(ax, ay, cx, cy, bx, by)) == sign(line(ax, ay, cx, cy, 0, 0)) and\
sign(line(ax, ay, bx, by, cx, cy)) == sign(line(ax, ay, bx, by, 0, 0)):
result += 1
print("Result:", result)
| [
"[email protected]"
]
| |
6a3aa7b0e23d7132cd569036c9ba4c32875eb08e | 475303b69537b3739c7846ca0ab13ab02ad6340f | /src/graphs/models/toxic_comment.py | d938602b608cb096aa9667f75f83727e672f2a48 | []
| no_license | Guest400123064/FedKGQA | 8a0328faccee6819181879cb52ee530a2b0cfa6d | 6a51b354d7526bd7e63b08607f86f53ce5e84afc | refs/heads/main | 2023-04-21T00:51:39.541230 | 2021-05-22T14:46:08 | 2021-05-22T14:46:08 | 342,565,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | import torch
import torch.nn as nn
from easydict import EasyDict
class ToxicComtModel(nn.Module):
def __init__(self, config: EasyDict):
super().__init__()
self.config = config
self.seq_encoder = CommentEncoder(config.comment_encoder)
self.out_layer = OutputLayer(config.output_layer)
return
def forward(self, x):
hid = self.seq_encoder(x)
out = self.out_layer(hid)
return out
class CommentEncoder(nn.Module):
def __init__(self, config: EasyDict):
super().__init__()
self.config = config
self.rnn = nn.RNN(
config.input_size,
hidden_size=config.hidden_size,
num_layers=config.num_layers,
batch_first=True
)
return
def forward(self, x):
_, hid_state = self.rnn(x)
return hid_state
class OutputLayer(nn.Module):
def __init__(self, config: EasyDict):
super().__init__()
self.config = config
self.linear = nn.Linear(
self.config.n_factor
, out_features=self.config.n_target
, bias=True
)
return
def forward(self, x):
lin = self.linear(x)
return torch.sigmoid(lin)
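# Minimal smoke test (illustrative only): the config values below are made-up
# assumptions; the only structural constraint taken from the code above is that
# output_layer.n_factor must equal comment_encoder.hidden_size.
if __name__ == "__main__":
    cfg = EasyDict({
        "comment_encoder": {"input_size": 300, "hidden_size": 128, "num_layers": 1},
        "output_layer": {"n_factor": 128, "n_target": 1},
    })
    model = ToxicComtModel(cfg)
    batch = torch.randn(4, 20, 300)  # (batch, sequence length, embedding size)
    probs = model(batch)
    print(probs.shape)  # -> torch.Size([1, 4, 1]); the RNN's final hidden state is used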
| [
"[email protected]"
]
| |
8dfab0b34286b301bbb0cd4a0508c6d7549c2f09 | fc6033386df4b83abe0e49e9aab03adcdd921134 | /maxflow.py | 09055f9b7db1b0a776674eee8f201618c3a736e6 | []
| no_license | showmen-gupta/algorithms | 0db0b85a891cc47015e9330dbdecca3764e92d3a | c86c8b582c5bec715c3b55135387bc9e66925277 | refs/heads/master | 2021-05-11T21:14:54.201070 | 2018-01-14T20:44:54 | 2018-01-14T20:44:54 | 117,465,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | class G:
def __init__(self, g):
self.g = g # residual g
self.R = len(g)
'''Returns true if there is a path from source 's' to sink 't' in
residual g. Also fills parent[] to store the path '''
def BFS(self, s, t, parent):
# Mark all the vertices as not visited
visited = [False] * (self.R)
# Create a queue for BFS
queue = []
# Mark the source node as visited and enqueue it
queue.append(s)
visited[s] = True
# Standard BFS Loop
while queue:
# Dequeue a vertex from queue and print it
u = queue.pop(0)
# Get all adjacent vertices of the dequeued vertex u
# If a adjacent has not been visited, then mark it
# visited and enqueue it
for ind, val in enumerate(self.g[u]):
if visited[ind] == False and val > 0:
queue.append(ind)
visited[ind] = True
parent[ind] = u
# If we reached sink in BFS starting from source, then return
# true, else false
return True if visited[t] else False
# Returns tne maximum flow from s to t in the given g
def FrdFul(self, source, sink):
# This array is filled by BFS and to store path
parent = [-1] * (self.R)
max_flow = 0 # There is no flow initially
# Augment the flow while there is path from source to sink
while self.BFS(source, sink, parent):
# Find minimum residual capacity of the edges along the
# path filled by BFS. Or we can say find the maximum flow
# through the path found.
path_flow = float("Inf")
s = sink
while (s != source):
path_flow = min(path_flow, self.g[parent[s]][s])
s = parent[s]
# Add path flow to overall flow
max_flow += path_flow
# update residual capacities of the edges and reverse edges
# along the path
v = sink
while (v != source):
u = parent[v]
self.g[u][v] -= path_flow
self.g[v][u] += path_flow
v = parent[v]
return max_flow
# Create a g given in the above diagram
g = [[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0]]
g = G(g)
source = 0;
sink = 5
print ("The maximum possible flow is %d " % g.FrdFul(source, sink)) | [
"[email protected]"
]
| |
17ecaa88d0e2ac422aa3e090e3e08974e7e40179 | b7a120b56a4e0379e24523e76781144af23b5b91 | /venv/bin/easy_install-3.7 | 1502696c9705bb5d9b5d0e589f7d285ec774edca | []
| no_license | liuqi10/python_base | bd2cadb1e8e1f9f1dbfb44f71c6b73933b0a5903 | d56058a75240dd644739517d03d13eabcd67d3d9 | refs/heads/master | 2021-05-25T07:59:30.518515 | 2020-04-07T08:37:31 | 2020-04-07T08:53:25 | 253,729,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | 7 | #!/Users/lq/PycharmProjects/python_base/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
]
| |
4b94ea0efb14d60e69e0110fd84977c9ba7a7611 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/virtual_machine_scale_set_public_ip_address_configuration_py3.py | 76a82b78db8773b9a74688ddbdadeac51ed6ec07 | [
"MIT"
]
| permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,197 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetPublicIPAddressConfiguration(Model):
"""Describes a virtual machines scale set IP Configuration's PublicIPAddress
configuration.
All required parameters must be populated in order to send to Azure.
:param name: Required. The publicIP address configuration name.
:type name: str
:param idle_timeout_in_minutes: The idle timeout of the public IP address.
:type idle_timeout_in_minutes: int
:param dns_settings: The dns settings to be applied on the publicIP
addresses .
:type dns_settings:
~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
:param ip_tags: The list of IP tags associated with the public IP address.
:type ip_tags:
list[~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetIpTag]
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings'},
'ip_tags': {'key': 'properties.ipTags', 'type': '[VirtualMachineScaleSetIpTag]'},
}
def __init__(self, *, name: str, idle_timeout_in_minutes: int=None, dns_settings=None, ip_tags=None, **kwargs) -> None:
super(VirtualMachineScaleSetPublicIPAddressConfiguration, self).__init__(**kwargs)
self.name = name
self.idle_timeout_in_minutes = idle_timeout_in_minutes
self.dns_settings = dns_settings
self.ip_tags = ip_tags
| [
"[email protected]"
]
| |
a68519e47c61580f353d0d2941d49c1dc2e26010 | 21526140eff7799e6f3ffa13e610cec7a3c07c4f | /data/loadwhiskeys.py | acd03e929e74cb1817a712ae00fbc6930bb20c08 | []
| no_license | soumyagk/CS6456 | b5b392f78263f1f11fede248cd5f5565a59b1b01 | 5258ab8375f40a23b25a01e4f6475763e8ec2352 | refs/heads/master | 2021-05-31T14:23:29.006617 | 2015-11-24T13:51:26 | 2015-11-24T13:51:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | import csv
import pymysql
csvfile = open('whiskeys.csv', 'r')
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='gestviz')
cur = conn.cursor()
fieldnames = ("Name","Rating","Country","Category","Price","ABV","Age","Brand")
reader = csv.DictReader( csvfile, fieldnames)
v_rec_id = 1;
for row in reader:
if v_rec_id > 1:
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 22, 'Name', '"+row['Name'].replace("'","''")+"')")
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 23, 'Rating', '"+row['Rating'].replace("'","''")+"')")
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 24, 'Country', '"+row['Country'].replace("'","''")+"')")
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 25, 'Category', '"+row['Category'].replace("'","''")+"')")
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 26, 'Price', '"+row['Price'].replace("'","''")+"')")
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 27, 'ABV', '"+row['ABV'].replace("'","''")+"')")
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 28, 'Age', '"+row['Age'].replace("'","''")+"')")
cur.execute("insert into gestviz_data(db_id,rec_id,field_id,field_name,value) values(3, "+str(v_rec_id)+", 29, 'Brand', '"+row['Brand'].replace("'","''")+"')")
v_rec_id += 1
# After the loop: commit the inserted rows (pymysql does not autocommit by default) and release resources.
conn.commit()
cur.close()
conn.close()
csvfile.close() | [
"[email protected]"
]
| |
a772879e485592a2933d28abf327fa778c4be088 | 7ac5ea4a31eb2e382b144206febe663b9531afaa | /main.py | 15b43fdf66c16250ee9931fc50883bc1306bf1a1 | []
| no_license | roseate8/SOC-prediction | 0e3b04d8286c319192627034987b80cdb85d8b08 | 7e664a2fa111004a215639113e3aab72e08e6d6e | refs/heads/master | 2020-09-17T05:58:34.678938 | 2020-05-25T18:50:42 | 2020-05-25T18:50:42 | 224,012,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | from battery import Battery
from kalman import ExtendedKalmanFilter as EKF
from protocol import launch_experiment_protocol
import numpy as np
import math as m
def get_EKF(R0, R1, C1, std_dev, time_step):
# initial state (SoC is intentionally set to a wrong value)
# x = [[SoC], [RC voltage]]
x = np.matrix([[0.5],\
[0.0]])
exp_coeff = m.exp(-time_step/(C1*R1))
# state transition model
F = np.matrix([[1, 0 ],\
[0, exp_coeff]])
# control-input model
B = np.matrix([[-time_step/(Q_tot * 3600)],\
[ R1*(1-exp_coeff)]])
# variance from std_dev
var = std_dev ** 2
# measurement noise
R = var
# state covariance
P = np.matrix([[var, 0],\
[0, var]])
# process noise covariance matrix
Q = np.matrix([[var/50, 0],\
[0, var/50]])
def HJacobian(x):
return np.matrix([[battery_simulation.OCV_model.deriv(x[0,0]), -1]])
def Hx(x):
return battery_simulation.OCV_model(x[0,0]) - x[1,0]
return EKF(x, F, B, P, Q, R, Hx, HJacobian)
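# (Added note, not in the original file) The matrices above encode the discrete
# Thevenin battery model used here:
#   SoC[k+1] = SoC[k] - (dt / (3600 * Q_tot)) * I[k]
#   Vrc[k+1] = exp(-dt/(R1*C1)) * Vrc[k] + R1 * (1 - exp(-dt/(R1*C1))) * I[k]
# with measurement V = OCV(SoC) - Vrc; the R0*I drop is added back onto the
# measured voltage before Kf.update() is called in update_all() further down.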
def plot_everything(time, true_voltage, mes_voltage, true_SoC, estim_SoC, current):
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
# ax1 = plt.axes([0, 0, 3, 5])
# ax2 = plt.axes([0, 0, 3, 5])
# ax3 = plt.axes([0, 0, 3, 5])
# title, labels
ax1.set_title('')
ax1.set_xlabel('Time / s')
ax1.set_ylabel('voltage / V')
ax2.set_xlabel('Time / s')
ax2.set_ylabel('Soc')
ax3.set_xlabel('Time / s')
ax3.set_ylabel('Current / A')
ax1.plot(time, true_voltage, label="True voltage")
ax1.plot(time, mes_voltage, label="Mesured voltage")
ax2.plot(time, true_SoC, label="True SoC")
ax2.plot(time, estim_SoC, label="Estimated SoC")
ax3.plot(time, current, label="Current")
ax1.legend()
ax2.legend()
ax3.legend()
plt.show()
# for i in len(true_SoC):
# print(true_SoC[i] - estim_SoC[i])
if __name__ == '__main__':
# total capacity
Q_tot = 3.2
# Thevenin model values
R0 = 0.062
R1 = 0.01
C1 = 3000
# time period
time_step = 10
battery_simulation = Battery(Q_tot, R0, R1, C1)
# discharged battery
battery_simulation.actual_capacity = 0
# measurement noise standard deviation
std_dev = 0.015
#get configured EKF
Kf = get_EKF(R0, R1, C1, std_dev, time_step)
time = [0]
true_SoC = [battery_simulation.state_of_charge]
estim_SoC = [Kf.x[0,0]]
true_voltage = [battery_simulation.voltage]
mes_voltage = [battery_simulation.voltage + np.random.normal(0,0.1,1)[0]]
current = [battery_simulation.current]
def update_all(actual_current):
battery_simulation.current = actual_current
battery_simulation.update(time_step)
time.append(time[-1]+time_step)
current.append(actual_current)
true_voltage.append(battery_simulation.voltage)
mes_voltage.append(battery_simulation.voltage + np.random.normal(0, std_dev, 1)[0])
Kf.predict(u=actual_current)
Kf.update(mes_voltage[-1] + R0 * actual_current)
true_SoC.append(battery_simulation.state_of_charge)
estim_SoC.append(Kf.x[0,0])
return battery_simulation.voltage #mes_voltage[-1]
# launch experiment
launch_experiment_protocol(Q_tot, time_step, update_all)
# plot stuff
plot_everything(time, true_voltage, mes_voltage, true_SoC, estim_SoC, current)
| [
"[email protected]"
]
| |
0a5b6982fcdfc59b094c36f964a57334db398003 | 81f943eab36ac03e9d58677722a86aac512e108d | /knn.py | 20dea10bc4c17d7b83926809981d3fffbdbb39da | []
| no_license | Ritika-Singhal/KNN-Algorithm | 39595e55967a2fd1db941c017d7d37175fe7c8ef | e46c5cd5053db3be04560eeef2b337cda8cecb62 | refs/heads/master | 2020-07-31T06:52:37.944109 | 2019-09-24T07:28:08 | 2019-09-24T07:28:08 | 210,521,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | import numpy as np
from collections import Counter
class KNN:
def __init__(self, k, distance_function):
"""
:param k: int
:param distance_function
"""
self.k = k
self.distance_function = distance_function
def train(self, features, labels):
"""
In this function, features is simply training data which is a 2D list with float values.
:param features: List[List[float]]
:param labels: List[int]
"""
self.train_features = features
self.train_labels = labels
def predict(self, features):
"""
This function takes 2D list of test data points, similar to those from train function. Here, I process
every test data point, reuse the get_k_neighbours function to find the nearest k neighbours for each test
data point, find the majority of labels for these neighbours as the predict label for that testing data point.
Thus, got N predicted label for N test data point.
This function returns a list of predicted labels for all test data points.
:param features: List[List[float]]
:return: List[int]
"""
test_features = features
test_labels = []
for i in range(len(test_features)):
k_neighbors = Counter(self.get_k_neighbors(test_features[i]))
majority_label = k_neighbors.most_common(1)[0][0]
test_labels.append(majority_label)
return test_labels
def get_k_neighbors(self, point):
"""
This function takes one single data point and finds k-nearest neighbours in the training set.
We already have the k value, distance function and stored all training data in KNN class with the
train function. This function returns a list of labels of all k neighbours.
:param point: List[float]
:return: List[int]
"""
dist_list = []
for i in range(len(self.train_features)):
dist_list.append((self.distance_function(point, self.train_features[i]), self.train_labels[i]))
distances = np.array(dist_list, dtype=({'names':('distance', 'label'), 'formats':(float, int)}))
distances = np.sort(distances, order='distance')
return distances['label'][:self.k]
if __name__ == '__main__':
print(np.__version__)
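# (Added usage sketch, not part of the original file) The toy data and the
# Euclidean helper below are illustrative assumptions only.
def _euclidean(a, b):
    return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5
_model = KNN(k=1, distance_function=_euclidean)
_model.train([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]], [0, 0, 1])
print(_model.predict([[0.2, 0.1], [4.8, 5.1]]))  # expected: [0, 1]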
| [
"[email protected]"
]
| |
d0006c97e0d1863494d71eb5d015224002a7ed02 | f48d0c0f485951eba6032c5caee02ddfac123801 | /文本分类/Bag of Words Meets Bags of Popcorn/main2.py | e3da802d0cd22ca411d9b2a4992c0d4c41cfa708 | []
| no_license | linlinxiaostudent/DeepLearning_AE | 31b54e1f269cfcc5e4ddb3646638049ac7229eb1 | 9e5d9c96270ddff3d377f25e7cd46fe15ee23c26 | refs/heads/master | 2020-03-10T10:50:43.332134 | 2018-04-13T13:08:10 | 2018-04-13T13:08:10 | 129,342,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,794 | py |
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import os
import re
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer as TFIDF
data = pd.read_csv('labeledTrainData.tsv', header=0, delimiter="\t", quoting=3)
def review_to_words(raw_review):
# This function preprocesses one raw review into a sequence of words.
# Input: the raw data (a single movie review).
# Output: a string of words (the preprocessed review).
review_text = BeautifulSoup(raw_review).get_text()
letters_only = re.sub("[^a-zA-Z]", " ", review_text)
words = letters_only.lower().split()
stops = set(stopwords.words("english"))
meaningful_words = [w for w in words if not w in stops]
return (" ".join(meaningful_words))
num_reviews = data ['review'].size
clean_data_reviews = []
for i in range(0, num_reviews):
if ((i + 1) % 1000 == 0):
print('Review %d of %d \n' % (i + 1, num_reviews))
clean_data_reviews.append(review_to_words(data['review'][i]))
tfidf = TFIDF(min_df=2,max_features=None,
strip_accents='unicode',analyzer='word',
token_pattern=r'\w{1,}',ngram_range=(1, 3),
use_idf=1,smooth_idf=1,
sublinear_tf=1, stop_words='english')  # remove English stop words
# Combine the train and test portions so TF-IDF vectorization shares one vocabulary
tfidf.fit(clean_data_reviews)
clean_data_reviews = tfidf.transform(clean_data_reviews)
# Split back into the training and test portions
train_x = clean_data_reviews[:20000]
label_train=data['sentiment'][:20000]
test_x = clean_data_reviews[20000:]
label_test=data['sentiment'][20000:]
from sklearn.linear_model import LogisticRegression as LR
from sklearn.grid_search import GridSearchCV
grid_values = {'C':[30]}
# Use roc_auc as the scoring metric
model_LR = GridSearchCV(LR(penalty='l2', dual=True, random_state=0),
grid_values, scoring = 'roc_auc', cv = 20)
model_LR.fit(train_x, label_train )
# 20-fold cross validation ### why is it written like this?? (note: this bare
# GridSearchCV(...) expression only builds an object that is never assigned or used)
GridSearchCV(cv=20, estimator=LR(C=1.0, class_weight=None, dual=True,
fit_intercept=True, intercept_scaling=1,
penalty='l2', random_state=0, tol=0.0001),
fit_params={}, iid=True, n_jobs=1,
param_grid={'C': [30]}, pre_dispatch='2*n_jobs', refit=True,
scoring='roc_auc', verbose=0)
# Print the results
print (model_LR.grid_scores_)
test_predicted = np.array(model_LR.predict(test_x))
label_test_array = []
for i in range(20000, num_reviews):
label_test_array.append(label_test[i])
num =0
for i in range(0,len(label_test_array)):
if (test_predicted[i]==label_test_array[i]):
num=num+1
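# (Added note, not in the original file) The loop above computes plain accuracy by
# hand; sklearn.metrics.accuracy_score(label_test_array, test_predicted) gives the
# same number.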
print('acc:',num/len(label_test )) | [
"[email protected]"
]
| |
c1c8be7822203830cca468d1e183c39f3458843a | 484b7daa8f7e73271be1b6de37afdc5825717f4f | /catcolony/models.py | 2adfc7f37b9c914974f5737621a7c1e3ac1de872 | []
| no_license | maxpinomaa/cat-video-thing | 409200ff3c18f7034aa072177a3c2e37ee531c9a | 529d453d9b8764377268a2783b6cc6ac0ae80e1c | refs/heads/master | 2021-01-10T07:23:56.607822 | 2016-03-23T20:06:32 | 2016-03-23T20:06:32 | 52,385,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | from django.db import models
from django.utils import timezone
from embed_video.fields import EmbedVideoField
from taggit.managers import TaggableManager
class Item(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
video = EmbedVideoField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
tags = TaggableManager()
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
| [
"[email protected]"
]
| |
5793547e4f2688f451442dce1b433dfd365ef5a8 | 9715a7d27f9b146632f964b643ee7243a7e9a38c | /match-sift.py | b7275f8f1c0e357c2af2b24419bc14fbb03ef725 | []
| no_license | uakfdotb/skyquery | 3eb9b2265992127a3c5b3b3612c32ddea0f39195 | dc67b98ee8034711c274408640e3582d20482673 | refs/heads/master | 2020-07-07T03:49:57.856424 | 2019-08-21T19:13:18 | 2019-08-21T19:13:18 | 203,237,682 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,632 | py | from discoverlib import geom, grid_index
import get_db
import cv2
import json
import math
import multiprocessing
import numpy
import os
from PIL import Image
import scipy.ndimage
import sys
video_id = int(sys.argv[1])
db = get_db.get_db()
BASE_PATH = 'ortho-masked.jpg'
FRAME_PATH = 'frames/{}/'.format(video_id)
LK_PARAMETERS = dict(winSize=(21, 21), maxLevel=2, criteria=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 30, 0.01))
# MAX_SPEED is expressed in ortho-imagery resolution units (imagery was 2 cm/pixel,
# resized to 4 cm/pixel) per frame (the time unit is one video frame).
MAX_SPEED = 75
sift = cv2.xfeatures2d.SIFT_create()
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_L1)
base_im = scipy.ndimage.imread(BASE_PATH)
base_keypoints, base_desc = sift.detectAndCompute(base_im, None)
index = grid_index.GridIndex(256)
for i, kp in enumerate(base_keypoints):
p = geom.Point(kp.pt[0], kp.pt[1])
index.insert(p, i)
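# (Added note, not in the original file) Overall flow: each frame is registered to
# the ortho mosaic either by LK optical flow from the previous frame's homography
# (used for at most 5 consecutive frames) or by SIFT matching against the base
# keypoints (restricted to the neighbourhood of the previous bounds when available);
# the resulting homography maps the frame corners and per-frame detections into
# mosaic coordinates, which are written back to the database.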
def points_to_poly_str(points):
strs = ['{},{}'.format(points[j, 0], points[j, 1]) for j in xrange(points.shape[0])]
return ' '.join(strs)
def homography_from_flow(prev_homography, prev_gray, cur_gray):
positions = []
for i in xrange(0, prev_gray.shape[0]-50, 50):
for j in xrange(0, prev_gray.shape[1]-50, 50):
positions.append((i, j))
positions_np = numpy.array(positions, dtype='float32').reshape(-1, 1, 2)
def flip_pos(positions):
return numpy.stack([positions[:, :, 1], positions[:, :, 0]], axis=2)
next_positions, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, cur_gray, flip_pos(positions_np), None, **LK_PARAMETERS)
if next_positions is None:
return None
next_positions = flip_pos(next_positions)
differences = next_positions[:, 0, :] - positions_np[:, 0, :]
differences_okay = differences[numpy.where(st[:, 0] == 1)]
median = [numpy.median(differences_okay[:, 0]), numpy.median(differences_okay[:, 1])]
good = (numpy.square(differences[:, 0] - median[0]) + numpy.square(differences[:, 1] - median[1])) < 16
if float(numpy.count_nonzero(good)) / differences.shape[0] < 0.7:
return None
# translate previous homography based on the flow result
translation = [numpy.median(differences[:, 0]), numpy.median(differences[:, 1])]
H_translation = numpy.array([[1, 0, -translation[1]], [0, 1, -translation[0]], [0,0,1]], dtype='float32')
return prev_homography.dot(H_translation)
frame_idx_to_fname = {}
for fname in os.listdir(FRAME_PATH):
if '.jpg' not in fname:
continue
frame_idx = int(fname.split('.jpg')[0])
frame_idx_to_fname[frame_idx] = fname
prev_bounds = None
prev_frame, prev_gray = None, None
prev_homography = None
prev_counter = 0
#db.execute("SELECT id, idx FROM video_frames WHERE bounds IS NULL AND video_id = %s ORDER BY idx", [video_id])
db.execute("SELECT id, idx FROM video_frames WHERE video_id = %s ORDER BY idx", [video_id])
for row in db.fetchall():
#while True:
# db.execute("SELECT id, idx FROM video_frames WHERE bounds IS NULL AND video_id = %s ORDER BY RAND() LIMIT 1", [video_id])
# rows = db.fetchall()
# if len(rows) != 1:
# break
# row = rows[0]
frame_id, frame_idx = row
frame_fname = frame_idx_to_fname[frame_idx]
print 'process {}'.format(frame_idx)
frame = scipy.ndimage.imread(FRAME_PATH + frame_fname)
frame = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2))
frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
H = None
if prev_homography is not None and prev_counter < 5:
H = homography_from_flow(prev_homography, prev_gray, frame_gray)
prev_counter += 1
if H is None:
keypoints, desc = sift.detectAndCompute(frame, None)
if prev_bounds is None:
query_keypoints, query_desc = base_keypoints, base_desc
else:
indices = index.search(prev_bounds.add_tol(2*MAX_SPEED))
indices = numpy.array(list(indices), dtype='int32')
query_keypoints = []
for i in indices:
query_keypoints.append(base_keypoints[i])
query_desc = base_desc[indices]
matches = matcher.knnMatch(queryDescriptors=query_desc, trainDescriptors=desc, k=2)
good = []
for m, n in matches:
if m.distance < 0.6*n.distance:
good.append(m)
src_pts = numpy.float32([keypoints[m.trainIdx].pt for m in good]).reshape(-1,1,2)
dst_pts = numpy.float32([query_keypoints[m.queryIdx].pt for m in good]).reshape(-1,1,2)
try:
H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
except Exception as e:
print 'warning: exception on frame {}: {}'.format(frame_idx, e)
db.execute("UPDATE video_frames SET bounds = '' WHERE id = %s", [frame_id])
prev_bounds = None
continue
prev_counter = 0
if H is None:
db.execute("UPDATE video_frames SET bounds = '' WHERE id = %s", [frame_id])
prev_bounds = None
continue
bound_points = numpy.array([
[0, 0],
[frame.shape[1], 0],
[frame.shape[1], frame.shape[0]],
[0, frame.shape[0]],
], dtype='float32').reshape(-1, 1, 2)
transformed_points = cv2.perspectiveTransform(bound_points, H)
bounds = None
for p in transformed_points[:, 0, :]:
p = geom.Point(p[0], p[1])
if bounds is None:
bounds = p.bounds()
else:
bounds = bounds.extend(p)
print bounds
if prev_bounds is not None:
intersection_area = float(bounds.intersection(prev_bounds).area())
union_area = float(bounds.area() + prev_bounds.area()) - intersection_area
iou = intersection_area / union_area
if iou < 0.6:
print 'iou failed! ({})'.format(iou)
print bounds, prev_bounds
db.execute("UPDATE video_frames SET bounds = '' WHERE id = %s", [frame_id])
prev_bounds = None
continue
poly_str = points_to_poly_str(transformed_points[:, 0, :])
db.execute("UPDATE video_frames SET bounds = %s WHERE id = %s", [poly_str, frame_id])
prev_bounds, prev_frame, prev_gray, prev_homography = bounds, frame, frame_gray, H
# transform detections
db.execute(
"SELECT id, frame_polygon FROM detections WHERE frame_id = %s AND polygon IS NULL",
[frame_id]
)
points = []
detections = []
for row in db.fetchall():
poly_parts = row[1].split(' ')
poly_points = []
for part in poly_parts:
point_parts = part.split(',')
poly_points.append((int(point_parts[0])/2, int(point_parts[1])/2))
detections.append((int(row[0]), len(poly_points)))
points.extend(poly_points)
if len(points) > 0:
points = numpy.array(points, dtype='float32').reshape(-1, 1, 2)
transformed_points = cv2.perspectiveTransform(points, H)
i = 0
for detection_id, num_points in detections:
poly_str = points_to_poly_str(transformed_points[i:i+num_points, 0, :])
db.execute("UPDATE detections SET polygon = %s WHERE id = %s", [poly_str, detection_id])
print poly_str, detection_id
i += num_points
assert i == transformed_points.shape[0]
| [
"[email protected]"
]
| |
298082e18a03176a471a740af59ae20082832547 | 0999efba1b1664c633fac61d25acdae33253e01a | /appli/Scanfiles.py | 620a0dcb636040e16e3e9b0e83e79a11b494ce32 | []
| no_license | kanenk/scanfiles | 7bb4fbd3583cf0fa7168899d171baac5e25eb1a4 | 3c849d78201ba38e6af9832d1b8abf1ff887e693 | refs/heads/master | 2021-01-01T06:05:57.750530 | 2015-07-17T13:32:26 | 2015-07-17T13:32:26 | 39,254,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,170 | py | #!/usr/bin/python3
# Filename : scanfile.py
# Author : kane
# Email : [email protected]
# Date : 2014.08.01 15:49:00
# release : 1.0
# Python : v2.7.6
import os
from datetime import *
from ConnectDB import ConnectDB
from ReadHeader import ReadHeader
from os.path import join
class ScanFiles(object):
def __init__(self, debug):
self.DEBUG = debug
if self.DEBUG == 1:
print("scanfiles.py : constructor...")
self.__rh = ReadHeader(self.DEBUG)
def __del__(self):
if self.DEBUG == 1:
print("scanfiles.py : destructor...")
def getDatabaseName(self):
return self.__databasename
def setDatabaseName(self, database):
self.__databasename = database
def processFile(self, path):
countFiles = 0
numberOfDoubles = 0
self.__connectDB = ConnectDB(self.__databasename, 0)
# process the files found in the folder
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith((".dat")):
# build the full path to the .dat file
fullPath = join(root,name)
# determine the file creation date
t = os.path.getmtime(fullPath)
ctime = datetime.fromtimestamp(t)
# compute the number of days since the file creation date
time_to_today = abs(date.today() - ctime.date())
if self.DEBUG == 1 or self.DEBUG == 2:
print("Scanfiles.py : Time from file creation date to today in days = %s" % time_to_today.days)
self.__rh.setFilename(fullPath)
self.__rh.openFile()
filecreationdate = self.__rh.getFileCreationDate()
filepath = self.__rh.getFilePath()
filename = self.__rh.getFileName()
filesize = self.__rh.getFileSize()
# Check whether the file already exists in the database
# If the file does not exist, insert it into the database
if self.__connectDB.selectDB(filecreationdate, name, root, filesize) == False:
self.__connectDB.insertDB(filecreationdate, name, root, filesize)
else:
numberOfDoubles = self.__connectDB.findDoubles(name)
print("Scanfiles.py : fichier existe deja dans la base de donnee...")
print("Scanfiles.py : number of files", name,"present in database = ", numberOfDoubles)
if __name__ == '__main__':
databasename = "scanfiles.db"
path = './sourcedir2/'
debug = 2;
# instantiate the ScanFiles, ReadHeader and ConnectDB classes
scanfiles = ScanFiles(debug)
readHeader = ReadHeader(debug)
connectDB = ConnectDB(databasename, debug)
scanfiles.setDatabaseName(databasename)
scanfiles.processFile(path)
numberOfFiles = connectDB.countFiles()
| [
"[email protected]"
]
| |
c011b7b08fb155bd148236837436fd398b3bfe3f | 81939a436389adeaa01c421aecf6cbd6ff6dc7dc | /src/algs/gsa/operators.py | c1f348dab44a0684adfb5519500e0c9130cba1f2 | []
| no_license | HighExecutor/PyWFScheduler | 37c9ed9c60f6466e143183ca7f5f67ac1e80c40b | c80c1cbcc5c4da47f51c50b89e48458414762691 | refs/heads/master | 2022-06-24T02:48:55.054544 | 2022-06-17T21:05:42 | 2022-06-17T21:05:42 | 226,922,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,173 | py | """
1) Vector representation
2) generate() - generation of initial solutions
3) fitness(p) (with mofit field)
4) mass(p, worst, best)
5) force_vector_matrix(pop, kbest, G)
6) position(p, velocity) - function of getting new position
7) G(g, i) - changing of G-constant, where i - a number of a current iteration
8) kbest(kbest, i) - changing of kbest
"""
import random
import math
from deap.base import Fitness
from src.algs.common.MapOrdSchedule import build_schedule, MAPPING_SPECIE, ORDERING_SPECIE
from src.algs.common.utilities import mapping_as_vector
from src.core.environment.Utility import Utility
from src.experiments.cga.utilities.common import hamming_distances
from src.algs import SimpleRandomizedHeuristic
def generate(wf, rm, estimator):
sched = SimpleRandomizedHeuristic(wf, rm.get_nodes(), estimator).schedule()
return schedule_to_position(sched)
def force_vector_matrix(pop, kbest, G, e=0.0):
"""
Returns a matrix of force vectors of size 'pop_size * kbest'.
The distance between two vectors is estimated with the Hamming distance.
Note: pop is sorted by decreasing goodness, so to find the kbest agents
we only need to take the first kbest elements of pop.
"""
sub = lambda seq1, seq2: [0 if s1 == s2 else 1 for s1, s2 in zip(seq1, seq2)]
zero = lambda: [0 for _ in range(len(pop[0]))]
def estimate_force(a, b):
a_string = a.as_vector()
b_string = b.as_vector()
R = hamming_distances(a_string, b_string)
## TODO: here must be a multiplication of a vector and a number
val = (G*(a.mass*b.mass)/R + e)
f = [val * d for d in sub(a_string, b_string)]
return f
mat = [[zero() if p == b else estimate_force(p, b) for b in pop[0:kbest]] for p in pop]
return mat
def position(wf, rm, estimator, position, velocity):
## TODO: do normal architecture of relations in the first place
## TODO: rework it in an elegant way
raise NotImplementedError()
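# (Added note, not in the original file) Everything below this raise is unreachable
# draft code kept by the author (see the TODOs above); it refers to names such as
# `tasks` and `is_checked` that are not defined in this module.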
unchecked_tasks = wf.get_all_unique_tasks()
def change(d):
if d.startswith("ID"):
s = set(node.name for node in rm.get_nodes())
s.remove(d)
s = list(s)
new_name = d if len(s) == 0 else s[random.randint(0, len(s) - 1)]
else:
s = set(t.id for t in tasks)
s.remove(d)
s = [el for el in s if not el.checked]
## TODO: add condition for checking of precedence
if len(s) == 0:
## TODO: check case
new_name = d
else:
while len(s) > 0:
el = s[random.randint(0, len(s) - 1)]
task = wf.byId(el)
if all(is_checked(el) for p in task.parents):
task.checked = True
new_name = el
break
else:
s.remove(el)
pass
threshold = 0.4
new_vector = [change(d) if vd > threshold else d for vd, d in zip(velocity, position.as_vector())]
new_position = Position.from_vector(new_vector)
return new_position
def G(ginit, i, iter_number, all_iter_number=None):
ng = ginit*(1 - i/iter_number) if all_iter_number is None else ginit*(1 - i/all_iter_number)
return ng
def Kbest(kbest_init, kbest, i, iter_number, all_iter_number=None):
"""
basic implementation of kbest decreasing
"""
iter_number = iter_number if all_iter_number is None else all_iter_number
d = iter_number / kbest_init
nkbest = math.ceil(abs(kbest_init - i/d))
return nkbest
def schedule_to_position(schedule):
"""
this function converts valid schedule
to mapping and ordering strings
"""
items = lambda: iter((item, node) for node, items in schedule.mapping.items() for item in items)
if not all(i.is_unstarted() for i, _ in items()):
raise ValueError("Schedule is not valid. Not all elements have unstarted state.")
mapping = {i.job.id: n.name for i, n in items()}
ordering = sorted([i for i, _ in items()], key=lambda x: x.start_time)
ordering = [el.job.id for el in ordering]
return Position(mapping, ordering) | [
"1123581321qwe"
]
| 1123581321qwe |
a8941635b8bd727ba5cc5cf04482c0015a9fc46e | 1eb4dd793a8b9467fd45a1ac35e5446fdde6c732 | /database.py | bfce504bbd78f71b957be12256a3bc2bbc18ce3a | []
| no_license | dinowais/SteelEye | e0879be9719b230da6dbc684f72ab8f58f91ee55 | 440a8ef75c046247a7c5a2a591a6b7ca49c5e208 | refs/heads/master | 2023-04-28T18:36:44.023926 | 2021-05-23T10:15:39 | 2021-05-23T10:15:39 | 370,003,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
# Base.metadata.create_all(engine) | [
"[email protected]"
]
| |
5dc1dd51756ee330466da3f484fc22691e0247e4 | dd50dc51a0cde14363356245b45f6ae4508db247 | /ipnd_notes_comments.py | 4c70d55e2e5397bc5caf66389ac56220b4935987 | []
| no_license | supaheckafresh/IPND_Stage_5 | 3d06899eb02ae30db0e74f0e747f99cf433c84b3 | 5ad37bb4becf7914d5754570119ad84e181fc522 | refs/heads/master | 2016-09-05T23:27:10.209062 | 2015-06-30T19:58:10 | 2015-06-30T19:58:10 | 38,330,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,804 | py | from google.appengine.ext import ndb
from google.appengine.api import users
import os
import cgi
import urllib
import time
import jinja2
import webapp2
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_environment = jinja2.Environment(
loader = jinja2.FileSystemLoader(template_dir),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class Author(ndb.Model):
identity = ndb.StringProperty(indexed=True)
name = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=False)
class Comment(ndb.Model):
author = ndb.StructuredProperty(Author)
content = ndb.StringProperty(indexed=False)
timestamp = ndb.DateTimeProperty(auto_now_add=True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
class MainPage(Handler):
def get_template_values(self, blank_comment_error=''):
comments_query = Comment.query().order(-Comment.timestamp)
comments, cursor, more = comments_query.fetch_page(25)
user = users.get_current_user()
if user:
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
user = 'Anonymous Poster'
url = users.create_login_url(self.request.uri)
url_linktext = 'Login'
template_values = {
'user': user,
'comments': comments,
'url': url,
'url_linktext': url_linktext,
'blank_comment_error': blank_comment_error,
}
return template_values
def get(self):
template = jinja_environment.get_template('page_body.html')
self.write(template.render(self.get_template_values()))
def post(self):
comment = Comment()
if users.get_current_user():
comment.author = Author(
identity=users.get_current_user().user_id(),
name=users.get_current_user().nickname(),
email=users.get_current_user().email())
comment.content = self.request.get('comment')
if comment.content == '' or comment.content.isspace():
self.redirect('/error#comments')
else:
comment.put()
time.sleep(1) #TODO: get rid of this when I figure why new comments don't display on immediate redirect
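# (Added note, not in the original file) The redirect below can outrun Datastore's
# eventually consistent global queries, which is the likely reason new comments
# sometimes do not appear immediately; a strongly consistent ancestor query would
# probably remove the need for this sleep.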
self.redirect('/#comments')
class ErrorHandler(MainPage):
def get(self):
template = jinja_environment.get_template('page_body.html')
self.write(template.render(self.get_template_values('Please write a comment and resubmit')))
app = webapp2.WSGIApplication([
('/', MainPage),
('/sign', MainPage),
('/error', ErrorHandler),
], debug=True) | [
"[email protected]"
]
| |
4673777d1c1a994069de18c0acda79831f581168 | 611055f18da392e5a63b2d80ce102701201981eb | /src/apps/comentarios/admin.py | 52f74fce4df52fd09dd0fe7013e06fc2089b1463 | []
| no_license | danielhuamani/django-backbone | facf6f2ced78991577957bd2f8bb8c42255cd795 | 6523e19d8599753ccf28b6a2d4f511ec0fe0f1c7 | refs/heads/master | 2021-01-10T12:47:26.514543 | 2015-11-18T17:12:02 | 2015-11-18T17:12:02 | 45,657,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from django.contrib import admin
from .models import Comentario
# Register your models here. #
admin.site.register(Comentario)
| [
"[email protected]"
]
| |
9298894090d4c51fc83e043ba6abe8d100c3ead7 | 7fe1869ec99c99744a3c92a44bf1cdaac43ba245 | /book_tools/pymobi/__init__.py | 66e7169e529bd92f0a5db2a7edcfd90605a12411 | [
"MIT"
]
| permissive | Aladex/sopds-fb2sax-sqlalchemy | 1fb77e11b18a1f8ce86d77eaf6a187e197692941 | 648ca37901ef3bc9478fbcf61d743a00f13cdfdc | refs/heads/master | 2022-10-27T10:50:32.479783 | 2022-10-09T04:26:02 | 2022-10-09T04:26:02 | 253,155,416 | 0 | 0 | MIT | 2022-07-06T20:34:56 | 2020-04-05T04:29:05 | Python | UTF-8 | Python | false | false | 185 | py |
__author__ = 'Yugang LIU'
__email__ = '[email protected]'
__version__ = '0.1.3'
__license__ = 'GPLv3'
from book_tools.pymobi.mobi import BookMobi
from book_tools.pymobi.util import *
| [
"[email protected]"
]
| |
de3e575f2387290b028793eb66449df3e1387a60 | c26985d3c075dbb3c0f1b9bf00d4fb2cbfcac5fe | /site_management/tests/test_urls.py | aeb6fcc9a7563cd10cb24618191a7f3b3795682b | []
| no_license | Code-Institute-Submissions/on_the_rocks | 2e20fa17a9a817ac86919e5a528f140aeb9a19d6 | 2728877f6554c7552a2ee9139740a079a2343dfd | refs/heads/master | 2023-01-02T14:57:47.037723 | 2020-10-31T23:18:35 | 2020-10-31T23:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | from django.test import SimpleTestCase
from django.urls import resolve, reverse
from site_management.views import site_management, approve_review,\
delete_review, user_profile, order_history
class TestUrls(SimpleTestCase):
def test_site_management_url_resolved(self):
url = reverse('site_management')
self.assertEqual(url, '/site_management/')
self.assertEqual(resolve(url).func, site_management)
def test_approve_review_url_resolved(self):
url = reverse('approve_review', args=[2])
self.assertEqual(url, '/site_management/approve_review/2/')
self.assertEqual(resolve(url).func, approve_review)
def test_delete_review_url_resolved(self):
url = reverse('delete_review', args=[2])
self.assertEqual(url, '/site_management/delete_review/2/')
self.assertEqual(resolve(url).func, delete_review)
def test_user_profile_url_resolved(self):
url = reverse('user_profile', args=["Dave23"])
self.assertEqual(url, '/site_management/user_profile/Dave23/')
self.assertEqual(resolve(url).func, user_profile)
def test_order_history_url_resolved(self):
url = reverse('order_history', args=["2d456tsd"])
self.assertEqual(url, '/site_management/order_history/2d456tsd/')
self.assertEqual(resolve(url).func, order_history)
| [
"[email protected]"
]
| |
e8daaf5d3be0632b16552f307402e1e84024c44b | 55ad15f616ff7e9f20a7d448b8ea6193de7dc586 | /subprocess_wrapper.py | 90ef592e8899e8593dec9c7dad2ad62bbc30b9f4 | []
| no_license | sammac/samstools | 0abcbcd0ba4ee404cc66c384f8624f84c983bf81 | 0051623ab0125a4bdf767d1e62c946b44b24d2b8 | refs/heads/master | 2020-04-22T23:48:23.524621 | 2013-09-05T22:39:07 | 2013-09-05T22:39:07 | 10,346,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | #!/usr/bin/env python
from subprocess import Popen as sp_Popen, PIPE as sp_PIPE
from threading import Thread as th_Thread
class SPWrapper(object):
"""Supply a command, then run, specifiying timeout"""
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.timed_out = None
def run(self, timeout=None):
"""Runs subprocess in a thread with optional timeout.
Returns (returncode,stdout,stderr,timed_out)"""
def target():
self.process = sp_Popen(self.cmd, stdout=sp_PIPE, stderr=sp_PIPE,
shell=True)
self.output = self.process.communicate()
thread = th_Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
try:
self.timed_out = True
self.process.terminate()
thread.join()
except:
pass
else:
self.timed_out = False
return self.process.returncode, self.output[0], self.output[1], self.timed_out
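# (Added usage sketch, not part of the original module) The command and timeout
# below are illustrative only.
if __name__ == '__main__':
    rc, out, err, timed_out = SPWrapper('echo hello').run(timeout=5)
    print(rc, out, err, timed_out)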
| [
"[email protected]"
]
| |
ee5d9088a648e83c220c2dc7e4f83db84f9ab93e | f02e654d5590a861804e3220ed76ba2192e1699b | /aslam/deprecated/ASLAM/deprecated/old2/test.py | bdc9562460aa075503b52776c3db9d3ae345080c | [
"MIT",
"BSD-3-Clause"
]
| permissive | AmarNathH/software | 73e2afd3affaf2c1595b406480edac8b8fb2fcac | e225810c7501250f48add43349a64f49450cc79f | refs/heads/master | 2020-12-02T20:50:18.439874 | 2017-07-03T16:51:07 | 2017-07-03T16:51:07 | 96,219,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python2.7
from classes import *
import numpy as n
S = State(5, 1, 5, 1)
for x in range(5): S.objects[str(x)] = Object()
S.update()
S.hObs('0', 45, 5)
S.dObs('0', 10**(1./2), 0.5)
S.update()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
pmap = S.objects['0'].pmap
xv, yv, zv = [], [], []
for x in range(len(pmap)):
for y in range(len(pmap[0])):
xv += [x / GRIDSCALE]
yv += [y / GRIDSCALE]
zv += [pmap[x][y]]
ax.scatter(xv, yv, zv)
plt.show()
#for i in range(len(x)):
# for j in range(len(y)):
| [
"[email protected]"
]
| |
d62e8f7b4422bd4d811bf2f519efdfd6bdc74feb | 9b07d27c172787358836f29f8c7c2d489ecac5e7 | /nCrawler.py | 4283658b3d81594c78a16cd6e017b17a4fc5008d | []
| no_license | cassar1/Crawler | cb05b91b2477a63c9e647df342855ebbef57465d | 0bf5effe7d0af0d33628f2413f009448c0fb343a | refs/heads/master | 2021-01-13T01:39:58.514385 | 2014-03-20T11:55:15 | 2014-03-20T11:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | __author__ = 'Jurgen'
| [
"[email protected]"
]
| |
3eb31eaae7cb87b8e84c1b0b56b39c193af3feee | 1b598ee68cd6d840990298f470cbd9ff60ccd01b | /tests/test_face_cascade.py | c0d650a2b4a148e7f4ef71f7a2fd971bf1b29f05 | [
"MIT"
]
| permissive | luizcarloscf/cv2utils | 1cfbbc24bc8c4bf319868dd1a95746f5d93002be | d5efdb8c8e9d86529b235384a90ba53266bc4668 | refs/heads/master | 2023-02-16T08:34:26.243785 | 2023-02-02T13:55:50 | 2023-02-02T13:55:50 | 207,383,388 | 1 | 1 | MIT | 2023-02-02T13:55:52 | 2019-09-09T19:01:25 | Python | UTF-8 | Python | false | false | 915 | py | import cv2
import pytest
from cv2utils import FaceCascade
def test_detect_faces():
image = cv2.imread("face.jpg")
face_detector = FaceCascade()
result = face_detector.detect_faces(image)
assert len(result) == 1
assert type(result[0]) is dict
assert 'box' in result[0]
assert 'label' in result[0]
assert type(result[0]['box']) is list
assert all([True if type(i) is int else False for i in result[0]['box']]) is True
assert type(result[0]['label']) is str
assert len(result[0]['box']) == 4
def test_invalid_image():
not_image = cv2.imread("requirements-test.txt")
face_detector = FaceCascade()
with pytest.raises(ValueError):
face_detector.detect_faces(not_image)
def test_no_face():
face_detector = FaceCascade()
no_face = cv2.imread("no_face.jpg")
result = face_detector.detect_faces(no_face)
assert len(result) == 0
| [
"[email protected]"
]
| |
76347a0bc807d2e3b00e30fef2748954370b3171 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/dn5 - tviti/M-17135-2263.py | 9deedfe8d85e527c7a5c6e89ba8391269f3c8492 | []
| no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,569 | py | def unikati(s):
sez = []
for x in s:
if x not in sez:
sez.append(x)
return sez
def avtor(tvit):
a = ""
for x in range(len(tvit)):
if tvit[x] == ":":
break
else:
a += tvit[x]
return a
def izloci_besedo(beseda):
beseda_1 = ""
for x in range(len(beseda)):
if beseda[x].isalnum() == True:
beseda_1 += beseda[x]
elif beseda[x] == "-" and beseda[x-1].isalnum() == True and beseda[x+1].isalnum() == True:
beseda_1 += beseda[x]
return beseda_1
def vsi_avtorji(tviti):
sez = []
for x in tviti:
avtor_ime = avtor(x)
if avtor_ime not in sez:
sez.append(avtor_ime)
return sez
def se_zacne_z(tvit, c):
sez = tvit.split()
sez_besed = []
for x in sez:
if x[0] == c:
if x[-1].isalnum() == True:
sez_besed.append(x[1:])
else:
sez_besed.append(x[1:-1])
return sez_besed
def vse_afne(tviti):
sez_imen = []
for x in tviti:
besede = x.split()
for x in besede:
if x[0] == "@":
if x[-1].isalnum() == True:
if x[1:] not in sez_imen:
sez_imen.append(x[1:])
else:
if x[1:-1] not in sez_imen:
sez_imen.append(x[1:-1])
return sez_imen
def vse_osebe(tviti):
sez = vse_afne(tviti)
sez_imen = vsi_avtorji(tviti)
n = 0
for x in range(len(sez)):
if sez[n] not in sez_imen:
sez_imen.append(sez[n])
n += 1
sez = sorted(sez_imen)
return sez
def vsi_hashtagi(tviti):
sez = []
for x in tviti:
besede = x.split()
for x in besede:
if x[0] == "#":
if x[-1].isalnum() == True:
if x[1:] not in sez:
sez.append(x[1:])
else:
if x[1:-1] not in sez:
sez.append(x[1:-1])
return sez
def zberi_se_zacne_z(tviti, c):
sez_besed = []
for x in tviti:
sez = x.split()
for x in sez:
if x[0] == c:
if x[-1].isalnum() == True:
if x[1:] not in sez_besed:
sez_besed.append(x[1:])
else:
if x[1:-1] not in sez_besed:
sez_besed.append(x[1:-1])
return sez_besed
def custva(tviti, hashtagi):
sez_imen = []
for x in tviti:
sez = x.split()
avtor = sez[0][:-1]
for x in sez:
if x[0] == "#":
if x[1:] in hashtagi and avtor not in sez_imen:
sez_imen.append(avtor)
return sorted(sez_imen)
def se_poznata(tviti, oseba1, oseba2):
zakljucek = False
sez = [oseba1, oseba2]
for x in sez:
for y in tviti:
besede = y.split()
for s in besede:
sez_besed = []
if s[0] == "@":
if besede[0][:-1] == x:
if s[-1].isalnum() == True:
if s[1:] not in sez_besed:
sez_besed.append(s[1:])
else:
if s[1:-1] not in sez_besed:
sez_besed.append(s[1:-1])
for d in sez_besed:
if x == oseba1:
if oseba2 in sez_besed:
zakljucek = True
else:
if oseba1 in sez_besed:
zakljucek = True
return zakljucek
import unittest
class TestTviti(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_unikat(self):
self.assertEqual(unikati([1, 2, 1, 1, 3, 2]), [1, 2, 3])
self.assertEqual(unikati([1, 3, 2, 1, 1, 3, 2]), [1, 3, 2])
self.assertEqual(unikati([1, 5, 4, 3, 2]), [1, 5, 4, 3, 2])
self.assertEqual(unikati([1, 1, 1, 1, 1]), [1])
self.assertEqual(unikati([1]), [1])
self.assertEqual(unikati([]), [])
self.assertEqual(unikati(["Ana", "Berta", "Cilka", "Berta"]), ["Ana", "Berta", "Cilka"])
def test_avtor(self):
self.assertEqual(avtor("janez: pred dvopičjem avtor, potem besedilo"), "janez")
self.assertEqual(avtor("ana: malo krajse ime"), "ana")
self.assertEqual(avtor("benjamin: pomembne so tri stvari: prva, druga in tretja"), "benjamin")
def test_vsi_avtorji(self):
self.assertEqual(vsi_avtorji(self.tviti), ["sandra", "berta", "ana", "cilka", "benjamin", "ema"])
self.assertEqual(vsi_avtorji(self.tviti[:3]), ["sandra", "berta"])
def test_izloci_besedo(self):
self.assertEqual(izloci_besedo("@ana"), "ana")
self.assertEqual(izloci_besedo("@@ana!!!"), "ana")
self.assertEqual(izloci_besedo("ana"), "ana")
self.assertEqual(izloci_besedo("!#$%\"=%/%()/Ben-jamin'"), "Ben-jamin")
def test_vse_na_crko(self):
self.assertEqual(se_zacne_z("Benjamin $je $skocil! Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("Benjamin $je $skocil! #Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("ana: kdo so te @berta, @cilka, @dani? #krneki", "@"), ["berta", "cilka", "dani"])
def test_zberi_na_crko(self):
self.assertEqual(zberi_se_zacne_z(self.tviti, "@"), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
self.assertEqual(zberi_se_zacne_z(self.tviti, "#"), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_afne(self):
self.assertEqual(vse_afne(self.tviti), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
def test_vsi_hashtagi(self):
self.assertEqual(vsi_hashtagi(self.tviti), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_osebe(self):
self.assertEqual(vse_osebe(self.tviti), ['ana', 'benjamin', 'berta', 'cilka', 'dani', 'ema', 'sandra'])
class TestDodatna(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_custva(self):
self.assertEqual(custva(self.tviti, ["dougcajt", "krneki"]), ["ana", "sandra"])
self.assertEqual(custva(self.tviti, ["luft"]), ["cilka"])
self.assertEqual(custva(self.tviti, ["meh"]), [])
def test_se_poznata(self):
self.assertTrue(se_poznata(self.tviti, "ana", "berta"))
self.assertTrue(se_poznata(self.tviti, "ema", "ana"))
self.assertFalse(se_poznata(self.tviti, "sandra", "ana"))
self.assertFalse(se_poznata(self.tviti, "cilka", "luft"))
self.assertFalse(se_poznata(self.tviti, "cilka", "balon"))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
49af3d314c37c310a9fcc94f87b731f8337355e4 | f75e668cd21924f11e39d79eab978574d4f223c1 | /esframework/tests/event_handling/event_bus_test.py | c8b572037ad54bc08c2fd744e2248152429998a5 | []
| no_license | weemen/esframework | 141b2723214f11e11aec43d49ff98142b5f11eba | 044d9e12bca274b4ca52ccb0058b44e47beabbfd | refs/heads/master | 2020-03-28T19:31:18.872323 | 2019-01-12T17:16:21 | 2019-01-12T17:16:21 | 148,983,855 | 1 | 0 | null | 2019-01-12T17:16:22 | 2018-09-16T10:10:30 | Python | UTF-8 | Python | false | false | 3,184 | py | import unittest
from esframework.event_handling.event_bus import BasicBus
from esframework.exceptions import EventBusException
from esframework.tests.assets import EventA
from esframework.tests.assets.event_handling import SimpleEventListener
class TestBasicBus(unittest.TestCase):
""" testing the basic bus features """
def test_it_can_subscribe_listeners(self):
event_listener = SimpleEventListener()
event_bus = BasicBus()
event_bus.subscribe(event_listener)
self.assertEqual(1, len(event_bus.get_event_listeners()))
def test_it_can_unsubscribe_listeners(self):
event_listener = SimpleEventListener()
event_bus = BasicBus()
event_bus.subscribe(event_listener)
event_bus.unsubscribe(event_listener)
self.assertEqual(0, len(event_bus.get_event_listeners()))
def test_it_can_emit_a_stream_of_events(self):
event_listener = SimpleEventListener()
event_bus = BasicBus()
event_bus.subscribe(event_listener)
stream_of_events = [
EventA("792E4DDA-5AE2-4BF3-A834-62D09892DC62", "foo"),
EventA("EC407041-8454-44E2-873F-951B227B3BFB", "bar")
]
event_bus.emit(stream_of_events)
self.assertEqual(2, len(event_listener.get_received_messages()))
def test_it_cannot_subscribe_to_non_event_listeners(self):
event_bus = BasicBus()
listener = object()
with self.assertRaises(EventBusException) as ex:
event_bus.subscribe(listener)
self.assertEqual(
str(ex.exception),
"Only classes based on EventListener can subscribe")
def test_it_cannot_unsubscribe_from_non_event_listeners(self):
event_bus = BasicBus()
listener = object()
with self.assertRaises(EventBusException) as ex:
event_bus.unsubscribe(listener)
self.assertEqual(
str(ex.exception),
"Only classes based on EventListener can unsubscribe")
def test_it_cannot_unsubscribe_a_non_existing_event_listener(self):
event_listener = SimpleEventListener()
event_bus = BasicBus()
with self.assertRaises(EventBusException) as ex:
event_bus.unsubscribe(event_listener)
self.assertEqual(
str(ex.exception),
"Cannot unsubscribe non existing listener from list")
def test_it_can_only_emit_a_list_as_stream(self):
event_listener = SimpleEventListener()
event_bus = BasicBus()
event_bus.subscribe(event_listener)
with self.assertRaises(EventBusException) as ex:
event_bus.emit(object())
self.assertEqual(
str(ex.exception),
"event stream must be a list")
def test_it_can_only_emit_a_list_with_domain_events(self):
event_listener = SimpleEventListener()
event_bus = BasicBus()
event_bus.subscribe(event_listener)
with self.assertRaises(EventBusException) as ex:
event_bus.emit([
1
])
self.assertEqual(
str(ex.exception),
"domain event must be of type DomainEvent")
| [
"[email protected]"
]
| |
c37a39159451217ffd7944df64ecb8052010e5ef | 8d18a96e67798938c64a5a05e1338820679be6b5 | /DocApp/main/migrations/0002_auto_20200501_2121.py | 1939408f6fcd56c8b55977c4ff8e998cbb0f5ca2 | []
| no_license | vishwaspuri/DocApp | 043a59805558e9dc0a87e1bcee77ef6e3b854481 | 0afd2c653f4e1a07d5ca413c7b9c4b88f09858e4 | refs/heads/master | 2022-09-06T06:52:17.401280 | 2020-05-14T09:11:53 | 2020-05-14T09:11:53 | 255,527,876 | 0 | 7 | null | 2020-05-26T19:00:20 | 2020-04-14T06:25:33 | JavaScript | UTF-8 | Python | false | false | 1,208 | py | # Generated by Django 3.0.5 on 2020-05-01 21:21
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='is_provider',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='profile',
name='id',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
),
migrations.CreateModel(
name='Connection',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_on', models.DateTimeField(auto_now_add=True)),
('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='connections', to='main.Profile')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='userconnections', to='main.Profile')),
],
),
]
| [
"[email protected]"
]
| |
56f8397cd80c31bf0258a6c8726c43dfa3581ba0 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5688567749672960_1/Python/Jakube/A.py | 4b619d8aaec440fa7678ace44a08aae319de1d8e | []
| no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | def splitter(number):
s = str(number)
return int(s[:len(s)//2] or "0"), int(s[len(s)//2:]), len(s[len(s)//2:])
def compute(number):
steps = 0
while number:
# get second part of the number
half1, half2, l = splitter(number)
if half2 == 0:
steps += 1
number -= 1
half1, half2, l = splitter(number)
steps += half2 - 1
number -= half2 -1
number = half1 * 10**l + 1
if number == 1:
return steps + 1
# switch
if str(number) != str(number)[::-1]:
number = int(str(number)[::-1])
steps += 1
mi = int(str(number)[1:] or str(number))
number -= mi
steps += mi
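# (Added note, not in the original file) compute() appears to solve the classic
# "start at 1; each move either adds 1 or reverses the decimal digits; reach N in
# the fewest moves" problem, working backwards from N and counting the
# subtractions and digit reversals it undoes along the way.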
def read_number(f):
return int(f.readline().strip())
def main():
with open('A-large.in', 'r') as f:
test_cases = read_number(f)
for test_case in range(test_cases):
number = read_number(f)
#print(number)
print('Case #{}: {}'.format(test_case + 1, compute(number)))
if __name__ == '__main__':
main() | [
"[email protected]"
]
| |
d0ac595a122ecd472ef080d0b8bd510635b637ea | 6fab6422c26e00cde21f51f8f10eb88ff5c458af | /api/serializers.py | 51f7198a3741d3245a04b600aeef1d4bc543c61a | []
| no_license | nicksonlangat/alzy-api | ffa3f43198fa0a6e8f58b88ae3f206e4c69f6cfb | d4a95da469d0895eb0c8a2897f3927e61da89aa9 | refs/heads/master | 2023-02-28T08:36:15.600769 | 2021-02-09T22:37:03 | 2021-02-09T22:37:03 | 337,120,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password' : {'write_only': True, 'required': True}}
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
return user
class ReminderSerializer(serializers.ModelSerializer):
class Meta:
model = Reminder
fields = ('id', 'title','details','deadline',)
def create(self, validated_data):  # override the serializer's built-in create()
# create new instance of the model
reminder=Reminder.objects.create(**validated_data)
return reminder
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = File
fields = "__all__" | [
"[email protected]"
]
| |
10661eaf4db62200c682b0ed7858c28b09a06815 | 41e36b381affe7e9c6ed0681b7eba99b6e96f8f8 | /backend/grocery_assistant/grocery_assistant/tests/test_recipes_post.py | 7546eacaf12bcc4dd6693082eb7447a6748103d1 | []
| no_license | vbifaa/foodgram-project-react | 8dc321d968ec064bcc76aede2a21769837a0ba35 | 30db5ef4968c723f3b664a614ed187e22f2015f7 | refs/heads/master | 2023-07-11T21:17:55.291522 | 2021-08-22T18:14:53 | 2021-08-22T18:14:53 | 388,137,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | from .data.set_data import SetOneRecipeData
class TestPostRecipe(SetOneRecipeData):
def test_correct_post_recipe(self):
response = self.auth_client.post(
'/api/recipes/', self.recipes[self.recipe_id], format='json'
)
self.assert_correct_recipe_response(
response=response,
status_code=201,
correct_data=self.get_recipe_response_data(
recipe_id=self.recipe_id,
author_response_data=self.author_data
)
)
def test_not_auth_post_recipe(self):
response = self.guest_client.post(
'/api/recipes/', self.recipes[self.recipe_id], format='json'
)
self.assert_bad_request(
response=response,
status_code=401,
schema_field='detail'
)
def test_post_recipe_with_not_exist_tag(self):
recipe = self.recipes[self.recipe_id]
recipe['tags'].append(len(self.tags) + 1)
response = self.auth_client.post(
'/api/recipes/', self.recipes[self.recipe_id], format='json'
)
self.assert_bad_request(
response=response,
status_code=400,
schema_field='tags'
)
| [
"[email protected]"
]
| |
f3c46d47d4582718dfb6dd5b01fc9693777fc6bd | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/network/azure-mgmt-dns/azure/mgmt/dns/v2023_07_01_preview/aio/_dns_management_client.py | 27d21876b1846ae591194de288047cefd6a1b680 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,306 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models as _models
from ..._serialization import Deserializer, Serializer
from ._configuration import DnsManagementClientConfiguration
from .operations import DnsResourceReferenceOperations, DnssecConfigsOperations, RecordSetsOperations, ZonesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class DnsManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""The DNS Management Client.
:ivar dnssec_configs: DnssecConfigsOperations operations
:vartype dnssec_configs:
azure.mgmt.dns.v2023_07_01_preview.aio.operations.DnssecConfigsOperations
:ivar record_sets: RecordSetsOperations operations
:vartype record_sets: azure.mgmt.dns.v2023_07_01_preview.aio.operations.RecordSetsOperations
:ivar zones: ZonesOperations operations
:vartype zones: azure.mgmt.dns.v2023_07_01_preview.aio.operations.ZonesOperations
:ivar dns_resource_reference: DnsResourceReferenceOperations operations
:vartype dns_resource_reference:
azure.mgmt.dns.v2023_07_01_preview.aio.operations.DnsResourceReferenceOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2023-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = DnsManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.dnssec_configs = DnssecConfigsOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
self.record_sets = RecordSetsOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
self.zones = ZonesOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
self.dns_resource_reference = DnsResourceReferenceOperations(
self._client, self._config, self._serialize, self._deserialize, "2023-07-01-preview"
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "DnsManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details: Any) -> None:
await self._client.__aexit__(*exc_details)
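# Illustrative usage sketch (not part of the generated client). The subscription
# id and resource group below are hypothetical placeholders, and the listing call
# assumes the ZonesOperations.list_by_resource_group API of azure-mgmt-dns:
#
#   async with DnsManagementClient(credential, "<subscription-id>") as client:
#       async for zone in client.zones.list_by_resource_group("<resource-group>"):
#           print(zone.name)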
| [
"[email protected]"
]
| |
e8bad14d95e08fc8e990e74f3bdf81de17ebc718 | 23b7fa714698be444d82ac649314616495c66235 | /petl/transform/__init__.py | 5dac1f6cd765844d320a78b291999fef24a54ef6 | [
"MIT"
]
| permissive | mbelmadani/petl | a38ed1e595157fb556fe86ae32e796f6eff60a7a | b6867f056bf44d699f8f7b8432769e4b5127e937 | refs/heads/master | 2021-04-03T09:04:56.785188 | 2019-08-06T15:09:40 | 2019-08-06T15:09:40 | 124,597,339 | 0 | 0 | MIT | 2018-03-09T21:53:44 | 2018-03-09T21:53:44 | null | UTF-8 | Python | false | false | 2,444 | py | from __future__ import absolute_import, print_function, division
from petl.transform.basics import cut, cutout, movefield, cat, annex, \
addfield, addfieldusingcontext, addrownumbers, addcolumn, rowslice, head, \
tail, skipcomments, stack
from petl.transform.headers import rename, setheader, extendheader, \
pushheader, skip, prefixheader, suffixheader, sortheader
from petl.transform.conversions import convert, convertall, replace, \
replaceall, update, convertnumbers, format, formatall, interpolate, \
interpolateall
from petl.transform.sorts import sort, mergesort, issorted
from petl.transform.selects import select, selectop, selectcontains, \
selecteq, selectfalse, selectge, selectgt, selectin, selectis, \
selectisinstance, selectisnot, selectle, selectlt, selectne, selectnone, \
selectnotin, selectnotnone, selectrangeclosed, selectrangeopen, \
selectrangeopenleft, selectrangeopenright, selecttrue, \
selectusingcontext, rowlenselect, facet, biselect
from petl.transform.joins import join, leftjoin, rightjoin, outerjoin, \
crossjoin, antijoin, lookupjoin, unjoin
from petl.transform.hashjoins import hashjoin, hashleftjoin, hashrightjoin, \
hashantijoin, hashlookupjoin
from petl.transform.reductions import rowreduce, mergeduplicates,\
aggregate, groupcountdistinctvalues, groupselectfirst, groupselectmax, \
groupselectmin, merge, fold, Conflict, groupselectlast
from petl.transform.fills import filldown, fillright, fillleft
from petl.transform.regex import capture, split, search, searchcomplement, \
sub
from petl.transform.reshape import melt, recast, transpose, pivot, flatten, \
unflatten
from petl.transform.maps import fieldmap, rowmap, rowmapmany, rowgroupmap
from petl.transform.unpacks import unpack, unpackdict
from petl.transform.dedup import duplicates, unique, distinct, conflicts, \
isunique
from petl.transform.setops import complement, intersection, \
recordcomplement, diff, recorddiff, hashintersection, hashcomplement
from petl.transform.intervals import intervaljoin, intervalleftjoin, \
intervaljoinvalues, intervalantijoin, intervallookup, intervallookupone, \
intervalrecordlookup, intervalrecordlookupone, intervalsubtract, \
facetintervallookup, facetintervallookupone, facetintervalrecordlookup, \
facetintervalrecordlookupone, collapsedintervals
from petl.transform.validation import validate
| [
"[email protected]"
]
| |
bc72cc0f0343ca37bc40790a466c5e2c0b09be43 | 2f46c6463d4f871a72d4296c3dae00f029e892f1 | /src/cogent3/maths/stats/jackknife.py | 33192edc584ffa4dc6506935473a1e778893a7bd | [
"BSD-3-Clause"
]
| permissive | BrendanBeaton/cogent3 | a09376c55f24da837690219157770ad94e917579 | e10f4f933921d52b000096b7c016190a1602add6 | refs/heads/master | 2022-12-02T07:59:11.112306 | 2020-06-30T05:40:33 | 2020-06-30T05:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,782 | py | import numpy as np
from cogent3.util.table import Table
__author__ = "Anuj Pahwa, Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Anuj Pahwa", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.6.30a"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Production"
def index_gen(length):
data = tuple(range(length))
def gen(i):
temp = list(data)
temp.pop(i)
return temp
return gen
class JackknifeStats(object):
"""Computes the jackknife statistic for a particular statistical function
as outlined by 'Tukey's Jackknife Method' in Biometry by Sokal/Rohlf."""
def __init__(self, length, calc_stat, gen_index=index_gen):
"""
Parameters
----------
length : int
The length of the data set (since data is not passed to this class).
calc_stat : callable
A callback function that computes the required statistic of a defined dataset.
gen_index
A callback function that generates a list of indices that are used to sub-sample the dataset.
"""
super(JackknifeStats, self).__init__()
self.n = length
self.calc_stat = calc_stat
self.gen_index = gen_index(self.n)
self._subset_statistics = None
self._pseudovalues = None
self._jackknifed_stat = None
self._sample_statistic = None
self._standard_error = None
def jackknife(self):
"""Computes the jackknife statistics and standard error"""
n = self.n
n_minus_1 = n - 1
# compute the statistic in question on the whole data set
self._sample_statistic = self.calc_stat(list(range(self.n)))
n_sample_statistic = n * self._sample_statistic
# compute the jackknife statistic for the data by removing an element
# in each iteration and computing the statistic.
subset_statistics = []
pseudovalues = []
for index in range(self.n):
stat = self.calc_stat(self.gen_index(index))
subset_statistics.append(stat)
pseudovalue = n_sample_statistic - n_minus_1 * stat
pseudovalues.append(pseudovalue)
self._pseudovalues = np.array(pseudovalues)
self._subset_statistics = np.array(subset_statistics)
self._jackknifed_stat = self._pseudovalues.mean(axis=0)
# Compute the approximate standard error of the jackknifed estimate
# of the statistic
variance = np.square(self._pseudovalues - self._jackknifed_stat).sum(axis=0)
variance_norm = np.divide(variance, n * n_minus_1)
self._standard_error = np.sqrt(variance_norm)
@property
def sample_stat(self):
if self._sample_statistic is None:
self.jackknife()
return self._sample_statistic
@property
def jackknifed_stat(self):
if self._jackknifed_stat is None:
self.jackknife()
return self._jackknifed_stat
@property
def standard_error(self):
if self._standard_error is None:
self.jackknife()
return self._standard_error
@property
def sub_sample_stats(self):
"""Return a table of the sub-sample statistics"""
# if the statistics haven't been run yet.
if self._subset_statistics is None:
self.jackknife()
# generate table
title = "Subsample Stats"
rows = []
for index in range(self.n):
row = [index]
subset_statistics = self._subset_statistics[index]
try:
for value in subset_statistics:
row.append(value)
except TypeError:
row.append(subset_statistics)
rows.append(row)
header = ["i"]
subset_stats = self._subset_statistics[0]
try:
num_datasets = len(subset_stats)
for i in range(num_datasets):
header.append("Stat_%s-i" % i)
except TypeError:
header.append("Stat-i")
return Table(data=rows, header=header, title=title)
@property
def pseudovalues(self):
"""Return a table of the Pseudovalues"""
# if the statistics haven't been run yet.
if self._pseudovalues is None:
self.jackknife()
# detailed table
title = "Pseudovalues"
rows = []
for index in range(self.n):
row = [index]
pseudovalues = self._pseudovalues[index]
try:
for value in pseudovalues:
row.append(value)
except TypeError:
row.append(pseudovalues)
rows.append(row)
header = ["i"]
pseudovalues = self._pseudovalues[0]
try:
num_datasets = len(pseudovalues)
for i in range(num_datasets):
header.append("Pseudovalue_%s-i" % i)
except TypeError:
header.append("Pseudovalue-i")
return Table(data=rows, header=header, title=title)
@property
def summary_stats(self):
"""Return a summary table with the statistic value(s) calculated for the
full data-set, the jackknife statistics and standard errors."""
# if the statistics haven't been run yet.
if self._jackknifed_stat is None:
self.jackknife()
header = ["Sample Stat", "Jackknife Stat", "Standard Error"]
title = "Summary Statistics"
rows = np.vstack(
(self._sample_statistic, self._jackknifed_stat, self._standard_error)
)
rows = rows.transpose()
return Table(header=header, data=rows, title=title)
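# Illustrative usage sketch (the data and statistic below are hypothetical, not
# part of this module):
#
#   data = np.array([2.0, 4.0, 6.0, 8.0])
#   jk = JackknifeStats(len(data), lambda idx: data.take(idx).mean())
#   jk.jackknife()
#   jk.sample_stat      # 5.0
#   jk.jackknifed_stat  # 5.0 (for the mean, the pseudovalues reproduce the data)
#   jk.standard_error   # sqrt(5/3) ~= 1.29, the usual standard error of the mean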
| [
"[email protected]"
]
| |
4d110ba899e09c73a7e36c1890edd98ee87c735a | 8a5a3f1ad1bdc659b74c9899888853c987047507 | /agile_project/agile_project/settings.py | 9e272ca414edeac3218f050326117ebd1314e2ce | []
| no_license | tomaszblawucki/stan-gregorsky | c126dc78193b2a1c6eb69a124680bfc3f6c318a8 | 25784304b8b8e0633308f7400b4d70858af6d9bc | refs/heads/master | 2020-04-23T16:25:13.702519 | 2019-06-10T14:23:53 | 2019-06-10T14:23:53 | 171,297,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,887 | py | """
Django settings for agile_project project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e8z01pr1la8!&w8z6hj)clu1_r&*4jp-y#f-w8j4kk@@t#15si'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['10.0.2.2', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#rest framework
'rest_framework',
'rest_framework.authtoken',
#project apps
'events_app',
'group_management_app',
'ideas_app',
'messages_app',
'tasks_app',
'users_app',
]
# c4c15f3c192281c6e5bc46a726f80229c220f6f2
AUTH_USER_MODEL = 'users_app.User'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'agile_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'agile_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication', # <-- And here
],
}
# Email service configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'agileappsmtp'
EMAIL_HOST_PASSWORD = 'maszynaturinga'
EMAIL_PORT = 587
| [
"[email protected]"
]
| |
44adf6b6092d9e48ed92769805836377be265962 | 2b694821034ac1415b7610849a58af92e8af3f64 | /main.py | 1a8062ba765b25800027bdc97d89b6a9423395ac | []
| no_license | HermesSJC/PythonTranslate | 25d1d4bc7a77ccaa9caa8c0f48da401eb120cb3a | 14c0df89ef4b554908e34cd8811a776374edf695 | refs/heads/master | 2020-04-12T19:37:47.266709 | 2019-04-16T05:20:12 | 2019-04-16T05:20:12 | 162,713,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from MainWindow import MainWindow
from PySide2.QtWidgets import QApplication
import sys
def main():
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | [
"[email protected]"
]
| |
862c81b2ef0de78fa644bf52c982c1328e8d71a2 | 8d9b037e466dd18e878bac7e42a1d0ef25097161 | /metrics/generate_samples.py | a5683e4ce9e5120c36f30ccf8ffb13f2bed3355b | []
| no_license | klory/MPG_Arxiv | fa4946f14b9a0deb2523b0bdbdfa37abc8f47205 | 4cd5b29b2d4640c49b3fc92d04131bf33aec250a | refs/heads/main | 2023-08-27T02:01:53.104604 | 2021-11-03T23:32:01 | 2021-11-03T23:32:01 | 406,527,697 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | import torch
import os
import pdb
import numpy as np
import sys
sys.path.append('../')
import common
def randomize_ingr_label(bs):
ingr_label = torch.zeros(bs, 10)
for i in range(bs):
idxs = np.random.choice(10, np.random.randint(4), replace=False)
ingr_label[i, idxs] = 1.0
return ingr_label
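# i.e. each row of ingr_label is a 10-dim multi-hot vector with between 0 and 3
# active labels, since np.random.randint(4) draws from {0, 1, 2, 3}.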
if __name__ == '__main__':
from metrics.utils import load_args
args = load_args()
# assertations
assert 'ckpt_path' in args.__dict__
assert 'device' in args.__dict__
assert 'batch_size' in args.__dict__
assert 'model_name' in args.__dict__
cur_dir = os.path.dirname(os.path.realpath(__file__))
print('current file dir:', cur_dir)
if 'stackgan2/' in args.ckpt_dir:
from stackgan2.generate_batch import BatchGenerator
os.chdir('../stackgan2/')
elif 'AttnGAN/' in args.ckpt_dir:
from AttnGAN.code.generate_batch_Attn import BatchGenerator
os.chdir('../AttnGAN/code/')
elif 'mpg/' in args.ckpt_dir:
assert 'truncation' in args.__dict__
from mpg.generate_batch import BatchGenerator
os.chdir('../mpg/')
device = args.device
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
args.ckpt_path = common.ROOT / args.ckpt_path
batch_generator = BatchGenerator(args)
os.chdir(cur_dir)
save_dir = f'outputs/seed={seed}'
os.makedirs(save_dir, exist_ok=True)
stem = args.ckpt_path.stem
# ****************************************************************
# Generate some images
# ****************************************************************
print('generating images...')
txt, real, fake, label = batch_generator.generate_all()
fp = f'{save_dir}/{args.model_name}={stem}_trunc={args.truncation:.2f}.png'
common.save_captioned_image(txt, fake, fp, font=15, opacity=0.2, color=(255,255,0), loc=(0,0), nrow=int(np.sqrt(args.batch_size)), pad_value=1)
print(f'saved to {fp}') | [
"[email protected]"
]
| |
2c7f52b950e7ba35aa93cd70115be97c7e9623a2 | 2c6ecf3feb62fda6753cac49be6d8cee25740dae | /venv/Scripts/easy_install-script.py | 2ae9ecd6aed1ce6921f4f9523d463ea8bd9fa501 | []
| no_license | ce-shop/CeShopAdminAPI | 4b6a796e588c4374a5b3bc0ced9fb63a0b9f31c0 | 44ee5d0732a474a1e4c67ac5a3012194d897399f | refs/heads/master | 2020-12-21T02:39:23.027961 | 2020-01-26T07:24:16 | 2020-01-26T07:24:16 | 236,280,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!D:\www\CeShopAdminAPI\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
]
| |
ed098e93dfb8cbb2fbfaa7a1aaafa82196b49f69 | 557dc2a440c9d8fb6fb76b6256bea9fc5853213a | /3-7-1ddd.py | a89d2f72c2b45f9812e9e05031221aae47231996 | []
| no_license | dashiki78/python_section2 | f5d6e760116dbda7836e082880534147398bd65c | a54fc4939c6506ae74fbf538dca8ba775c71f389 | refs/heads/master | 2020-03-22T02:27:26.168575 | 2018-07-02T00:26:24 | 2018-07-02T00:26:24 | 139,371,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | import sys
import io
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import time
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
class NcafeWriteAtt:
#초기화 실행(webdriver 설정)
def __init__(self):
firefox_options = Options()
firefox_options.add_argument("--headless") #CLI (User-agent)
self.driver = webdriver.Firefox(firefox_options=firefox_options,executable_path="c:/section3/webdriver/firefox/geckodriver")
self.driver.implicitly_wait(5)
# Log in to Naver Cafe && write the attendance-check comment
def writeAttendCheck(self):
self.driver.get('https://nid.naver.com/nidlogin.login')
self.driver.find_element_by_name('id').send_keys('dashiki78')
self.driver.find_element_by_name('pw').send_keys('26495538c200')
self.driver.find_element_by_xpath('//*[@id="frmNIDLogin"]/fieldset/input').click()
self.driver.implicitly_wait(30)
self.driver.get('https://cafe.naver.com/AttendanceView.nhn?search.clubid=11786850&search.menuid=6')
self.driver.implicitly_wait(30)
self.driver.switch_to_frame('cafe_main')
self.driver.find_element_by_id('cmtinput').send_keys('반갑습니다!!^^*.')
self.driver.find_element_by_xpath('//*[@id="main-area"]/div[6]/table/tbody/tr[4]/td/table/tbody/tr/td[2]/a/img').click()
time.sleep(3)
# Destructor
def __del__(self):
#self.driver.close() # closes only the currently focused window
self.driver.quit() # quits the entire Selenium session
print("Removed driver Object")
# Run
if __name__ == '__main__':
# Create the object
a = NcafeWriteAtt()
# Start time
start_time = time.time()
# Run the program
a.writeAttendCheck()
# Print the elapsed time
print("---Total %s seconds ---" % (time.time() - start_time))
# Destroy the object
del a
| [
"[email protected]"
]
| |
1197d22b4092f0070ba99d63e0074573c7e860f4 | 6045f8519065f17b9d832a8e051723a520b58e3c | /ex Basic Sytax/2. Drink Something.py | bc6082c6982ee35b8a65971bc335d24452e1b965 | []
| no_license | a-angeliev/Python-Fundamentals-SoftUni | a308a6c94eb705a3319f6e081543c1cad0b1b37d | a9a5eba0376ebc7395daeda527408d1e59d58316 | refs/heads/master | 2023-07-19T05:55:28.104160 | 2021-09-11T18:25:58 | 2021-09-11T18:25:58 | 399,575,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | n = int(input())
if n<=14:
print("drink toddy")
elif n<=18:
print("drink coke")
elif n<=21:
print("drink beer")
else:
print("drink whisky") | [
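# e.g. an input of 17 prints "drink coke", 20 prints "drink beer", 35 prints "drink whisky"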
"[email protected]"
]
| |
4f6cbcf3e0543fbc33767b8e8ceb9f39d449180c | b5e075462c902a5cdafb524dac8afa9371ef059a | /Library/integration/test_instance_id.py | 827cbe2e4ce99507786cecab1d217bfd256daf4a | []
| no_license | jigarkb/Invesmate | 25fd512be274f979a85f884b05d7452e52af1ac5 | 8d74314b1dc3c640599e66922375cc420e357f34 | refs/heads/master | 2021-09-18T14:33:58.913770 | 2018-07-15T23:50:21 | 2018-07-15T23:50:21 | 107,160,161 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for firebase_admin.instance_id module."""
import pytest
from firebase_admin import instance_id
def test_delete_non_existing():
with pytest.raises(instance_id.ApiCallError) as excinfo:
instance_id.delete_instance_id('non-existing')
assert str(excinfo.value) == 'Instance ID "non-existing": Failed to find the instance ID.'
| [
"[email protected]"
]
| |
709b50b04fda4aa769d4863ac3b1568a456ad4e7 | 60a0b9f7df16030dac7726c8d4b2bc88579a5e87 | /tests/test_sandbox.py | cb63764d1b463c192cf33c5a2f63552d88fbea70 | []
| no_license | usacs/aristotle | 4ef9d6b6de47d8a12db6e663305d4d081c63e159 | 0413d0240eb9ef8b120e68789b82fe93d3db846d | refs/heads/master | 2022-08-27T12:59:17.301597 | 2020-05-28T07:17:56 | 2020-05-28T07:17:56 | 267,346,219 | 0 | 0 | null | 2020-05-30T08:41:03 | 2020-05-27T14:46:41 | Python | UTF-8 | Python | false | false | 46 | py | import pytest
def test_ok():
assert True
| [
"[email protected]"
]
| |
260b8b5eca11766e13b26825faa4fc8c3e1ab396 | c1fbe2b0b691549979b318ab658cfd0b89e070c7 | /synapseutils/migrate_functions.py | ff4bf6d461fb9fb3f2fc1178e1f1c05001b05318 | [
"Apache-2.0"
]
| permissive | jkiang13/synapsePythonClient | dc469539cd7ace94c62cacf53fbd85f58ecbcc72 | ffeca2f8e33ebb51e4840feb50396d759cf93bdf | refs/heads/develop | 2021-07-11T14:38:18.887179 | 2021-05-28T22:55:48 | 2021-05-28T22:55:48 | 247,107,195 | 0 | 1 | Apache-2.0 | 2021-02-17T00:15:32 | 2020-03-13T15:46:47 | Python | UTF-8 | Python | false | false | 53,053 | py | import concurrent.futures
import csv
from enum import Enum
import json
import logging
import math
import sys
import traceback
import typing
import synapseclient
from synapseclient.core.constants import concrete_types
from synapseclient.core import pool_provider
from synapseclient.core import utils
from synapseclient.table import join_column_names
from synapseclient.core.upload.multipart_upload import (
MAX_NUMBER_OF_PARTS,
multipart_copy,
shared_executor,
)
"""
Contains functions for migrating the storage location of Synapse entities.
Entities can be updated or moved so that their underlying file handles are stored
in the new location.
"""
def test_import_sqlite3():
# sqlite3 is part of the Python standard library and is available on the vast majority
# of Python installations and doesn't require any additional software on the system.
# it may be unavailable in some rare cases though (for example Python compiled from source
# without ay sqlite headers available). we dynamically import it when used to avoid making
# this dependency hard for all client usage, however.
try:
import sqlite3 # noqa
except ImportError:
sys.stderr.write("""\nThis operation requires the sqlite3 module which is not available on this
installation of python. Using a Python installed from a binary package or compiled from source with sqlite
development headers available should ensure that the sqlite3 module is available.""")
raise
# we use a much larger default part size for part copies than we would for part uploads.
# with part copies the data transfer is within AWS so don't need to concern ourselves
# with upload failures of the actual bytes.
# this value aligns with what some AWS client libraries use e.g.
# https://github.com/aws/aws-sdk-java/blob/1.11.995/aws-java-sdk-s3/src/main/java/com/amazonaws/services/s3/transfer/TransferManagerConfiguration.java#L46
DEFAULT_PART_SIZE = 100 * utils.MB
class _MigrationStatus(Enum):
# an internal enum for use within the sqlite db
# to track the state of entities as they are indexed
# and then migrated.
INDEXED = 1
MIGRATED = 2
ALREADY_MIGRATED = 3
ERRORED = 4
class _MigrationType(Enum):
# container types (projects and folders) are only used during the indexing phase.
# we record the containers we've indexed so we don't reindex them on a subsequent
# run using the same db file (or reindex them after an indexing dry run)
PROJECT = 1
FOLDER = 2
# files and table attached files represent file handles that are actually migrated
FILE = 3
TABLE_ATTACHED_FILE = 4
@classmethod
def from_concrete_type(cls, concrete_type):
if concrete_type == concrete_types.PROJECT_ENTITY:
return cls.PROJECT
elif concrete_type == concrete_types.FOLDER_ENTITY:
return cls.FOLDER
elif concrete_type == concrete_types.FILE_ENTITY:
return cls.FILE
elif concrete_type == concrete_types.TABLE_ENTITY:
return cls.TABLE_ATTACHED_FILE
raise ValueError("Unhandled type {}".format(concrete_type))
class _MigrationKey(typing.NamedTuple):
id: str
type: _MigrationType
version: int
row_id: int
col_id: int
def _get_row_dict(cursor, row, include_empty):
return {
col[0]: row[i] for i, col in enumerate(cursor.description)
if (include_empty or row[i] is not None) and col[0] != 'rowid'
}
class MigrationResult:
"""A MigrationResult is a proxy object to the underlying sqlite db.
It provides a programmatic interface that allows the caller to iterate over the
file handles that were migrated without having to connect to or know the schema
of the sqlite db, and also avoids the potential memory cost of putting
everything into an in memory data structure, which could be prohibitive when
migrating a huge project of hundreds of thousands/millions of entities.
This proxy object is not thread safe since it accesses an underlying sqlite db.
"""
def __init__(self, syn, db_path):
self._syn = syn
self.db_path = db_path
def get_counts_by_status(self):
"""
Returns a dictionary of counts by the migration status of each indexed file/version.
Keys are as follows:
* INDEXED - the file/version has been indexed and will be migrated on a call to migrate_indexed_files
* MIGRATED - the file/version has been migrated
* ALREADY_MIGRATED - the file/version was already stored at the target storage location and no migration is needed
* ERRORED - an error occurred while indexing or migrating the file/version
""" # noqa
import sqlite3
with sqlite3.connect(self.db_path) as conn:
cursor = conn.cursor()
# for the purposes of these counts, containers (Projects and Folders) do not count.
# we are counting actual files only
result = cursor.execute(
'select status, count(*) from migrations where type in (?, ?) group by status',
(_MigrationType.FILE.value, _MigrationType.TABLE_ATTACHED_FILE.value)
)
counts_by_status = {status.name: 0 for status in _MigrationStatus}
for row in result:
status = row[0]
count = row[1]
counts_by_status[_MigrationStatus(status).name] = count
return counts_by_status
def get_migrations(self):
"""
A generator yielding each file/version in the migration index.
A dictionary of the properties of the migration row is yielded as follows:
id - the Synapse id
type - the concrete type of the entity
version - the version of the file entity (if applicable)
row_id - the row of the table attached file (if applicable)
col_id - the column id of the table attached file (if applicable)
from_storage_location_id - the previous storage location id where the file/version was stored
from_file_handle_id - the file handle id of the existing file/version
to_file_handle_id - if migrated, the new file handle id
status - one of INDEXED, MIGRATED, ALREADY_MIGRATED, ERRORED indicating the status of the file/version
exception - if an error was encountered indexing/migrating the file/version, its stack trace is recorded here
"""
import sqlite3
with sqlite3.connect(self.db_path) as conn:
cursor = conn.cursor()
last_id = None
column_names = None
rowid = -1
while True:
results = cursor.execute(
"""
select
rowid,
id,
type,
version,
row_id,
col_id,
from_storage_location_id,
from_file_handle_id,
to_file_handle_id,
file_size,
status,
exception
from migrations
where
rowid > ?
and type in (?, ?)
order by
rowid
limit ?
""",
(
rowid,
_MigrationType.FILE.value, _MigrationType.TABLE_ATTACHED_FILE.value,
_get_batch_size()
)
)
row_count = 0
for row in results:
row_count += 1
# using the internal sqlite rowid for ordering only
rowid = row[0]
# exclude the sqlite internal rowid
row_dict = _get_row_dict(cursor, row, False)
entity_id = row_dict['id']
if entity_id != last_id:
# if the next row is dealing with a different entity than the last table
# id then we discard any cached column names we looked up
column_names = {}
row_dict['type'] = 'file' if row_dict['type'] == _MigrationType.FILE.value else 'table'
for int_arg in (
'version',
'row_id',
'from_storage_location_id',
'from_file_handle_id',
'to_file_handle_id'
):
int_val = row_dict.get(int_arg)
if int_val is not None:
row_dict[int_arg] = int(int_val)
col_id = row_dict.pop('col_id', None)
if col_id is not None:
column_name = column_names.get(col_id)
# for usability we look up the actual column name from the id,
# but that involves a lookup so we cache them for re-use across
# rows that deal with the same table entity
if column_name is None:
column = self._syn.restGET("/column/{}".format(col_id))
column_name = column_names[col_id] = column['name']
row_dict['col_name'] = column_name
row_dict['status'] = _MigrationStatus(row_dict['status']).name
yield row_dict
last_id = entity_id
if row_count == 0:
# out of rows
break
def as_csv(self, path):
"""
Output a flat csv file of the contents of the Migration index.
Its columns are as follows:
id - the Synapse id
type - the concrete type of the entity
version - the version of the file entity (if applicable)
row_id - the row of the table attached file (if applicable)
col_name - the column name of the column the table attached file resides in (if applicable)
from_storage_location_id - the previous storage location id where the file/version was stored
from_file_handle_id - the file handle id of the existing file/version
to_file_handle_id - if migrated, the new file handle id
status - one of INDEXED, MIGRATED, ALREADY_MIGRATED, ERRORED indicating the status of the file/version
exception - if an error was encountered indexing/migrating the file/version, its stack trace is recorded here
"""
with open(path, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
# headers
csv_writer.writerow([
'id',
'type',
'version',
'row_id',
'col_name',
'from_storage_location_id',
'from_file_handle_id',
'to_file_handle_id',
'status',
'exception'
])
for row_dict in self.get_migrations():
row_data = [
row_dict['id'],
row_dict['type'],
row_dict.get('version'),
row_dict.get('row_id'),
row_dict.get('col_name'),
row_dict.get('from_storage_location_id'),
row_dict.get('from_file_handle_id'),
row_dict.get('to_file_handle_id'),
row_dict['status'],
row_dict.get('exception')
]
csv_writer.writerow(row_data)
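# Illustrative sketch of consuming a MigrationResult (the db and csv paths below
# are hypothetical placeholders):
#
#   result = MigrationResult(syn, "/tmp/migration.db")
#   result.get_counts_by_status()   # e.g. {'INDEXED': 0, 'MIGRATED': 40, ...}
#   for row in result.get_migrations():
#       if row['status'] == 'ERRORED':
#           print(row['id'], row.get('exception'))
#   result.as_csv("/tmp/migration.csv")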
def _get_executor(syn):
executor = pool_provider.get_executor(thread_count=syn.max_threads)
# default the number of concurrent file copies to half the number of threads in the pool.
# since we share the same thread pool between managing entity copies and the multipart
# upload, we have to prevent thread starvation if all threads are consumed by the entity
# code leaving none for the multipart copies
max_concurrent_file_copies = max(int(syn.max_threads / 2), 1)
return executor, max_concurrent_file_copies
def _get_batch_size():
# just a limit on certain operations to put an upper bound on various
# batch operations so they are chunked. a function to make it easily mocked.
# don't anticipate needing to adjust this for any real activity
return 500
def _ensure_schema(cursor):
# ensure we have the sqlite schema we need to be able to record and sort our
# entity file handle migration.
# one-row table of a json dictionary records the parameters used to create the index
cursor.execute("create table if not exists migration_settings (settings text not null)")
# our representation of migratable file handles is flat including both file entities
# and table attached files, so not all columns are applicable to both. row id and col id
# are only used by table attached files, for example.
cursor.execute(
"""
create table if not exists migrations (
id text not null,
type integer not null,
version integer null,
row_id integer null,
col_id integer null,
parent_id null,
status integer not null,
exception text null,
from_storage_location_id null,
from_file_handle_id text null,
to_file_handle_id text null,
file_size integer null,
primary key (id, type, row_id, col_id, version)
)
"""
)
# we get counts grouping on status
cursor.execute("create index if not exists ix_status on migrations(status)")
# we check to see if there is already a migrated copy of a file handle before doing a copy
cursor.execute(
"create index if not exists ix_file_handle_ids on migrations(from_file_handle_id, to_file_handle_id)"
)
def _wait_futures(conn, cursor, futures, pending_keys, return_when, continue_on_error):
completed, futures = concurrent.futures.wait(futures, return_when=return_when)
completed_file_handle_ids = set()
for completed_future in completed:
to_file_handle_id = None
ex = None
try:
key, from_file_handle_id, to_file_handle_id = completed_future.result()
completed_file_handle_ids.add(from_file_handle_id)
status = _MigrationStatus.MIGRATED.value
except _MigrationError as migration_ex:
# for the purposes of recording and re-raise we're not interested in
# the _MigrationError, just the underlying cause
ex = migration_ex.__cause__
key = migration_ex.key
completed_file_handle_ids.add(migration_ex.from_file_handle_id)
status = _MigrationStatus.ERRORED.value
tb_str = ''.join(traceback.format_exception(type(ex), ex, ex.__traceback__)) if ex else None
update_statement = """
update migrations set
status = ?,
to_file_handle_id = ?,
exception = ?
where
id = ?
and type = ?
"""
update_args = [status, to_file_handle_id, tb_str, key.id, key.type]
for arg in ('version', 'row_id', 'col_id'):
arg_value = getattr(key, arg)
if arg_value is not None:
update_statement += "and {} = ?\n".format(arg)
update_args.append(arg_value)
else:
update_statement += "and {} is null\n".format(arg)
cursor.execute(update_statement, tuple(update_args))
conn.commit()
pending_keys.remove(key)
if not continue_on_error and ex:
raise ex from None
return futures, completed_file_handle_ids
def index_files_for_migration(
syn: synapseclient.Synapse,
entity,
dest_storage_location_id: str,
db_path: str,
source_storage_location_ids: typing.Iterable[str] = None,
file_version_strategy='new',
include_table_files=False,
continue_on_error=False,
):
"""
Index the given entity for migration to a new storage location. This is the first step in migrating an entity
to a new storage location using synapseutils.
This function will create a sqlite database at the given db_path that can be subsequently passed
to the migrate_indexed_files function for actual migration. This function itself does not modify the given entity
in any way.
:param syn: A Synapse object with user's login, e.g. syn = synapseclient.login()
:param entity: A Synapse entity whose files should be migrated. Can be a Project, Folder,
File entity, or Table entity. If it is a container (a Project or Folder) its
contents will be recursively indexed.
:param dest_storage_location_id: The id of the new storage location to be migrated to.
:param db_path: A path on disk where a sqlite db can be created to store the contents of the
created index.
:param source_storage_location_ids: An optional iterable of storage location ids from which files will be migrated. If provided,
files outside of one of the listed storage locations will not be indexed
for migration. If not provided, then all files not already in the destination
storage location will be indexed for migration.
:param file_version_strategy: One of "new" (default), "all", "latest", "skip" as follows:
* "new" - will create a new version of file entities in the new storage location, leaving existing versions unchanged
* "all" - all existing versions will be migrated in place to the new storage location
* "latest" - the latest version will be migrated in place to the new storage location
* "skip" - skip migrating file entities. Use this e.g. if wanting to migrate only table attached files in a container while leaving the file entities unchanged
:param include_table_files: Whether to migrate files attached to tables. If False (default) then only
file entities in the container will be migrated and table attached files will be untouched.
:param continue_on_error: Whether any errors encountered while indexing an entity (access etc) will be raised
or instead just recorded in the index while allowing the index creation
to continue. Default is False (any errors are raised).
:return: A MigrationResult object that can be used to inspect the contents of the index
or output the index to a CSV for manual inspection.
""" # noqa
root_id = utils.id_of(entity)
# accept an Iterable, but easier to work internally if we can assume a list of strings
source_storage_location_ids = [str(s) for s in source_storage_location_ids or []]
file_version_strategies = {'new', 'all', 'latest', 'skip'}
if file_version_strategy not in file_version_strategies:
raise ValueError(
"Invalid file_version_strategy: {}, must be one of {}".format(
file_version_strategy,
file_version_strategies
)
)
if file_version_strategy == 'skip' and not include_table_files:
raise ValueError('Skipping both files entities and table attached files, nothing to migrate')
_verify_storage_location_ownership(syn, dest_storage_location_id)
test_import_sqlite3()
import sqlite3
with sqlite3.connect(db_path) as conn:
cursor = conn.cursor()
_ensure_schema(cursor)
_verify_index_settings(
cursor,
db_path,
root_id,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
include_table_files
)
conn.commit()
entity = syn.get(root_id, downloadFile=False)
try:
_index_entity(
conn,
cursor,
syn,
entity,
None,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
include_table_files,
continue_on_error,
)
except _IndexingError as indexing_ex:
logging.exception(
"Aborted due to failure to index entity %s of type %s. Use the continue_on_error option to skip "
"over entities due to individual failures.",
indexing_ex.entity_id,
indexing_ex.concrete_type,
)
raise indexing_ex.__cause__
return MigrationResult(syn, db_path)
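# Illustrative usage sketch for the indexing step (the Synapse id, storage
# location id and db path below are hypothetical placeholders):
#
#   syn = synapseclient.login()
#   result = index_files_for_migration(
#       syn, "syn123", "12345", "/tmp/migration.db",
#       file_version_strategy="new", include_table_files=False)
#   print(result.get_counts_by_status())
#   # the actual copy is then performed by migrate_indexed_files (defined below)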
def _confirm_migration(cursor, force, storage_location_id):
# we proceed with migration if either using the force option or if
# we can prompt the user with the count if items that are going to
# be migrated and receive their confirmation from shell input
confirmed = force
if not force:
count = cursor.execute(
"select count(*) from migrations where status = ?",
(_MigrationStatus.INDEXED.value,)
).fetchone()[0]
if count == 0:
logging.info("No items for migration.")
elif sys.stdout.isatty():
uinput = input("{} items for migration to {}. Proceed? (y/n)? ".format(
count,
storage_location_id
))
confirmed = uinput.strip().lower() == 'y'
else:
logging.info(
"%s items for migration. "
"force option not used, and console input not available to confirm migration, aborting. "
"Use the force option or run from an interactive shell to proceed with migration.",
count
)
return confirmed
def _check_file_handle_exists(cursor, from_file_handle_id):
# check if there is already a copied file handle for the given.
# if so we can re-use it rather than making another copy
row = cursor.execute(
"""
select
to_file_handle_id
from
migrations
where
from_file_handle_id = ?
and to_file_handle_id is not null
""",
(from_file_handle_id,)
).fetchone()
return row[0] if row else None
def migrate_indexed_files(
syn: synapseclient.Synapse,
db_path: str,
create_table_snapshots=True,
continue_on_error=False,
force=False
):
"""
Migrate files previously indexed in a sqlite database at the given db_path using the separate
index_files_for_migration function. The files listed in the index will be migrated according to the
configuration of that index.
:param syn: A Synapse object with user's login, e.g. syn = synapseclient.login()
:param db_path: A path on disk where a sqlite db was created using the index_files_for_migration
function.
:param create_table_snapshots: When updating the files in any table, whether a snapshot of the table is
first created (default True).
:param continue_on_error: Whether any errors encountered while migrating will be raised
or instead just recorded in the sqlite database while allowing the migration
to continue. Default is False (any errors are raised).
:param force: If running in an interactive shell, migration requires an interactive confirmation.
This can be bypassed by using the force=True option.
:return: A MigrationResult object that can be used to inspect the results of the migration.
"""
executor, max_concurrent_file_copies = _get_executor(syn)
test_import_sqlite3()
import sqlite3
with sqlite3.connect(db_path) as conn:
cursor = conn.cursor()
_ensure_schema(cursor)
settings = _retrieve_index_settings(cursor)
if settings is None:
# no settings were available at the index given
raise ValueError(
"Unable to retrieve existing index settings from '{}'. "
"Either this path does not represent a previously created migration index file "
"or the file is corrupt.".format(db_path)
)
dest_storage_location_id = settings['dest_storage_location_id']
if not _confirm_migration(cursor, force, dest_storage_location_id):
logging.info("Migration aborted.")
return
key = _MigrationKey(id='', type=None, row_id=-1, col_id=-1, version=-1)
futures = set()
# we keep track of the file handles that are currently being migrated
# so that if we encounter multiple entities associated with the same
# file handle we can copy the file handle once and update all the entities
# with the single copied file handle
pending_file_handle_ids = set()
completed_file_handle_ids = set()
# we keep track of the entity keys (syn id + version) so that we know
# if we encounter the same one twice. normally we wouldn't but when we backtrack
# to update any entities skipped because of a shared file handle we might
# query for the same key as is already being operated on.
pending_keys = set()
batch_size = _get_batch_size()
while True:
# we query for additional file or table associated file handles to migrate in batches
# ordering by synapse id. there can be multiple file handles associated with a particular
# synapse id (i.e. multiple file entity versions or multiple table attached files per table),
# so the ordering and where clause need to account for that.
# we also include in the query any unmigrated files that were previously skipped
# because they share a file handle with one whose copy has now finished.
version = key.version if key.version is not None else -1
row_id = key.row_id if key.row_id is not None else -1
col_id = key.col_id if key.col_id is not None else -1
query_kwargs = {
'indexed_status': _MigrationStatus.INDEXED.value,
'id': key.id,
'file_type': _MigrationType.FILE.value,
'table_type': _MigrationType.TABLE_ATTACHED_FILE.value,
'version': version,
'row_id': row_id,
'col_id': col_id,
# ensure that we aren't ever adding more items to the shared executor than allowed
'limit': min(batch_size, max_concurrent_file_copies - len(futures)),
}
# we can't use both named and positional literals in a query, so we use named
# literals and then inline a string for the values for our file handle ids
# since these are a dynamic list of values
pending_file_handle_in = "('" + "','".join(pending_file_handle_ids) + "')"
completed_file_handle_in = "('" + "','".join(completed_file_handle_ids) + "')"
results = cursor.execute(
f"""
select
id,
type,
version,
row_id,
col_id,
from_file_handle_id,
file_size
from migrations
where
status = :indexed_status
and (
(
((id > :id and type in (:file_type, :table_type))
or (id = :id and type = :file_type and version is not null and version > :version)
or (id = :id and type = :table_type and (row_id > :row_id or (row_id = :row_id and col_id > :col_id))))
and from_file_handle_id not in {pending_file_handle_in}
) or
(
id <= :id
and from_file_handle_id in {completed_file_handle_in}
)
)
order by
id,
type,
row_id,
col_id,
version
limit :limit
""", # noqa
query_kwargs,
)
row_count = 0
for row in results:
row_count += 1
row_dict = _get_row_dict(cursor, row, True)
key_dict = {
k: v for k, v in row_dict.items()
if k in ('id', 'type', 'version', 'row_id', 'col_id')
}
last_key = key
key = _MigrationKey(**key_dict)
from_file_handle_id = row_dict['from_file_handle_id']
if key in pending_keys or from_file_handle_id in pending_file_handle_ids:
# if this record is already being migrated or it shares a file handle
# with a record that is being migrated then skip this.
# if the record shares a file handle it will be picked up later
# when its file handle is completed.
continue
file_size = row_dict['file_size']
pending_keys.add(key)
to_file_handle_id = _check_file_handle_exists(conn.cursor(), from_file_handle_id)
if not to_file_handle_id:
pending_file_handle_ids.add(from_file_handle_id)
if key.type == _MigrationType.FILE.value:
if key.version is None:
migration_fn = _create_new_file_version
else:
migration_fn = _migrate_file_version
elif key.type == _MigrationType.TABLE_ATTACHED_FILE.value:
if last_key.id != key.id and create_table_snapshots:
syn.create_snapshot_version(key.id)
migration_fn = _migrate_table_attached_file
else:
raise ValueError("Unexpected type {} with id {}".format(key.type, key.id))
def migration_task(syn, key, from_file_handle_id, to_file_handle_id, file_size, storage_location_id):
# a closure to wrap the actual function call so that we an add some local variables
# to the return tuple which will be consumed when the future is processed
with shared_executor(executor):
try:
# instrument the shared executor in this thread so that we won't
# create a new executor to perform the multipart copy
to_file_handle_id = migration_fn(
syn,
key,
from_file_handle_id,
to_file_handle_id,
file_size,
storage_location_id)
return key, from_file_handle_id, to_file_handle_id
except Exception as ex:
raise _MigrationError(key, from_file_handle_id, to_file_handle_id) from ex
future = executor.submit(
migration_task,
syn,
key,
from_file_handle_id,
to_file_handle_id,
file_size,
dest_storage_location_id,
)
futures.add(future)
if row_count == 0 and not pending_file_handle_ids:
# we've run out of migratable sqlite rows, we have nothing else
# to submit, so we break out and wait for all remaining
# tasks to conclude.
break
if len(futures) >= max_concurrent_file_copies or row_count < batch_size:
# if we have no concurrency left to process any additional entities
# or if we're near the end of he migration and have a small
# remainder batch then we wait for one of the processing migrations
# to finish. a small batch doesn't mean this is the last batch since
# a completed file handle here could be associated with another
# entity that we deferred before because it shared the same file handle id
futures, completed_file_handle_ids = _wait_futures(
conn,
cursor,
futures,
pending_keys,
concurrent.futures.FIRST_COMPLETED,
continue_on_error,
)
pending_file_handle_ids -= completed_file_handle_ids
if futures:
# wait for all remaining migrations to conclude before returning
_wait_futures(
conn,
cursor,
futures,
pending_keys,
concurrent.futures.ALL_COMPLETED,
continue_on_error
)
return MigrationResult(syn, db_path)
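# Illustrative follow-up to the indexing sketch above (same hypothetical db path):
#
#   result = migrate_indexed_files(syn, "/tmp/migration.db", force=True)
#   print(result.get_counts_by_status())
#   result.as_csv("/tmp/migrated.csv")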
def _verify_storage_location_ownership(syn, storage_location_id):
# if this doesn't raise an error we're okay
try:
syn.restGET("/storageLocation/{}".format(storage_location_id))
except synapseclient.core.exceptions.SynapseHTTPError:
raise ValueError(
"Error verifying storage location ownership of {}. You must be creator of the destination storage location"
.format(storage_location_id)
)
def _retrieve_index_settings(cursor):
# index settings are stored as a json-string in a one-row table
import sqlite3
settings = None
try:
results = cursor.execute("select settings from migration_settings")
row = results.fetchone()
if row:
settings = json.loads(row[0])
except (sqlite3.OperationalError, ValueError) as ex:
raise ValueError(
"Unable to parse index settings, the index may be corrupt or created by an older version "
"of this function. You will need to re-create the index."
) from ex
return settings
def _verify_index_settings(
cursor,
db_path,
root_id,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
include_table_files,
):
existing_settings = _retrieve_index_settings(cursor)
if existing_settings is not None:
settings = locals()
for setting in (
'root_id',
'dest_storage_location_id',
'source_storage_location_ids',
'file_version_strategy',
'include_table_files',
):
parameter = settings[setting]
existing_value = existing_settings[setting]
if not parameter == existing_value:
# value does not match the existing index settings.
# we can't resume indexing with an existing index file using a different setting.
raise ValueError(
"Index parameter does not match the setting recorded in the existing index file. "
"To change the index settings start over by deleting the file or using a different path. "
"Expected {} '{}', found '{}' in index file '{}'".format(
setting,
existing_value,
parameter,
db_path,
)
)
else:
# this is a new index file, no previous values to compare against,
# instead record the current settings
settings_str = json.dumps({
'root_id': root_id,
'dest_storage_location_id': dest_storage_location_id,
'source_storage_location_ids': source_storage_location_ids,
'file_version_strategy': file_version_strategy,
'include_table_files': 1 if include_table_files else 0,
})
cursor.execute("insert into migration_settings (settings) values (?)", (settings_str,))
def _check_indexed(cursor, entity_id):
# check if we have indexed the given entity in the sqlite db yet.
# if so it can skip reindexing it. supports resumption.
indexed_row = cursor.execute(
"select 1 from migrations where id = ?",
(entity_id,)
).fetchone()
if indexed_row:
logging.debug('%s already indexed, skipping', entity_id)
return True
logging.debug('%s not yet indexed, indexing now', entity_id)
return False
def _get_version_numbers(syn, entity_id):
for version_info in syn._GET_paginated("/entity/{id}/version".format(id=entity_id)):
yield version_info['versionNumber']
def _include_file_storage_location_in_index(
file_handle,
source_storage_location_ids,
to_storage_location_id,
):
# helper determines whether a file is included in the index depending on its storage location.
# if source_storage_location_ids are specified the from storage location must be in it.
# if the current storage location already matches the destination location then we also
# include it in the index, we'll mark it as already migrated.
from_storage_location_id = file_handle.get('storageLocationId')
if (
(file_handle.get('concreteType') == concrete_types.S3_FILE_HANDLE) and
(
not source_storage_location_ids or
str(from_storage_location_id) in source_storage_location_ids or
str(from_storage_location_id) == str(to_storage_location_id)
)
):
migration_status = _MigrationStatus.INDEXED.value \
if str(from_storage_location_id) != str(to_storage_location_id) \
else _MigrationStatus.ALREADY_MIGRATED.value
return migration_status
# this file is not included in this index
return None
def _index_file_entity(
cursor,
syn,
entity_id,
parent_id,
to_storage_location_id,
source_storage_location_ids,
file_version_strategy
):
logging.info('Indexing file entity %s', entity_id)
# 2-tuples of entity, version # to record
entity_versions = []
if file_version_strategy == 'new':
# we'll need the etag to be able to do an update on an entity version
# so we need to fetch the full entity now
entity = syn.get(entity_id, downloadFile=False)
entity_versions.append((
entity,
None # no version number, this record indicates we will create a new version
))
elif file_version_strategy == 'all':
# one row for each existing version that will all be migrated
for version in _get_version_numbers(syn, entity_id):
entity = syn.get(entity_id, version=version, downloadFile=False)
entity_versions.append((
entity,
version)
)
elif file_version_strategy == 'latest':
# one row for the most recent version that will be migrated
entity = syn.get(entity_id, downloadFile=False)
entity_versions.append((
entity,
entity.versionNumber
))
if entity_versions:
insert_values = []
for (entity, version) in entity_versions:
migration_status = _include_file_storage_location_in_index(
entity._file_handle,
source_storage_location_ids,
to_storage_location_id
)
if migration_status:
file_size = entity._file_handle['contentSize']
insert_values.append((
entity_id,
_MigrationType.FILE.value,
version,
parent_id,
entity._file_handle['storageLocationId'],
entity.dataFileHandleId,
file_size,
migration_status
))
if insert_values:
cursor.executemany(
"""
insert into migrations (
id,
type,
version,
parent_id,
from_storage_location_id,
from_file_handle_id,
file_size,
status
) values (?, ?, ?, ?, ?, ?, ?, ?)
""",
insert_values
)
def _get_table_file_handle_rows(syn, table_id):
file_handle_columns = [c for c in syn.getTableColumns(table_id) if c['columnType'] == 'FILEHANDLEID']
if file_handle_columns:
file_column_select = join_column_names(file_handle_columns)
results = syn.tableQuery("select {} from {}".format(file_column_select, table_id))
for row in results:
file_handles = {}
# first two cols are row id and row version, rest are file handle ids from our query
row_id, row_version = row[:2]
file_handle_ids = row[2:]
for i, file_handle_id in enumerate(file_handle_ids):
if file_handle_id:
col_id = file_handle_columns[i]['id']
file_handle = syn._getFileHandleDownload(
file_handle_id,
table_id,
objectType='TableEntity'
)['fileHandle']
file_handles[col_id] = file_handle
yield row_id, row_version, file_handles
def _index_table_entity(
cursor,
syn,
entity,
parent_id,
dest_storage_location_id,
source_storage_location_ids
):
entity_id = utils.id_of(entity)
logging.info('Indexing table entity %s', entity_id)
row_batch = []
def _insert_row_batch(row_batch):
if row_batch:
cursor.executemany(
"""insert into migrations
(
id,
type,
parent_id,
row_id,
col_id,
version,
from_storage_location_id,
from_file_handle_id,
file_size,
status
) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
row_batch
)
for row_id, row_version, file_handles in _get_table_file_handle_rows(syn, entity_id):
for col_id, file_handle in file_handles.items():
migration_status = _include_file_storage_location_in_index(
file_handle,
source_storage_location_ids,
dest_storage_location_id,
)
if migration_status:
file_size = file_handle['contentSize']
row_batch.append((
entity_id,
_MigrationType.TABLE_ATTACHED_FILE.value,
parent_id,
row_id,
col_id,
row_version,
file_handle['storageLocationId'],
file_handle['id'],
file_size,
migration_status
))
if len(row_batch) % _get_batch_size() == 0:
_insert_row_batch(row_batch)
row_batch = []
if row_batch:
_insert_row_batch(row_batch)
def _index_container(
conn,
cursor,
syn,
container_entity,
parent_id,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
include_table_files,
continue_on_error
):
entity_id = utils.id_of(container_entity)
concrete_type = utils.concrete_type_of(container_entity)
logging.info('Indexing %s %s', concrete_type[concrete_type.rindex('.') + 1:], entity_id)
include_types = []
if file_version_strategy != 'skip':
include_types.extend(('folder', 'file'))
if include_table_files:
include_types.append('table')
children = syn.getChildren(entity_id, includeTypes=include_types)
for child in children:
_index_entity(
conn,
cursor,
syn,
child,
entity_id,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
include_table_files,
continue_on_error,
)
# once all the children are recursively indexed we mark this parent itself as indexed
container_type = (
_MigrationType.PROJECT.value
if concrete_types.PROJECT_ENTITY == concrete_type
else _MigrationType.FOLDER.value
)
cursor.execute(
"insert into migrations (id, type, parent_id, status) values (?, ?, ?, ?)",
[entity_id, container_type, parent_id, _MigrationStatus.INDEXED.value]
)
def _index_entity(
conn,
cursor,
syn,
entity,
parent_id,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
include_table_files,
continue_on_error
):
# recursive function to index a given entity into the sqlite db.
entity_id = utils.id_of(entity)
concrete_type = utils.concrete_type_of(entity)
try:
if not _check_indexed(cursor, entity_id):
# if already indexed we short circuit (previous indexing will be used)
if concrete_type == concrete_types.FILE_ENTITY:
_index_file_entity(
cursor,
syn,
entity_id,
parent_id,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
)
elif concrete_type == concrete_types.TABLE_ENTITY:
_index_table_entity(
cursor,
syn,
entity,
parent_id,
dest_storage_location_id,
source_storage_location_ids,
)
elif concrete_type in [concrete_types.FOLDER_ENTITY, concrete_types.PROJECT_ENTITY]:
_index_container(
conn,
cursor,
syn,
entity,
parent_id,
dest_storage_location_id,
source_storage_location_ids,
file_version_strategy,
include_table_files,
continue_on_error,
)
conn.commit()
except _IndexingError:
# this is a recursive function, we don't need to log the error at every level so just
# pass up exceptions of this type that wrap the underlying exception and indicate
# that they were already logged
raise
except Exception as ex:
if continue_on_error:
logging.warning("Error indexing entity %s of type %s", entity_id, concrete_type, exc_info=True)
tb_str = ''.join(traceback.format_exception(type(ex), ex, ex.__traceback__))
cursor.execute(
"""
insert into migrations (
id,
type,
parent_id,
status,
exception
) values (?, ?, ?, ?, ?)
""",
(
entity_id,
_MigrationType.from_concrete_type(concrete_type).value,
parent_id,
_MigrationStatus.ERRORED.value,
tb_str,
)
)
else:
raise _IndexingError(entity_id, concrete_type) from ex
def _get_part_size(file_size):
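    # use the default part size unless the file is large enough that the copy would
    # exceed the maximum allowed number of parts, in which case scale the part size
    # up so the part count stays within MAX_NUMBER_OF_PARTS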
return max(DEFAULT_PART_SIZE, math.ceil((file_size / MAX_NUMBER_OF_PARTS)))
def _create_new_file_version(syn, key, from_file_handle_id, to_file_handle_id, file_size, storage_location_id):
logging.info('Creating new version for file entity %s', key.id)
entity = syn.get(key.id, downloadFile=False)
source_file_handle_association = {
'fileHandleId': from_file_handle_id,
'associateObjectId': key.id,
'associateObjectType': 'FileEntity',
}
# copy to a new file handle if we haven't already
if not to_file_handle_id:
to_file_handle_id = multipart_copy(
syn,
source_file_handle_association,
storage_location_id=storage_location_id,
part_size=_get_part_size(file_size)
)
entity.dataFileHandleId = to_file_handle_id
syn.store(entity)
return to_file_handle_id
def _migrate_file_version(syn, key, from_file_handle_id, to_file_handle_id, file_size, storage_location_id):
logging.info('Migrating file entity %s version %s', key.id, key.version)
source_file_handle_association = {
'fileHandleId': from_file_handle_id,
'associateObjectId': key.id,
'associateObjectType': 'FileEntity',
}
# copy to a new file handle if we haven't already
if not to_file_handle_id:
to_file_handle_id = multipart_copy(
syn,
source_file_handle_association,
storage_location_id=storage_location_id,
part_size=_get_part_size(file_size),
)
file_handle_update_request = {
'oldFileHandleId': from_file_handle_id,
'newFileHandleId': to_file_handle_id,
}
# no response, we rely on a 200 here
syn.restPUT(
"/entity/{id}/version/{versionNumber}/filehandle".format(
id=key.id,
versionNumber=key.version,
),
json.dumps(file_handle_update_request),
)
return to_file_handle_id
def _migrate_table_attached_file(syn, key, from_file_handle_id, to_file_handle_id, file_size, storage_location_id):
logging.info('Migrating table attached file %s, row %s, col %s', key.id, key.row_id, key.col_id)
source_file_handle_association = {
'fileHandleId': from_file_handle_id,
'associateObjectId': key.id,
'associateObjectType': 'TableEntity',
}
# copy to a new file handle if we haven't already
if not to_file_handle_id:
to_file_handle_id = multipart_copy(
syn,
source_file_handle_association,
storage_location_id=storage_location_id,
part_size=_get_part_size(file_size),
)
row_mapping = {str(key.col_id): to_file_handle_id}
partial_rows = [synapseclient.table.PartialRow(row_mapping, key.row_id)]
partial_rowset = synapseclient.PartialRowset(key.id, partial_rows)
syn.store(partial_rowset)
return to_file_handle_id
class _MigrationError(Exception):
def __init__(self, key, from_file_handle_id, to_file_handle_id):
self.key = key
self.from_file_handle_id = from_file_handle_id
self.to_file_handle_id = to_file_handle_id
class _IndexingError(Exception):
def __init__(self, entity_id, concrete_type):
self.entity_id = entity_id
self.concrete_type = concrete_type
| [
"[email protected]"
]
| |
8a2fa77e9a860b3b2ac21b16b7d0e7fe45df1e7a | f5a87723b69e4c52f4f95c27c4d3bdbbe801212b | /homeassistant/components/pi_hole/config_flow.py | 5acaffd13b1b53afe90082e2186b85a02de9055d | [
"Apache-2.0"
]
| permissive | craigjmidwinter/home-assistant | b5ef821213dfeacb67e12fa6a5f76cfd898b9aff | 68ca0a05c8a849ba374539e3c6a883555a567abf | refs/heads/dev | 2023-07-20T00:00:10.335031 | 2021-12-04T00:13:16 | 2021-12-04T00:13:16 | 75,567,206 | 1 | 0 | Apache-2.0 | 2023-09-13T06:57:26 | 2016-12-04T21:57:33 | Python | UTF-8 | Python | false | false | 6,146 | py | """Config flow to configure the Pi-hole integration."""
from __future__ import annotations
import logging
from typing import Any
from hole import Hole
from hole.exceptions import HoleError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.pi_hole.const import (
CONF_LOCATION,
CONF_STATISTICS_ONLY,
DEFAULT_LOCATION,
DEFAULT_NAME,
DEFAULT_SSL,
DEFAULT_STATISTICS_ONLY,
DEFAULT_VERIFY_SSL,
DOMAIN,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
class PiHoleFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Pi-hole config flow."""
VERSION = 1
def __init__(self) -> None:
"""Initialize the config flow."""
self._config: dict = {}
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by the user."""
return await self.async_step_init(user_input)
async def async_step_import(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initiated by import."""
return await self.async_step_init(user_input, is_import=True)
async def async_step_init(
self, user_input: dict[str, Any] | None, is_import: bool = False
) -> FlowResult:
"""Handle init step of a flow."""
errors = {}
if user_input is not None:
host = (
user_input[CONF_HOST]
if is_import
else f"{user_input[CONF_HOST]}:{user_input[CONF_PORT]}"
)
name = user_input[CONF_NAME]
location = user_input[CONF_LOCATION]
tls = user_input[CONF_SSL]
verify_tls = user_input[CONF_VERIFY_SSL]
endpoint = f"{host}/{location}"
if await self._async_endpoint_existed(endpoint):
return self.async_abort(reason="already_configured")
try:
await self._async_try_connect(host, location, tls, verify_tls)
except HoleError as ex:
_LOGGER.debug("Connection failed: %s", ex)
if is_import:
_LOGGER.error("Failed to import: %s", ex)
return self.async_abort(reason="cannot_connect")
errors["base"] = "cannot_connect"
else:
self._config = {
CONF_HOST: host,
CONF_NAME: name,
CONF_LOCATION: location,
CONF_SSL: tls,
CONF_VERIFY_SSL: verify_tls,
}
if is_import:
api_key = user_input.get(CONF_API_KEY)
return self.async_create_entry(
title=name,
data={
**self._config,
CONF_STATISTICS_ONLY: api_key is None,
CONF_API_KEY: api_key,
},
)
self._config[CONF_STATISTICS_ONLY] = user_input[CONF_STATISTICS_ONLY]
if self._config[CONF_STATISTICS_ONLY]:
return self.async_create_entry(title=name, data=self._config)
return await self.async_step_api_key()
user_input = user_input or {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
vol.Required(
CONF_PORT, default=user_input.get(CONF_PORT, 80)
): vol.Coerce(int),
vol.Required(
CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)
): str,
vol.Required(
CONF_LOCATION,
default=user_input.get(CONF_LOCATION, DEFAULT_LOCATION),
): str,
vol.Required(
CONF_STATISTICS_ONLY,
default=user_input.get(
CONF_STATISTICS_ONLY, DEFAULT_STATISTICS_ONLY
),
): bool,
vol.Required(
CONF_SSL,
default=user_input.get(CONF_SSL, DEFAULT_SSL),
): bool,
vol.Required(
CONF_VERIFY_SSL,
default=user_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
}
),
errors=errors,
)
async def async_step_api_key(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle step to setup API key."""
if user_input is not None:
return self.async_create_entry(
title=self._config[CONF_NAME],
data={
**self._config,
CONF_API_KEY: user_input.get(CONF_API_KEY, ""),
},
)
return self.async_show_form(
step_id="api_key",
data_schema=vol.Schema({vol.Optional(CONF_API_KEY): str}),
)
async def _async_endpoint_existed(self, endpoint: str) -> bool:
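        # an endpoint counts as already configured if any existing entry resolves to
        # the same host/location combination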
existing_endpoints = [
f"{entry.data.get(CONF_HOST)}/{entry.data.get(CONF_LOCATION)}"
for entry in self._async_current_entries()
]
return endpoint in existing_endpoints
async def _async_try_connect(
self, host: str, location: str, tls: bool, verify_tls: bool
) -> None:
session = async_get_clientsession(self.hass, verify_tls)
pi_hole = Hole(host, session, location=location, tls=tls)
await pi_hole.get_data()
| [
"[email protected]"
]
| |
4ea052981899c8b2d541490b7e479a087ba003cd | aab628ac8bbabee5be5eacd62f02dd57887b1368 | /posts/admin.py | 162b24c22d97a22a02816b1f92fc9e5eee09f5e8 | []
| no_license | arundhyani/trydjango | f28d3cca0c2073e8e49ac46e945facabb5ed9126 | 14d48d69d479643612425e6c557cafd0b6f27dd9 | refs/heads/master | 2020-03-23T15:27:33.382509 | 2018-08-12T19:49:01 | 2018-08-12T19:49:01 | 139,466,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.contrib import admin
from .models import Post
class PostModelAdmin(admin.ModelAdmin):
list_display = ["__str__","update","timestamp"]
list_display_links = ["update"]
list_filter = ("update","timestamp")
search_fields = ['title']
class Meta :
model = Post
admin.site.register(Post,PostModelAdmin)
| [
"[email protected]"
]
| |
93445be0fe7f2304b57849fd393fb87152e4fed1 | 95230c76a9e09d518c125ea8105002a7af6d1afc | /05_qstyle_sheets/style_sheets_example.py | 48aab6b37dbed01f2b7497c75912ca16b2631c56 | []
| no_license | amkartheek/nuke_python | d5f86f5ccb9742cd65acaf571fd4f5c7ca4032ff | 67ed5e25796506c9321f487f576bc142842e0041 | refs/heads/master | 2020-05-31T19:04:19.463232 | 2018-03-09T19:17:19 | 2018-03-09T19:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | from PySide.QtGui import *
from PySide.QtCore import *
import sys
class MyLineEdit(QLineEdit):
def __init__(self):
super(MyLineEdit, self).__init__()
class Panel(QWidget):
def __init__(self):
super(Panel, self).__init__()
first_name_label = QLabel("First Name:")
self.first_name = QLineEdit()
self.first_name.setProperty("valid", False)
self.first_name.setObjectName("first_name")
last_name_label = QLabel("Last Name:")
last_name = QLineEdit()
name_layout = QHBoxLayout()
name_layout.addWidget(first_name_label)
name_layout.addWidget(self.first_name)
name_layout.addWidget(last_name_label)
name_layout.addWidget(last_name)
role_label = QLabel("Role")
role_combobox = QComboBox()
role_combobox.addItems(["Pipeline TD", "Compositor", "FX TD", "Modeler", "Animator", "Lighting TD"])
role_layout = QHBoxLayout()
role_layout.addWidget(role_label)
role_layout.addWidget(role_combobox)
role_layout.addStretch()
self.gender_male_checkbox = QCheckBox("male")
        self.gender_female_checkbox = QCheckBox("female")
        gender_layout = QHBoxLayout()
        gender_layout.addWidget(self.gender_male_checkbox)
        gender_layout.addWidget(self.gender_female_checkbox)
gender_layout.addStretch()
list_widget = QListWidget()
list_widget.addItems(["Canada", "USA", "Japan", "London", "Australia"])
# list_widget.setAlternatingRowColors(True)
save_push_button = QPushButton("OK")
        close_push_button = QPushButton("Close")
        action_layout = QHBoxLayout()
        action_layout.addWidget(save_push_button)
        action_layout.addWidget(close_push_button)
master_layout = QVBoxLayout()
master_layout.addLayout(name_layout)
master_layout.addLayout(role_layout)
master_layout.addLayout(gender_layout)
master_layout.addWidget(list_widget)
master_layout.addLayout(action_layout)
self.setLayout(master_layout)
# Signals
        close_push_button.clicked.connect(self.close)
        save_push_button.clicked.connect(self.show_message_box)
        self.gender_male_checkbox.clicked.connect(self.set_checkbox)
        self.gender_female_checkbox.clicked.connect(self.set_checkbox)
self.first_name.textChanged.connect(self.check_validity)
self.set_style_sheet()
def check_validity(self, text):
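        # flag the field as valid whenever it is non-empty and re-apply the stylesheet
        # so Qt re-evaluates the dynamic "valid" property selector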
self.first_name.setProperty("valid", bool(text))
self.set_style_sheet()
def set_style_sheet(self):
text = open("style.txt").read()
self.setStyleSheet(text)
def set_checkbox(self):
        self.gender_female_checkbox.setChecked(self.sender() is self.gender_female_checkbox)
        self.gender_male_checkbox.setChecked(self.sender() is self.gender_male_checkbox)
def show_message_box(self):
QMessageBox.information(self, "information", "User saved successfully!")
app = QApplication(sys.argv)
panel = Panel()
panel.show()
app.exec_()
| [
"[email protected]"
]
| |
b6e89f8e6a9defc0b34985c6463aaee012df5de7 | d6bc51e34d7cb8f45b0000fb0fe236b6fe6e06b9 | /get_my_face_image.py | f6db2c66c86fae13b1d49469228baecfc8a338a3 | []
| no_license | dingjingjing0212/myseltornot | e63b6a91370cc5fb37366be9a93c4e20ce5748a7 | 03a41ba5c45fd4d698c4b8e3d58d280d4c2b07d9 | refs/heads/master | 2020-04-28T08:52:53.576168 | 2019-03-12T06:22:41 | 2019-03-12T06:22:41 | 175,145,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | # -*- coding: utf-8 -*-
import os
import pickle
import cv2
import dlib
import numpy as np
def change_contrast_and_brightness(image, alpha=1.0, beta=0):
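    # simple linear adjustment: alpha scales the pixel values (contrast), beta shifts
    # them (brightness), and the result is clipped back into the valid 0-255 range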
image = alpha * image + beta
image[image < 0] = 0
image[image > 255] = 255
return image.astype(np.uint8)
predictor_file_path = 'data/models/shape_predictor_68_face_landmarks.dat'
image_folder_path = 'data/myself'
if not os.path.exists(image_folder_path):
os.mkdir(image_folder_path)
image_id_file_path = os.path.join(image_folder_path, 'image_id.pkl')
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_file_path)
if os.path.exists(image_id_file_path):
with open(image_id_file_path, 'rb') as f:
image_id = pickle.load(f)
else:
image_id = 0
image_id_file = open(image_id_file_path, 'wb')
cam = cv2.VideoCapture(0)
while True:
ret_val, img = cam.read()
rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
dets = detector(rgb_image)
num_faces = len(dets)
if num_faces == 0:
print("Sorry, there were no faces found.")
continue
faces = dlib.full_object_detections()
for detection in dets:
faces.append(sp(img, detection))
image = dlib.get_face_chip(img, faces[0], size=64)
image = change_contrast_and_brightness(image, alpha=np.random.uniform(0.6, 3.0), beta=np.random.randint(-50, 100))
cv2.imshow('myself', image)
print(image_id)
cv2.imwrite(os.path.join(image_folder_path, '%d.jpg' % image_id), image)
image_id += 1
if cv2.waitKey(1) == 27 or image_id >= 10000:
pickle.dump(image_id, image_id_file)
image_id_file.close()
break
cv2.destroyAllWindows()
| [
"[email protected]"
]
| |
9789466f6317a7c47b259e2c07f2ba93c65a8d5a | c0baf3c55b538ffd083fdf8b94e034323fcd980d | /Crawl Table.py | babe1280fdc2f52bb95e09b2d817d98cf6a8ce93 | []
| no_license | krishna5737/Crawlers | 9c96d15970f1a794c5c1c7f7fbae9a014bfe7015 | 0b2d3ae430ad77bb748476f1f9a12b320948db81 | refs/heads/master | 2020-03-17T05:32:35.491855 | 2018-05-14T07:23:46 | 2018-05-14T07:23:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | py | #import libraries BeautifulSoup, csv, urllib.request
import bs4 as bs
import urllib.request
import csv
def input_url():
#try to open the url using urllib if true return the url
flag = 0
while (flag == 0):
try:
url = input("Enter URL: ")
urllib.request.urlopen(url)
flag = 1
except ValueError:
print("Sorry, the url entered by you is not correct")
continue
return(url)
def input_table_number(tables_in_page):
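    # keep prompting until the user enters a table number between 1 and the number of
    # tables found on the page; the value is returned as a 0-based index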
while True:
table_number = int(input("Enter Table :"))
if(table_number>tables_in_page):
print("Table number you want to export doesnot exist on page")
continue
elif(table_number <=0):
print("Please enter valid table number")
continue
else:
break
return(table_number-1)
def write_table_to_csv(url,output_csv,table_number,tables):
out_file = open(output_csv,'w') #open the csv in which user want tables to be written
csvwriter = csv.writer(out_file)
table_rows = tables[table_number].find_all('tr') #find all table rows in ith table
for tr in table_rows:
td = tr.find_all('td') or tr.find_all('th') #append table data to td if the tag is td(table data) or th(table header)
row = [i.text for i in td] # extract text from table data(remove tags)
print(row) #print the data to terminal
csvwriter.writerow(row)#write the data to csv
def main():
#Check if the url entered by user is correct or not
#Keep asking for correct url untill the url is valid
url = input_url()
source = urllib.request.urlopen(url) #open url using urllib
soup = bs.BeautifulSoup(source,'lxml') #convert the url in htmltags using beautifulsoup
#calculate number of tables on current page
tables = soup.find_all('table')
tables_in_page = len(tables)
#Check if the table_number entered by user is correct or not
#table_number should be a positive integer and less than total tables on age
table_number = input_table_number(tables_in_page)
#prompt user to enter the table name in which he wants data to be exported
output_csv = input("Enter Output (CSV) filename: ")
#write data to table
write_table_to_csv(url,output_csv,table_number,tables)
main()
| [
"[email protected]"
]
| |
16b184584b1a0e8f5b547720eb57c19050402caf | 0d207a7ff86797206614cbd11b9388850dc7c649 | /cartpole-master/retest.py | e3bed8a8b6899b302088f8446bcf1977fcc2a5b1 | [
"MIT"
]
| permissive | ConnorMich/Fast_Slow_Dynamics_Control | 639a2924ee321ef277049e13d889b20dbcb599fb | d242b2df77391a59bb950ccd704ab80ed2804be1 | refs/heads/master | 2020-05-03T05:04:38.314204 | 2019-04-29T15:59:16 | 2019-04-29T15:59:16 | 178,439,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | import gym
import sys
import numpy as np
import argparse
from scores.score_logger import ScoreLogger
from scores.score_logger import FS_score
from scores.score_logger import Test_Score
from scores.score_logger import video
from dqn import DQNSolver
import datetime
import os
ENV_NAME = "CartPole-v1"
TRAINING_EPISODE_TIME = 225
# REQ_MED_TRAIN_REWARD = 250
REQ_MED_TRAIN_REWARD = 360
def test_cartpole(model_name, num_tests, slow_d):
# generate the environment
env = gym.make(ENV_NAME)
# define the observation and action spaces
observation_space = env.observation_space.shape[0]
action_space = env.action_space.n
# Create and Load the DQN Controller Model
dqn_solver = DQNSolver(observation_space, action_space, slow_d)
dqn_solver.load_model(model_name)
# Create the performance analyzer
test_score_manager = FS_score(dqn_solver.pole_ang_d,dqn_solver.cart_vel_d, model_name)
test_score_manager.clear_fs_tests()
# Prep the environemnt
state = env.reset()
state = np.reshape(state, [1, observation_space])
steps = 0
run = 0
episode_reward = 0
while(run<num_tests):
# save the state of the system
test_score_manager.add_state(state[0])
# Render the environment
# env.render()
# Determine and perform the action
action = dqn_solver.test_act(state)
state_next, reward, terminal, info = env.step(action)
episode_reward += dqn_solver.reward(state, 'linear')
# Set the next action of the state
state_next = np.reshape(state_next, [1, observation_space])
state = state_next
# increment the number of steps and add the episode reward
steps +=1
# sum_reward += dqn_solver.reward(state[0], reward_func)
# When the run is finished:
if terminal or steps>TRAINING_EPISODE_TIME:
# Save the CSV
test_score_manager.save_csv()
# Add the run to the PNG
test_score_manager.save_run(run, num_tests)
test_score_manager.clear_run_data()
# Reset the environment
state = env.reset()
state = np.reshape(state, [1, observation_space])
print("steps: " + str(steps))
print("reward: " + str(episode_reward))
steps = 0
episode_reward = 0
run += 1
test_score_manager.close_graphs()
if __name__ == "__main__":
directory = './models/'
names = np.array([])
slow_d = np.array([])
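    # scan the models directory for saved .h5 networks; the slow-dynamics setting for
    # each model is recovered from the digit following "_s" in its filename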
for filename in os.listdir(directory):
if filename.endswith('.h5'):
names = np.append(names, filename[0:len(filename)-3])
sd_index = filename.rfind('_s') + 2
print(filename)
print(filename[sd_index])
slow_d = np.append(slow_d, int(filename[sd_index]))
for i in range(0,len(names)):
        test_cartpole(names[i], 10, slow_d[i])
| [
"[email protected]"
]
| |
31c03c46273a3ec99f7d4ec05e1b47a219fe961a | 291c08a11a29ce995099f775ac0ef79cd69dd1fc | /file_app/migrations/0001_initial.py | 3918065b948c8b8a81a7a5331b098db45406b028 | [
"MIT"
]
| permissive | Amirsorouri00/neolej | 1e278a2216a961b8abedc32b30d4fccf5c431d0b | 8fa18f2c1a38b0a59ed7eeeed7ed37ef7b9dad97 | refs/heads/master | 2020-04-20T15:36:24.669991 | 2019-03-17T07:20:02 | 2019-03-17T07:20:02 | 168,935,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # Generated by Django 2.1.3 on 2019-02-16 15:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('remark', models.CharField(max_length=31)),
('timestamp', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
]
| |
4f222f886a3b809a9587571250863c0aa5cc8dc9 | 1a5a455ee38b025accf864941a90ac952ddb5c69 | /manage.py | 0c1fe5dc7e33ee4e9b6421a07042a3e5fad896bc | []
| no_license | vikasjindal2001/user-validation-through-email-verification | 857c79a2ede3bb901ecd905861a372e1b82c24f7 | dda2d601fec74e25178993f8d5663e18fbcda25c | refs/heads/master | 2023-07-25T17:28:17.020657 | 2021-09-03T14:48:25 | 2021-09-03T14:48:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newvikasproject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
e0b15df612ba3b856357439a9d6586d0186b146e | c0c4fe8f9aff2e7684fcaf10329f963873753b2a | /src/biotite/sequence/sequence.py | 1a6b8230a35cd5b6afd265692459ee224fe40473 | [
"BSD-3-Clause"
]
| permissive | thomasnevolianis/biotite | 85e1b9d6a1fbb5d9f81501a8ebc617bc26388ab9 | 916371eb602cfcacb2d5356659298ef38fa01fcc | refs/heads/master | 2022-11-30T19:40:53.017368 | 2020-08-04T07:00:59 | 2020-08-04T07:00:59 | 285,375,415 | 0 | 0 | BSD-3-Clause | 2020-08-05T18:41:48 | 2020-08-05T18:41:47 | null | UTF-8 | Python | false | false | 11,010 | py | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
"""
The module contains the :class:`Sequence` superclass and :class:`GeneralSequence`.
"""
__name__ = "biotite.sequence"
__author__ = "Patrick Kunzmann"
__all__ = ["Sequence"]
import numbers
import abc
import numpy as np
from .alphabet import Alphabet, LetterAlphabet
from ..copyable import Copyable
_size_uint8 = np.iinfo(np.uint8 ).max +1
_size_uint16 = np.iinfo(np.uint16).max +1
_size_uint32 = np.iinfo(np.uint32).max +1
class Sequence(Copyable, metaclass=abc.ABCMeta):
"""
The abstract base class for all sequence types.
A :class:`Sequence` can be seen as a succession of symbols, that are
elements in the allowed set of symbols, the :class:`Alphabet`.
Internally, a :class:`Sequence` object uses a *NumPy*
:class:`ndarray` of integers, where each integer represents a
symbol.
The :class:`Alphabet` of a :class:`Sequence` object is used to
encode each symbol, that is used to create the
:class:`Sequence`, into an integer. These integer values are called
symbol code, the encoding of an entire sequence of symbols is
called sequence code.
The size of the symbol code type in the array is determined by the
size of the :class:`Alphabet`:
If the :class:`Alphabet` contains 256 symbols or less, one byte is
used per array element; if the :class:`Alphabet` contains
between 257 and 65536 symbols, two bytes are used, and so on.
Two :class:`Sequence` objects are equal if they are instances of the
same class, have the same :class:`Alphabet` and have equal sequence
codes.
Comparison with a string or list of symbols evaluates always to
false.
A :class:`Sequence` can be indexed by any 1-D index a
:class:`ndarray` accepts.
If the index is a single integer, the decoded symbol at that
position is returned, otherwise a subsequence is returned.
Individual symbols of the sequence can also be exchanged in indexed
form: If the an integer is used as index, the item is treated as a
symbol. Any other index (slice, index list, boolean mask) expects
multiple symbols, either as list of symbols, as :class:`ndarray`
containing a sequence code or another :class:`Sequence` instance.
Concatenation of two sequences is achieved with the '+' operator.
Each subclass of :class:`Sequence` needs to overwrite the abstract
method :func:`get_alphabet()`, which specifies the alphabet the
:class:`Sequence` uses.
Parameters
----------
sequence : iterable object, optional
The symbol sequence, the :class:`Sequence` is initialized with.
For alphabets containing single letter strings, this parameter
may also be a :class`str` object.
By default the sequence is empty.
Attributes
----------
code : ndarray
The sequence code.
symbols : list
The list of symbols, represented by the sequence.
The list is generated by decoding the sequence code, when
this attribute is accessed. When this attribute is modified,
the new list of symbols is encoded into the sequence code.
alphabet : Alphabet
The alphabet of this sequence. Cannot be set.
Equal to `get_alphabet()`.
Examples
--------
Creating a DNA sequence from string and print the symbols and the
code:
>>> dna_seq = NucleotideSequence("ACGTA")
>>> print(dna_seq)
ACGTA
>>> print(dna_seq.code)
[0 1 2 3 0]
>>> print(dna_seq.symbols)
['A' 'C' 'G' 'T' 'A']
>>> print(list(dna_seq))
['A', 'C', 'G', 'T', 'A']
Sequence indexing:
>>> print(dna_seq[1:3])
CG
>>> print(dna_seq[[0,2,4]])
AGA
>>> print(dna_seq[np.array([False,False,True,True,True])])
GTA
Sequence manipulation:
>>> dna_copy = dna_seq.copy()
>>> dna_copy[2] = "C"
>>> print(dna_copy)
ACCTA
>>> dna_copy = dna_seq.copy()
>>> dna_copy[0:2] = dna_copy[3:5]
>>> print(dna_copy)
TAGTA
>>> dna_copy = dna_seq.copy()
>>> dna_copy[np.array([True,False,False,False,True])] = "T"
>>> print(dna_copy)
TCGTT
>>> dna_copy = dna_seq.copy()
>>> dna_copy[1:4] = np.array([0,1,2])
>>> print(dna_copy)
AACGA
Reverse sequence:
>>> dna_seq_rev = dna_seq.reverse()
>>> print(dna_seq_rev)
ATGCA
Concatenate the two sequences:
>>> dna_seq_concat = dna_seq + dna_seq_rev
>>> print(dna_seq_concat)
ACGTAATGCA
"""
def __init__(self, sequence=()):
self.symbols = sequence
def copy(self, new_seq_code=None):
"""
Copy the object.
Parameters
----------
new_seq_code : ndarray, optional
If this parameter is set, the sequence code is set to this
value, rather than the original sequence code.
Returns
-------
copy
A copy of this object.
"""
# Override in order to achieve better performance,
# in case only a subsequence is needed,
# because not the entire sequence code is copied then
clone = self.__copy_create__()
if new_seq_code is None:
clone.code = np.copy(self.code)
else:
clone.code = new_seq_code
self.__copy_fill__(clone)
return clone
@property
def symbols(self):
return self.get_alphabet().decode_multiple(self.code)
@symbols.setter
def symbols(self, value):
alph = self.get_alphabet()
dtype = Sequence._dtype(len(alph))
self._seq_code = alph.encode_multiple(value, dtype)
@property
def code(self):
return self._seq_code
@code.setter
def code(self, value):
dtype = Sequence._dtype(len(self.get_alphabet()))
if not isinstance(value, np.ndarray):
raise TypeError("Sequence code must be an integer ndarray")
self._seq_code = value.astype(dtype, copy=False)
@property
def alphabet(self):
return self.get_alphabet()
@abc.abstractmethod
def get_alphabet(self):
"""
Get the :class:`Alphabet` of the :class:`Sequence`.
This method must be overwritten, when subclassing
:class:`Sequence`.
Returns
-------
alphabet : Alphabet
:class:`Sequence` alphabet.
"""
pass
def reverse(self):
"""
Reverse the :class:`Sequence`.
Returns
-------
reversed : Sequence
The reversed :class:`Sequence`.
Examples
--------
>>> dna_seq = NucleotideSequence("ACGTA")
>>> dna_seq_rev = dna_seq.reverse()
>>> print(dna_seq_rev)
ATGCA
"""
reversed_code = np.flip(np.copy(self._seq_code), axis=0)
reversed = self.copy(reversed_code)
return reversed
def is_valid(self):
"""
Check, if the sequence contains a valid sequence code.
A sequence code is valid, if at each sequence position the
code is smaller than the size of the alphabet.
Invalid code means that the code cannot be decoded into
symbols. Furthermore invalid code can lead to serious
errors in alignments, since the substitution matrix
is indexed with an invalid index.
Returns
-------
valid : bool
True, if the sequence is valid, false otherwise.
"""
return (self.code < len(self.get_alphabet())).all()
def get_symbol_frequency(self):
"""
Get the number of occurences of each symbol in the sequence.
If a symbol does not occur in the sequence, but it is in the
alphabet, its number of occurences is 0.
Returns
-------
frequency : dict
A dictionary containing the symbols as keys and the
corresponding number of occurences in the sequence as
values.
"""
frequencies = {}
for code, symbol in enumerate(self.get_alphabet()):
frequencies[symbol] = len(np.nonzero((self._seq_code == code))[0])
return frequencies
def __getitem__(self, index):
alph = self.get_alphabet()
sub_seq = self._seq_code.__getitem__(index)
if isinstance(sub_seq, np.ndarray):
return self.copy(sub_seq)
else:
return alph.decode(sub_seq)
def __setitem__(self, index, item):
alph = self.get_alphabet()
if isinstance(index, numbers.Integral):
# Expect a single symbol
code = alph.encode(item)
else:
# Expect multiple symbols
if isinstance(item, Sequence):
code = item.code
elif isinstance(item, np.ndarray):
code = item
else:
# Default: item is iterable object of symbols
code = alph.encode_multiple(item)
self._seq_code.__setitem__(index, code)
def __len__(self):
return len(self._seq_code)
def __iter__(self):
alph = self.get_alphabet()
i = 0
while i < len(self):
yield alph.decode(self._seq_code[i])
i += 1
def __eq__(self, item):
if not isinstance(item, type(self)):
return False
if self.get_alphabet() != item.get_alphabet():
return False
return np.array_equal(self._seq_code, item._seq_code)
def __str__(self):
alph = self.get_alphabet()
if isinstance(alph, LetterAlphabet):
return alph.decode_multiple(self._seq_code, as_bytes=True)\
.tobytes().decode("ASCII")
else:
return "".join(alph.decode_multiple(self._seq_code))
def __add__(self, sequence):
if self.get_alphabet().extends(sequence.get_alphabet()):
new_code = np.concatenate((self._seq_code, sequence._seq_code))
new_seq = self.copy(new_code)
return new_seq
elif sequence.get_alphabet().extends(self.get_alphabet()):
new_code = np.concatenate((self._seq_code, sequence._seq_code))
new_seq = sequence.copy(new_code)
return new_seq
else:
raise ValueError("The sequences alphabets are not compatible")
@staticmethod
def _dtype(alphabet_size):
if alphabet_size <= _size_uint8:
return np.uint8
elif alphabet_size <= _size_uint16:
return np.uint16
elif alphabet_size <= _size_uint32:
return np.uint32
else:
return np.uint64
| [
"[email protected]"
]
| |
95ee6d9028cb4c1c7c5a614b96db2580eee8344c | e859d4604615e4ff3c6730554b12ae7b09e86286 | /django-stubs/db/models/fields/files.pyi | bb53d5944104eade0990047b3af0abafb3dbaff7 | [
"BSD-3-Clause"
]
| permissive | microblag/django-stubs | d91655c346279424cf5e57b80a0b104dceb86ddc | d0eb05832551d344f06ec3e83cb850866a4d37c2 | refs/heads/master | 2020-04-18T05:18:24.887114 | 2019-02-06T04:02:28 | 2019-02-06T04:02:28 | 167,273,694 | 0 | 0 | null | 2019-01-24T00:12:42 | 2019-01-24T00:12:42 | null | UTF-8 | Python | false | false | 2,954 | pyi | from typing import Any, Callable, List, Optional, Type, Union
from django.core.checks.messages import Error
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import FileSystemStorage, Storage
from django.db.models.base import Model
from django.db.models.fields import Field
from django.forms import fields as form_fields
class FieldFile(File):
instance: Model = ...
field: FileField = ...
storage: FileSystemStorage = ...
def __init__(self, instance: Model, field: FileField, name: Optional[str]) -> None: ...
file: Any = ...
@property
def path(self) -> str: ...
@property
def url(self) -> str: ...
@property
def size(self) -> int: ...
def save(self, name: str, content: File, save: bool = ...) -> None: ...
def delete(self, save: bool = ...) -> None: ...
@property
def closed(self) -> bool: ...
class FileDescriptor:
field: FileField = ...
def __init__(self, field: FileField) -> None: ...
def __get__(self, instance: Optional[Model], cls: Type[Model] = ...) -> Union[FieldFile, FileDescriptor]: ...
def __set__(self, instance: Model, value: Optional[Any]) -> None: ...
class FileField(Field):
attr_class: Any = ...
descriptor_class: Any = ...
description: Any = ...
storage: Any = ...
upload_to: Any = ...
def __init__(
self,
verbose_name: Optional[str] = ...,
name: Optional[str] = ...,
upload_to: Union[Callable, str] = ...,
storage: Optional[Storage] = ...,
**kwargs: Any
) -> None: ...
def check(self, **kwargs: Any) -> List[Error]: ...
def deconstruct(self) -> Any: ...
def get_internal_type(self) -> str: ...
def get_prep_value(self, value: Union[FieldFile, str]) -> str: ...
def pre_save(self, model_instance: Model, add: bool) -> FieldFile: ...
def generate_filename(self, instance: Optional[Model], filename: str) -> str: ...
def save_form_data(self, instance: Model, data: Optional[Union[bool, File, str]]) -> None: ...
def formfield(self, **kwargs: Any) -> form_fields.FileField: ...
class ImageFileDescriptor(FileDescriptor):
field: ImageField
def __set__(self, instance: Model, value: Optional[str]) -> None: ...
class ImageFieldFile(ImageFile, FieldFile):
field: ImageField
def delete(self, save: bool = ...) -> None: ...
class ImageField(FileField):
def __init__(
self,
verbose_name: Optional[str] = ...,
name: Optional[str] = ...,
width_field: Optional[str] = ...,
height_field: Optional[str] = ...,
**kwargs: Any
) -> None: ...
def check(self, **kwargs: Any) -> List[Any]: ...
def deconstruct(self) -> Any: ...
def update_dimension_fields(self, instance: Model, force: bool = ..., *args: Any, **kwargs: Any) -> None: ...
def formfield(self, **kwargs: Any) -> form_fields.ImageField: ...
| [
"[email protected]"
]
| |
a39a00acac47914e717411524682266198077482 | 7fb51ae4163aeea47d0fb434f28666ea99b104af | /app.py | 2cb0275c32bef3070e1b21c6218a864f8431cfd1 | []
| no_license | knowsuchagency/cdk-hello-apigw-asgi | 153eaae8d01a14e5886315122613c462ea90de70 | a47cdc58ddd9bb070419d4fbcfa1cf07fb3873f9 | refs/heads/master | 2022-12-28T15:44:05.585842 | 2020-10-18T18:17:15 | 2020-10-18T18:17:15 | 301,259,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | #!/usr/bin/env python3
from aws_cdk import core
from hello_apig_wsgi.hello_apig_wsgi_stack import HelloApigWsgiStack
from hello_apig_wsgi.pipeline_stack import PipelineStack
from pydantic import BaseSettings
class Config(BaseSettings):
"""https://pydantic-docs.helpmanual.io/usage/settings/"""
account: str = "385504394431"
region: str = "us-east-2"
gh_username: str = "knowsuchagency"
gh_repo: str = "cdk-hello-apigw-asgi"
if __name__ == "__main__":
config = Config()
app = core.App()
application_stack = HelloApigWsgiStack(app, "application")
pipeline_stack = PipelineStack(
app,
"pipeline",
config,
env={"account": config.account, "region": config.region},
)
app.synth()
| [
"[email protected]"
]
| |
d41ca9e5632d9d18c48449b696185e20724e3e04 | 8f7cd4def3b08def0199eaf0c911d3dc434c8c1f | /venv/Scripts/easy_install-script.py | 9bd09b08847442b354b77a0bf42013d55ffad20d | []
| no_license | colafishx/webPage1 | ba65599a8dbcee72b685516835d8317d382865eb | badcfb8b3f10186b9042fb798950a1c2d4bc0235 | refs/heads/master | 2020-06-10T06:08:38.716528 | 2019-06-25T00:48:19 | 2019-06-25T00:48:19 | 193,604,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | #!"C:\Users\Big data\PycharmProjects\webPage1\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
]
| |
d6a7ea4be0c3d75a3b28ac3ff8c9b5e269691de4 | b8ce3e24a63a776e59d46986fecd8de291e2ec58 | /DGTSQD/urls.py | 5cd17297b746411e488204f214ea6116290d1b7e | []
| no_license | SathyaprakashraiS/DGTSQD-DJANGO | a9303cd6f538fb8326e73b0284303583636f63ea | c020848b824a206419b308b29e6ead4dfdc9f814 | refs/heads/main | 2023-07-13T12:10:28.313059 | 2021-08-30T18:13:22 | 2021-08-30T18:13:22 | 319,416,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | """DGTSQD URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from main.views import comment
from django.conf.urls.static import static
from main.views import newspost
from main.views import eventpost
from main.views import profile
from main.views import addachievements
from main.views import DispProfile
import os
from main.views import viewprofile
from main.views import viewprofile
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('',include("main.urls")),
path('basic/',include("main.urls")),
path('comment/',include("main.urls")),
path('',include("django.contrib.auth.urls")),
path('register/',include("main.urls")),
path('profile/',include("main.urls")),
path('',include("django.contrib.auth.urls")),
path('addachievements/',include("main.urls")),
path('viewprofile/',include("main.urls")),
path('dispprofile/',include("main.urls")),
path('viewprofile/<tager>/',include("main.urls")),
]
urlpatterns = urlpatterns + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
#if settings.DEBUG:
# urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT + os.path.altsep) | [
"[email protected]"
]
| |
10f7c480f2212599841736bdfdf28fe0de43ab30 | 9183f1bc6a78c04552c4fbcf095415ed1603cc8b | /plants.py | e27d021cefa3c05b201c14a36e1417aa1412fe58 | []
| no_license | Alexander4127/plant-searcher-app | 4c942a8d0533d55ffac68fe0f897448db2b55545 | d120292a6ed753c9f848375ec139e91d0b70f8d5 | refs/heads/main | 2023-09-05T14:36:26.613002 | 2021-10-25T16:14:35 | 2021-10-25T16:14:35 | 385,708,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,297 | py | import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
class PlantFinder:
def __init__(self):
self._categories = []
self._spec_desc = []
self._data = pd.DataFrame()
self._pests = pd.DataFrame()
self._all_colours = set()
self.collect_info()
self.find_colours()
self.get_pests()
def collect_info(self):
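        # scrape the pro-landshaft.ru catalogue: gather the individual plant page links,
        # then pull each plant's photo, general and specific descriptions and category
        # flags into a single DataFrame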
number_pages = 29
plant_refs = []
for cur_number in range(1, number_pages):
url = f'http://www.pro-landshaft.ru/plants/catalog/{cur_number}/'
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
for tag in soup.find_all('li', soup.body.strong.parent.parent.parent.ul)[3:][:-14]:
plant_refs.append(tag.a['href'])
url = f'http://www.pro-landshaft.ru/plants/catalog/1/'
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
cat = soup.find_all('ul', soup.body.strong.parent.parent.parent)[1]
self._categories = [tag.text.strip() for tag in soup.find_all('li', cat)[-14:]]
photos = []
common_desc = []
plant_cat = []
pages_refs = []
for ref in plant_refs[:10]:
url = f'http://www.pro-landshaft.ru{ref}'
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
info = soup.body.find_all('p')
cur_cat = [tag.text.strip() for tag in info[1].find_all('a')]
first_type = 0
cur_photo = ''
while not info[first_type].text.startswith('Описание'):
if info[first_type].img and not cur_photo:
cur_photo = 'http://www.pro-landshaft.ru{}'.format(info[first_type].img['src'])
first_type += 1
if first_type == len(info):
first_type = 5
break
common_info = info[first_type].text.strip()[8:]
first_type += 1
if not common_info:
common_info = info[first_type].text.strip()
if info[first_type].img and not cur_photo:
cur_photo = 'http://www.pro-landshaft.ru{}'.format(info[first_type].img['src'].replace(' ', '%20'))
first_type += 1
if not common_info:
common_info = info[first_type].text.strip()
for cur_type in range(first_type, len(info)):
if info[first_type].img and not cur_photo:
cur_photo = 'http://www.pro-landshaft.ru{}'.format(info[first_type].img['src'].replace(' ', '%20'))
if info[cur_type].strong:
if info[cur_type].font or not info[cur_type].text.strip():
break
plant_cat.append([int(cat in cur_cat) for cat in self._categories])
photos.append(cur_photo)
common_desc.append(common_info)
pages_refs.append(url)
self._spec_desc.append(info[cur_type].text.strip())
names = [' '.join(string.split()[:2]).strip(',').strip(' –') for string in self._spec_desc]
df1 = pd.DataFrame(
{
'Name': names,
'General Description': common_desc,
'Special Description': self._spec_desc,
'Photo': photos,
'Link Page': pages_refs
}
)
df2 = pd.DataFrame(np.array(plant_cat), columns=self._categories)
self._data = pd.concat([df1, df2], axis=1)
def find_colours(self):
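        # build a set of colour word stems from colorscheme.ru and record, for every
        # plant, which of those colours occur in its specific description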
url = 'https://colorscheme.ru/color-names.html'
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
colours = set()
for tag in soup.find_all('td'):
if tag.text.strip():
word = tag.text.strip().split()[-1]
if 'а' < word[0] < 'я' or 'А' < word[0] < 'Я':
colours.add(word)
colours = list(colours)
for i in range(len(colours)):
colours[i] = colours[i].lower()
if '-' in colours[i]:
colours[i] = colours[i][colours[i].rfind('-') + 1:]
if colours[i].endswith('ый') or colours[i].endswith('ий'):
self._all_colours.add(colours[i][:-2])
colours_exist = [''] * len(self._spec_desc)
for i in range(len(self._spec_desc)):
string = self._spec_desc[i]
for colour in self._all_colours:
if colour in string:
colours_exist[i] += colour + ' '
self._data = pd.concat([self._data, pd.DataFrame({'Colours': colours_exist})], axis=1)
def get_pests(self):
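        # scrape the udec.ru pest catalogue: keep each pest's name, photo and link plus
        # the description paragraphs that precede the "Меры" (control measures) section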
photos = []
links = []
names = []
info = []
for j in range(1, 7):
url = f'http://www.udec.ru/vrediteli/page/{j}'
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
result = [child for child in soup.find('h1').parent.children][3].find_all('div')
for k in range(0, len(result), 2):
cur_pest = result[k]
finded_tags = cur_pest.find_all('a')
if len(finded_tags) < 2 or not self.check_pest(finded_tags[1].text):
continue
if 'belyanka' in finded_tags[0]['href']:
continue
photos.append(finded_tags[0].img['src'])
links.append(finded_tags[0]['href'])
names.append(finded_tags[1].text.strip())
classes = BeautifulSoup(requests.get(links[-1]).content, 'html.parser').find_all('p')
for i in range(len(classes)):
if self.check_obj(classes[i]) and not self.check_obj(classes[i + 1]):
all_info = ''
counter = i + 1
while counter < len(classes) and not \
((not classes[counter].strong and classes[counter].text.strip().startswith('Меры')) or
(classes[counter].strong and classes[counter].strong.text.strip().startswith('Меры'))):
all_info += classes[counter].text.strip()
counter += 1
info.append(all_info)
break
self._pests = pd.DataFrame(
{
'Name': names,
'Info': info,
'Photo': photos,
'Link': links
}
)
def __call__(self, plant_types, plant_colour, plant_name):
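        # pick a random plant that matches either the name substring or the requested
        # categories and colour, then try to attach a pest whose description mentions
        # the stem of the plant's genus name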
plant_name = plant_name.lower()
if plant_name:
indexes = self._data.apply(lambda row: plant_name in row['Name'].lower(), axis=1)
else:
indexes = self._data.apply(lambda row: self.match_query(row, plant_types, plant_colour), axis=1)
if self._data[indexes].empty:
return None
result = self._data[indexes].sample(1)
form_data = {
"res_plant_name": result["Name"].values[0],
"general_desc": result["General Description"].values[0],
"spec_desc": result["Special Description"].values[0],
"photo_ref": result["Photo"].values[0],
"page_ref": result["Link Page"].values[0]
}
name = result['Name'].values[0]
key_word = name.split()[0][:-1].lower()
indexes = self._pests.apply(lambda row: key_word in row['Info'].lower(), axis=1)
if not self._pests[indexes].empty:
pest = self._pests[indexes].sample(1)
form_data['pest_name'] = pest['Name'].values[0]
form_data['pest_info'] = pest['Info'].values[0]
form_data['pest_photo'] = pest['Photo'].values[0]
            form_data['pest_link'] = pest['Link'].values[0]
else:
form_data['pest_name'] = 'nothing'
return form_data
@staticmethod
def check_pest(string):
for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
if letter in string:
return True
return False
@staticmethod
def good_start(string):
for start in ['Семья', 'Семейство', 'Ряд']:
if string.startswith(start):
return True
return False
def check_obj(self, obj):
return (not obj.strong and self.good_start(obj.text.strip())) or \
(obj.strong and self.good_start(obj.strong.text.strip()))
@staticmethod
def match_query(row, cur_types, cur_colour):
for cur_type in cur_types:
if not row[cur_type]:
return False
return cur_colour[:-2] in row['Colours']
class RandomWeedInfo:
def __init__(self):
self._weeds = pd.DataFrame()
self.get_weeds()
@staticmethod
def check_weed(string):
for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
if letter in string:
return True
return False
def get_weeds(self):
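        # same scraping pattern as PlantFinder.get_pests, but for the weed catalogue;
        # here essentially all description paragraphs on the weed's page are kept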
photo = []
link = []
info = []
name = []
for k in range(1, 4):
url = f'http://www.udec.ru/sornyaki/page/{k}'
soup = BeautifulSoup(requests.get(url).content, 'html.parser')
result = soup.body.find('h1').parent.div.find_all('div')
for i in range(1, len(result), 2):
found_tags = result[i].find_all('a')
if len(found_tags) < 2 or not self.check_weed(found_tags[1].text):
continue
photo.append(found_tags[0].img['src'])
link.append(found_tags[0]['href'])
name.append(found_tags[1].text.strip())
classes = BeautifulSoup(requests.get(link[-1]).content, 'html.parser').find_all('p')[3:][:-1]
all_info = ''
for cur_class in classes:
all_info += cur_class.text.strip() + '\n'
info.append(all_info)
self._weeds = pd.DataFrame(
{
'Name': name,
'Info': info,
'Photo': photo,
'Link': link
}
)
def __call__(self):
return self._weeds.sample(1)
| [
"[email protected]"
]
| |
28d3b44b77faf8fb27a807ede09802bfc537eb6d | b788c477ea949e5ca17a5b3d9c24bdd07c6f7a7a | /custommixins/settings.py | b3a98dfb16d7d9390d9b93b31faa5a85f53d10d5 | []
| no_license | xettrisomeman/MixinUpdated | c756723fca9469f8a78bd9b70dcdfa1badaab8c8 | eabb08051ab3dcf1fca41d6efd8e728b6d8f8877 | refs/heads/master | 2020-06-26T09:16:48.097258 | 2019-07-31T07:55:44 | 2019-07-31T07:55:44 | 199,593,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,916 | py |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w5va1m=vo!2a-+!9own9l&3rnhce^3e_=7hfbq=$d!tqo+x!0_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'djangomixin.apps.DjangomixinConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#rest_framework
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'custommixins.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR , 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'custommixins.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
a385b95268ee064344dcc0651454f2a1c24477fe | b73d6665b42ab26f2fcb0a8c4d166f7379d6bf77 | /categories/migrations/0002_auto_20191206_0812.py | 0a673deacd314dc9e4cd470fe0bdb7fd9868c7f4 | []
| no_license | JeppeAug/chavolve-project | 8ba2377d569160c27dfbc4338f2e3dea0e8c9ef4 | 6e20c021ae1b28c3a4fc3138b2f5a64dfb400dde | refs/heads/master | 2020-09-26T12:12:38.129267 | 2019-12-06T12:37:00 | 2019-12-06T12:37:00 | 226,252,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | # Generated by Django 3.0 on 2019-12-06 07:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='description',
field=models.CharField(default='0000000', max_length=200),
),
migrations.AddField(
model_name='category',
name='headline',
field=models.CharField(default='0000000', max_length=50),
),
migrations.AddField(
model_name='category',
name='image',
field=models.ImageField(default='download.jpg', upload_to='images/'),
),
]
| [
"[email protected]"
]
| |
4c838e38957b8e229ba09084ca17679920d4a87a | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/XYPLEX-LAT1-MIB.py | 9115a68e1bcae47973be45f12f9172c3ebd1ff1c | [
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 19,721 | py | #
# PySNMP MIB module XYPLEX-LAT1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/XYPLEX-LAT1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:40:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, IpAddress, Counter32, enterprises, NotificationType, Integer32, Bits, iso, Counter64, Gauge32, TimeTicks, Unsigned32, ModuleIdentity, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "IpAddress", "Counter32", "enterprises", "NotificationType", "Integer32", "Bits", "iso", "Counter64", "Gauge32", "TimeTicks", "Unsigned32", "ModuleIdentity", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
xyplex = MibIdentifier((1, 3, 6, 1, 4, 1, 33))
lat = MibIdentifier((1, 3, 6, 1, 4, 1, 33, 3))
latAnnounceServices = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latAnnounceServices.setStatus('mandatory')
latCircuitTimer = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 200))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latCircuitTimer.setStatus('mandatory')
latIdentificationLengthLimit = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latIdentificationLengthLimit.setStatus('mandatory')
latKeepaliveTimer = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 180))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latKeepaliveTimer.setStatus('mandatory')
latMulticastTimer = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 180))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latMulticastTimer.setStatus('mandatory')
latNodeLimit = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latNodeLimit.setStatus('mandatory')
latNumber = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32767))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latNumber.setStatus('mandatory')
latRetransmitLimit = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(4, 120))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latRetransmitLimit.setStatus('mandatory')
latLocalServiceGroups = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latLocalServiceGroups.setStatus('mandatory')
latGroupPurge = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latGroupPurge.setStatus('mandatory')
latNodePurge = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latNodePurge.setStatus('mandatory')
latNodesRejected = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodesRejected.setStatus('mandatory')
latInMessages = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latInMessages.setStatus('mandatory')
latOutMessages = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latOutMessages.setStatus('mandatory')
latInSessionsAccepted = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latInSessionsAccepted.setStatus('mandatory')
latInSessionsRejected = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latInSessionsRejected.setStatus('mandatory')
latAddressChange = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latAddressChange.setStatus('mandatory')
latInDuplicates = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latInDuplicates.setStatus('mandatory')
latOutRetransmits = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latOutRetransmits.setStatus('mandatory')
latInBadMessages = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latInBadMessages.setStatus('mandatory')
latInBadSlots = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latInBadSlots.setStatus('mandatory')
latInBadMulticasts = MibScalar((1, 3, 6, 1, 4, 1, 33, 3, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latInBadMulticasts.setStatus('mandatory')
latPortTable = MibTable((1, 3, 6, 1, 4, 1, 33, 3, 23), )
if mibBuilder.loadTexts: latPortTable.setStatus('mandatory')
latPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 3, 23, 1), ).setIndexNames((0, "XYPLEX-LAT1-MIB", "latPortIndex"))
if mibBuilder.loadTexts: latPortEntry.setStatus('mandatory')
latPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 23, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latPortIndex.setStatus('mandatory')
latPortAuthorizedGroups = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 23, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latPortAuthorizedGroups.setStatus('mandatory')
latPortAutoPrompt = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 23, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latPortAutoPrompt.setStatus('mandatory')
latPortCurrentGroups = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 23, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latPortCurrentGroups.setStatus('mandatory')
latPortRemoteModification = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 23, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latPortRemoteModification.setStatus('mandatory')
latOfferedServiceTable = MibTable((1, 3, 6, 1, 4, 1, 33, 3, 24), )
if mibBuilder.loadTexts: latOfferedServiceTable.setStatus('mandatory')
latOfferedServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 3, 24, 1), ).setIndexNames((0, "XYPLEX-LAT1-MIB", "latOfferedServiceName"))
if mibBuilder.loadTexts: latOfferedServiceEntry.setStatus('mandatory')
latOfferedServiceName = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 24, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latOfferedServiceName.setStatus('mandatory')
latOfferedServiceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 24, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2))).clone('valid')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latOfferedServiceStatus.setStatus('mandatory')
latOfferedServiceAllowConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 24, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latOfferedServiceAllowConnections.setStatus('mandatory')
latOfferedServiceIdentification = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 24, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latOfferedServiceIdentification.setStatus('mandatory')
latOfferedServicePassword = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 24, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latOfferedServicePassword.setStatus('mandatory')
latOfferedServicePortMap = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 24, 1, 6), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latOfferedServicePortMap.setStatus('mandatory')
latOfferedServiceQueuing = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 24, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latOfferedServiceQueuing.setStatus('mandatory')
latVisibleServiceTable = MibTable((1, 3, 6, 1, 4, 1, 33, 3, 25), )
if mibBuilder.loadTexts: latVisibleServiceTable.setStatus('mandatory')
latVisibleServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 3, 25, 1), ).setIndexNames((0, "XYPLEX-LAT1-MIB", "latVisibleServiceName"))
if mibBuilder.loadTexts: latVisibleServiceEntry.setStatus('mandatory')
latVisibleServiceName = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 25, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: latVisibleServiceName.setStatus('mandatory')
latVisibleServiceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 25, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("available", 1), ("unavailable", 2), ("unknown", 3), ("unreachable", 4), ("reachable", 5), ("connected", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: latVisibleServiceStatus.setStatus('mandatory')
latVisibleServiceNode = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 25, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: latVisibleServiceNode.setStatus('mandatory')
latVisibleServiceConnectedSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 25, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latVisibleServiceConnectedSessions.setStatus('mandatory')
latVisibleServiceIdentification = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 25, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: latVisibleServiceIdentification.setStatus('mandatory')
latVisibleServiceRating = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 25, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latVisibleServiceRating.setStatus('mandatory')
latNodeTable = MibTable((1, 3, 6, 1, 4, 1, 33, 3, 26), )
if mibBuilder.loadTexts: latNodeTable.setStatus('mandatory')
latNodeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 3, 26, 1), ).setIndexNames((0, "XYPLEX-LAT1-MIB", "latNodeName"))
if mibBuilder.loadTexts: latNodeEntry.setStatus('mandatory')
latNodeName = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeName.setStatus('mandatory')
latNodeStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("available", 1), ("unavailable", 2), ("unknown", 3), ("unreachable", 4), ("reachable", 5), ("connected", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeStatus.setStatus('mandatory')
latNodeConnectedSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeConnectedSessions.setStatus('mandatory')
latNodeAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeAddress.setStatus('mandatory')
latNodeDataLinkFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeDataLinkFrame.setStatus('mandatory')
latNodeIdentification = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeIdentification.setStatus('mandatory')
latNodeGroups = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latNodeGroups.setStatus('mandatory')
latNodeServiceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeServiceNumber.setStatus('mandatory')
latNodeZero = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("execute", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: latNodeZero.setStatus('mandatory')
latNodeZeroTime = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 10), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeZeroTime.setStatus('mandatory')
latNodeInMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInMessages.setStatus('mandatory')
latNodeOutMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeOutMessages.setStatus('mandatory')
latNodeInSlots = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInSlots.setStatus('mandatory')
latNodeOutSlots = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeOutSlots.setStatus('mandatory')
latNodeInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInBytes.setStatus('mandatory')
latNodeOutBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeOutBytes.setStatus('mandatory')
latNodeAddressChange = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeAddressChange.setStatus('mandatory')
latNodeInDuplicates = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInDuplicates.setStatus('mandatory')
latNodeOutRetransmits = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeOutRetransmits.setStatus('mandatory')
latNodeInBadMessages = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInBadMessages.setStatus('mandatory')
latNodeInBadSlots = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInBadSlots.setStatus('mandatory')
latNodeInSessionsAccepted = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInSessionsAccepted.setStatus('mandatory')
latNodeInSessionsRejected = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 3, 26, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: latNodeInSessionsRejected.setStatus('mandatory')
mibBuilder.exportSymbols("XYPLEX-LAT1-MIB", latNodeLimit=latNodeLimit, latOfferedServiceStatus=latOfferedServiceStatus, latInBadSlots=latInBadSlots, latOfferedServiceIdentification=latOfferedServiceIdentification, latMulticastTimer=latMulticastTimer, latOfferedServiceEntry=latOfferedServiceEntry, latVisibleServiceTable=latVisibleServiceTable, latNodeStatus=latNodeStatus, xyplex=xyplex, latOfferedServiceQueuing=latOfferedServiceQueuing, latVisibleServiceConnectedSessions=latVisibleServiceConnectedSessions, latPortCurrentGroups=latPortCurrentGroups, latOutMessages=latOutMessages, latNodeInBytes=latNodeInBytes, latVisibleServiceRating=latVisibleServiceRating, latInBadMessages=latInBadMessages, lat=lat, latGroupPurge=latGroupPurge, latNodeZero=latNodeZero, latKeepaliveTimer=latKeepaliveTimer, latInMessages=latInMessages, latInSessionsRejected=latInSessionsRejected, latNodeAddressChange=latNodeAddressChange, latCircuitTimer=latCircuitTimer, latNodeOutRetransmits=latNodeOutRetransmits, latRetransmitLimit=latRetransmitLimit, latOfferedServiceAllowConnections=latOfferedServiceAllowConnections, latOfferedServicePortMap=latOfferedServicePortMap, latVisibleServiceNode=latVisibleServiceNode, latAnnounceServices=latAnnounceServices, latNodeZeroTime=latNodeZeroTime, latNodeDataLinkFrame=latNodeDataLinkFrame, latNodeTable=latNodeTable, latVisibleServiceStatus=latVisibleServiceStatus, latNodeConnectedSessions=latNodeConnectedSessions, latNodeInSessionsRejected=latNodeInSessionsRejected, latNodeInBadSlots=latNodeInBadSlots, latOfferedServiceName=latOfferedServiceName, latNodeEntry=latNodeEntry, latNodeOutSlots=latNodeOutSlots, latInSessionsAccepted=latInSessionsAccepted, latVisibleServiceName=latVisibleServiceName, latNodePurge=latNodePurge, latNodeOutMessages=latNodeOutMessages, latOfferedServiceTable=latOfferedServiceTable, latInBadMulticasts=latInBadMulticasts, latNodeInMessages=latNodeInMessages, latNodeInSlots=latNodeInSlots, latPortTable=latPortTable, latOfferedServicePassword=latOfferedServicePassword, latNodeGroups=latNodeGroups, latPortAutoPrompt=latPortAutoPrompt, latLocalServiceGroups=latLocalServiceGroups, latNodeServiceNumber=latNodeServiceNumber, latPortEntry=latPortEntry, latPortRemoteModification=latPortRemoteModification, latIdentificationLengthLimit=latIdentificationLengthLimit, latNumber=latNumber, latVisibleServiceIdentification=latVisibleServiceIdentification, latNodeIdentification=latNodeIdentification, latNodeOutBytes=latNodeOutBytes, latNodeInBadMessages=latNodeInBadMessages, latInDuplicates=latInDuplicates, latNodeName=latNodeName, latVisibleServiceEntry=latVisibleServiceEntry, latAddressChange=latAddressChange, latPortIndex=latPortIndex, latNodeAddress=latNodeAddress, latNodeInDuplicates=latNodeInDuplicates, latNodeInSessionsAccepted=latNodeInSessionsAccepted, latPortAuthorizedGroups=latPortAuthorizedGroups, latNodesRejected=latNodesRejected, latOutRetransmits=latOutRetransmits)
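# Usage sketch (illustrative): a compiled MIB module such as this one can be
# loaded with pysnmp's MibBuilder; the directory path below is an assumption:
#
#   from pysnmp.smi import builder, view
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
#   mibBuilder.loadModules('XYPLEX-LAT1-MIB')
#   mibView = view.MibViewController(mibBuilder)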
| [
"[email protected]"
]
| |
bb30f670495e905e6715106618fe9e77f8760979 | fe9fb8292ae68eebe0ed863edb103075029d065c | /Transcriptomics/transcriptomics_1.py | 8ef391cc15cffdbbb98ef4ea48ea3a8752a9f704 | []
| no_license | francescajean/PFB-problemsets | 8aef06b3137e9d8008998a613f92d144d867efea | c521d1fb2d46d47952dad41cbabeb0079c77c721 | refs/heads/master | 2020-04-01T11:29:32.479869 | 2018-10-27T15:19:23 | 2018-10-27T15:19:23 | 153,164,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | f#!/usr/bin/env python3
import sys
import re
#import pysam
from Bio import SeqIO
kmer_size = int(sys.argv[1])        # k-mer length
input_file = sys.argv[2]
top_kmer_number = int(sys.argv[3])  # number of top k-mers to report
kmer_dict = {}
list_sequences = []
for seq_record in SeqIO.parse(input_file,'fastq'):
sequences = str(seq_record.seq)
seq_strip = sequences.rstrip()
list_sequences.append(seq_strip)
#print(list_sequences)
#print(len(seq_strip))
for sequence in list_sequences:
sequence = list(sequence)
for position in range(0,len(sequence)):
        position_end = position + kmer_size
if position_end <= len(sequence):
kmer_list = sequence[position:position_end]
kmer_str = ''.join(kmer_list)
if kmer_str not in kmer_dict.keys():
kmer_dict[kmer_str] = 1
else:
kmer_dict[kmer_str] += 1
#print(kmer_dict)
kmer_sorted = sorted(kmer_dict, key=kmer_dict.get, reverse=True)
top_kmer = kmer_sorted[0:top_kmer_number]
for kmer in top_kmer:
print(kmer,'\t',kmer_dict[kmer])
#print(kmer_sorted)
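# Example invocation (file name is illustrative):
#   python transcriptomics_1.py 8 reads.fastq 10
# counts every 8-mer in reads.fastq and prints the 10 most frequent k-mers with their counts.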
| [
"[email protected]"
]
| |
31ce60f8f2d8671df33b9aecdd9ac9385f959a49 | 2949466be9b2761a8d8945938b8ed5be8bdc2fa7 | /第9回/a.py | 7916cc33209d0fd0e2132f3a809af4b36e2122e0 | []
| no_license | greedtn/EEIC-Algorithms2021 | ab1087977e45fb6d386bff9db8ae4984363b203c | d38235f776ad51fac93be5a7972a68299a7e0706 | refs/heads/main | 2023-06-12T09:48:16.287802 | 2021-07-12T12:49:09 | 2021-07-12T12:49:09 | 357,838,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import sys
import resource
sys.setrecursionlimit(1000000)
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
N, M, S, T = map(int, input().split())
S -= 1
T -= 1
edges = [[] for _ in range(N)]
for _ in range(M):
a, b = map(int, input().split())
edges[a-1].append(b-1)
edges[b-1].append(a-1)
done = [False] * N
done[S] = True
def dfs(edges, v):
done[v] = True
for next_v in edges[v]:
if (next_v == T):
print("Yes")
exit()
if (done[next_v]):
continue
dfs(edges, next_v)
dfs(edges, S)
print("No")
| [
"[email protected]"
]
| |
534fd9302ed0fb08767eb5f5e4e6d9586c986e98 | 95e1bc0785c88b77812872543db4d1ab9f3d126e | /model/utils.py | 27b3eafe28bd8a4cbd71cbe70312a6ad69629f48 | [
"Apache-2.0"
]
| permissive | renhongkai/lstm-crf | 7635a098cad3ba4c587b1f4bc704bdb5fcd41f96 | cb47d7d865152893a06fbb890a5cbaa9fb7d03c0 | refs/heads/master | 2021-05-06T12:47:30.929889 | 2017-12-06T00:14:58 | 2017-12-06T00:14:58 | 113,182,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,197 | py | """
.. module:: utils
:synopsis: utility tools
.. moduleauthor:: Liyuan Liu, Frank Xu
"""
import codecs
import csv
import itertools
from functools import reduce
import numpy as np
import shutil
import torch
import json
import torch.nn as nn
import torch.nn.init
from model.ner_dataset import *
zip = getattr(itertools, 'izip', zip)
def to_scalar(var):
"""change the first element of a tensor to scalar
"""
return var.view(-1).data.tolist()[0]
def argmax(vec):
"""helper function to calculate argmax of input vector at dimension 1
"""
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def log_sum_exp(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
_, idx = torch.max(vec, 1) # B * 1 * M
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M
return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size) # B * M
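# Illustrative sanity check (values assumed): for vec of shape (1, 3, 1) holding
# [1.0, 2.0, 3.0] along dim 1, log_sum_exp(vec, 1) ~= log(e^1 + e^2 + e^3) ~= 3.4076,
# the same value as torch.log(torch.exp(vec).sum(1)) but computed in a numerically stable way.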
def switch(vec1, vec2, mask):
"""
switch function for pytorch
args:
vec1 (any size) : input tensor corresponding to 0
        vec2 (same size as vec1) : input tensor corresponding to 1
        mask (same size as vec1) : input tensor whose elements are 0 or 1
return:
vec (*)
"""
catvec = torch.cat([vec1.view(-1, 1), vec2.view(-1, 1)], dim=1)
switched_vec = torch.gather(catvec, 1, mask.long().view(-1, 1))
return switched_vec.view(-1)
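# Illustrative example: switch(vec1=[a1, a2], vec2=[b1, b2], mask=[0, 1]) returns [a1, b2];
# a 0 in the mask selects the element from vec1 and a 1 selects it from vec2.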
def encode2char_safe(input_lines, char_dict):
"""
get char representation of lines
args:
input_lines (list of strings) : input corpus
char_dict (dictionary) : char-level dictionary
return:
forw_lines
"""
unk = char_dict['<u>']
forw_lines = [list(map(lambda m: list(map(lambda t: char_dict.get(t, unk), m)), line)) for line in input_lines]
return forw_lines
def concatChar(input_lines, char_dict):
"""
concat char into string
args:
input_lines (list of list of char) : input corpus
char_dict (dictionary) : char-level dictionary
return:
forw_lines
"""
features = [[char_dict[' ']] + list(reduce(lambda x, y: x + [char_dict[' ']] + y, sentence)) + [char_dict['\n']] for sentence in input_lines]
return features
def encode_safe(input_lines, word_dict, unk):
"""
encode list of strings into word-level representation with unk
"""
lines = list(map(lambda t: list(map(lambda m: word_dict.get(m, unk), t)), input_lines))
return lines
def encode(input_lines, word_dict):
"""
encode list of strings into word-level representation
"""
lines = list(map(lambda t: list(map(lambda m: word_dict[m], t)), input_lines))
return lines
def encode2Tensor(input_lines, word_dict, unk):
"""
encode list of strings into word-level representation (tensor) with unk
"""
lines = list(map(lambda t: torch.LongTensor(list(map(lambda m: word_dict.get(m, unk), t))), input_lines))
return lines
def generate_corpus_char(lines, if_shrink_c_feature=False, c_thresholds=1, if_shrink_w_feature=False, w_thresholds=1):
"""
generate label, feature, word dictionary, char dictionary and label dictionary
args:
lines : corpus
if_shrink_c_feature: whether shrink char-dictionary
c_threshold: threshold for shrinking char-dictionary
if_shrink_w_feature: whether shrink word-dictionary
w_threshold: threshold for shrinking word-dictionary
"""
features, labels, feature_map, label_map = generate_corpus(lines, if_shrink_feature=if_shrink_w_feature, thresholds=w_thresholds)
char_count = dict()
for feature in features:
for word in feature:
for tup in word:
if tup not in char_count:
char_count[tup] = 0
else:
char_count[tup] += 1
if if_shrink_c_feature:
shrink_char_count = [k for (k, v) in iter(char_count.items()) if v >= c_thresholds]
char_map = {shrink_char_count[ind]: ind for ind in range(0, len(shrink_char_count))}
else:
char_map = {k: v for (v, k) in enumerate(char_count.keys())}
char_map['<u>'] = len(char_map) # unk for char
char_map[' '] = len(char_map) # concat for char
char_map['\n'] = len(char_map) # eof for char
return features, labels, feature_map, label_map, char_map
def shrink_features(feature_map, features, thresholds):
"""
filter un-common features by threshold
"""
    # feature_count counts how many times each word (i.e. each feature) occurs
    feature_count = {k: 0 for (k, v) in iter(feature_map.items())}  # initialise every key's count to 0
for feature_list in features:
for feature in feature_list:
feature_count[feature] += 1
shrinked_feature_count = [k for (k, v) in iter(feature_count.items()) if v >= thresholds]
feature_map = {shrinked_feature_count[ind]: (ind + 1) for ind in range(0, len(shrinked_feature_count))}
#inserting unk to be 0 encoded
feature_map['<unk>'] = 0
#inserting eof
feature_map['<eof>'] = len(feature_map)
return feature_map
def generate_corpus(lines, if_shrink_feature=False, thresholds=1):
# print("--------",if_shrink_feature)
"""
generate label, feature, word dictionary and label dictionary
args:
lines : corpus
if_shrink_feature: whether shrink word-dictionary
threshold: threshold for shrinking word-dictionary
"""
features = list()
labels = list()
tmp_fl = list()
tmp_ll = list()
feature_map = dict()
label_map = dict()
for line in lines:
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
tmp_fl.append(line[0])
if line[0] not in feature_map:
feature_map[line[0]] = len(feature_map) + 1 #0 is for unk
tmp_ll.append(line[-1])
if line[-1] not in label_map:
label_map[line[-1]] = len(label_map)
elif len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
tmp_fl = list()
tmp_ll = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
label_map['<start>'] = len(label_map)
label_map['<pad>'] = len(label_map)
if if_shrink_feature:
        feature_map = shrink_features(feature_map, features, thresholds)  # thresholds is 0 here, so no word is pruned; only <unk> and <eof> are added
else:
#inserting unk to be 0 encoded
feature_map['<unk>'] = 0
#inserting eof
feature_map['<eof>'] = len(feature_map)
return features, labels, feature_map, label_map
def read_corpus(lines):
"""
    convert corpus into features and labels
"""
features = list()
labels = list()
tmp_fl = list()
tmp_ll = list()
for line in lines:
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
tmp_fl.append(line[0])
tmp_ll.append(line[-1])
elif len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
tmp_fl = list()
tmp_ll = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
    # features holds the sentences (len(features) = number of sentences); labels holds each sentence's per-token tags
return features, labels
def read_features(lines, multi_docs = True):
"""
convert un-annotated corpus into features
"""
if multi_docs:
documents = list()
features = list()
tmp_fl = list()
for line in lines:
if_doc_end = (len(line) > 10 and line[0:10] == '-DOCSTART-')
if not (line.isspace() or if_doc_end):
line = line.split()[0]
tmp_fl.append(line)
else:
if len(tmp_fl) > 0:
features.append(tmp_fl)
tmp_fl = list()
if if_doc_end and len(features) > 0:
documents.append(features)
features = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
if len(features) >0:
documents.append(features)
return documents
else:
features = list()
tmp_fl = list()
for line in lines:
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.split()[0]
tmp_fl.append(line)
elif len(tmp_fl) > 0:
features.append(tmp_fl)
tmp_fl = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
return features
def shrink_embedding(feature_map, word_dict, word_embedding, caseless):
"""
shrink embedding dictionary to in-doc words only
"""
if caseless:
feature_map = set([k.lower() for k in feature_map.keys()])
new_word_list = [k for k in word_dict.keys() if (k in feature_map)]
new_word_dict = {k:v for (v, k) in enumerate(new_word_list)}
new_word_list_ind = torch.LongTensor([word_dict[k] for k in new_word_list])
new_embedding = word_embedding[new_word_list_ind]
return new_word_dict, new_embedding
def encode_corpus(lines, f_map, l_map, if_lower = False):
"""
encode corpus into features and labels
"""
tmp_fl = []
tmp_ll = []
features = []
labels = []
for line in lines:
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
tmp_fl.append(line[0])
tmp_ll.append(line[-1])
elif len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
tmp_fl = list()
tmp_ll = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
if if_lower:
features = list(map(lambda t: list(map(lambda x: x.lower(), t)), features))
feature_e = encode_safe(features, f_map, f_map['<unk>'])
label_e = encode(labels, l_map)
return feature_e, label_e
def encode_corpus_c(lines, f_map, l_map, c_map):
"""
encode corpus into features (both word-level and char-level) and labels
"""
tmp_fl = []
tmp_ll = []
features = []
labels = []
for line in lines:
if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
line = line.rstrip('\n').split()
tmp_fl.append(line[0])
tmp_ll.append(line[-1])
elif len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
tmp_fl = list()
tmp_ll = list()
if len(tmp_fl) > 0:
features.append(tmp_fl)
labels.append(tmp_ll)
feature_c = encode2char_safe(features, c_map)
feature_e = encode_safe(features, f_map, f_map['<unk>'])
label_e = encode(labels, l_map)
return feature_c, feature_e, label_e
def load_embedding(emb_file, delimiter, feature_map, caseless, unk, shrink_to_train=False):
"""
load embedding
"""
if caseless:
feature_set = set([key.lower() for key in feature_map])
else:
feature_set = set([key for key in feature_map])
word_dict = dict()
embedding_array = list()
for line in open(emb_file, 'r'):
line = line.split(delimiter)
vector = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))
if shrink_to_train and line[0] not in feature_set:
continue
if line[0] == unk:
word_dict['<unk>'] = len(word_dict)
else:
word_dict[line[0]] = len(word_dict)
embedding_array.append(vector)
embedding_tensor_1 = torch.FloatTensor(np.asarray(embedding_array))
emb_len = embedding_tensor_1.size(1)
rand_embedding_count = 0
for key in feature_map:
if caseless:
key = key.lower()
if key not in word_dict:
word_dict[key] = len(word_dict)
rand_embedding_count += 1
rand_embedding_tensor = torch.FloatTensor(rand_embedding_count, emb_len)
init_embedding(rand_embedding_tensor)
embedding_tensor = torch.cat((embedding_tensor_1, rand_embedding_tensor), 0)
return word_dict, embedding_tensor
def load_embedding_wlm(emb_file, delimiter, feature_map, full_feature_set, caseless, unk, emb_len, shrink_to_train=False, shrink_to_corpus=False):
"""
load embedding, indoc words would be listed before outdoc words
args:
emb_file: path to embedding file
delimiter: delimiter of lines
feature_map: word dictionary
full_feature_set: all words in the corpus
caseless: convert into casesless style
unk: string for unknown token
emb_len: dimension of embedding vectors
shrink_to_train: whether to shrink out-of-training set or not
shrink_to_corpus: whether to shrink out-of-corpus or not
"""
if caseless:
feature_set = set([key.lower() for key in feature_map])
full_feature_set = set([key.lower() for key in full_feature_set])
else:
feature_set = set([key for key in feature_map])
full_feature_set = set([key for key in full_feature_set])
#ensure <unk> is 0
word_dict = {v:(k+1) for (k,v) in enumerate(feature_set - set(['<unk>']))}
word_dict['<unk>'] = 0
in_doc_freq_num = len(word_dict)
rand_embedding_tensor = torch.FloatTensor(in_doc_freq_num, emb_len)
init_embedding(rand_embedding_tensor)
indoc_embedding_array = list()
# 遍历预训练的词向量,找到在预训练的词向量中,也在训练数据中的词放入indoc_word_array
indoc_word_array = list()
outdoc_embedding_array = list()
    # walk over the pretrained embeddings; words that do not appear in the corpus go into outdoc_word_array
outdoc_word_array = list()
for line in open(emb_file, 'r'):
line = line.split(delimiter)
vector = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))
if shrink_to_train and line[0] not in feature_set:
continue
if line[0] == unk:
rand_embedding_tensor[0] = torch.FloatTensor(vector) #unk is 0
elif line[0] in word_dict:
rand_embedding_tensor[word_dict[line[0]]] = torch.FloatTensor(vector)
elif line[0] in full_feature_set:
indoc_embedding_array.append(vector)
indoc_word_array.append(line[0])
elif not shrink_to_corpus:
outdoc_word_array.append(line[0])
outdoc_embedding_array.append(vector)
embedding_tensor_0 = torch.FloatTensor(np.asarray(indoc_embedding_array))
if not shrink_to_corpus:
embedding_tensor_1 = torch.FloatTensor(np.asarray(outdoc_embedding_array))
    word_emb_len = embedding_tensor_0.size(1)  # embedding_tensor_0.size() is e.g. (3, 100); size(1) takes the second dimension, 100
assert(word_emb_len == emb_len)
if shrink_to_corpus:
embedding_tensor = torch.cat([rand_embedding_tensor, embedding_tensor_0], 0)
else:
embedding_tensor = torch.cat([rand_embedding_tensor, embedding_tensor_0, embedding_tensor_1], 0)
    print(embedding_tensor.size())  # e.g. torch.Size([9, 100]): 7 pretrained words plus <unk> and <eof>
for word in indoc_word_array:
word_dict[word] = len(word_dict)
in_doc_num = len(word_dict)
if not shrink_to_corpus:
for word in outdoc_word_array:
word_dict[word] = len(word_dict)
return word_dict, embedding_tensor, in_doc_num
def calc_threshold_mean(features):
"""
calculate the threshold for bucket by mean
"""
    lines_len = list(map(lambda t: len(t) + 1, features))  # add 1 to each sentence length because the label sequence gets an extra <start>
    average = int(sum(lines_len) / len(lines_len))  # total length divided by the number of sentences: the average sentence length
    lower_line = list(filter(lambda t: t < average, lines_len))  # sentences shorter than the average
    upper_line = list(filter(lambda t: t >= average, lines_len))  # sentences at or above the average
if len(lower_line) == 0:
lower_average = 0
else:
lower_average = int(sum(lower_line) / len(lower_line))
if len(upper_line) == 0:
upper_average = 0
else:
upper_average = int(sum(upper_line) / len(upper_line))
# lower_average = int(sum(lower_line) / len(lower_line))
# upper_average = int(sum(upper_line) / len(upper_line))
max_len = max(lines_len)
return [lower_average, average, upper_average, max_len]
def construct_bucket_mean_gd(input_features, input_label, word_dict, label_dict):
"""
Construct bucket by mean for greedy decode, word-level only
"""
# encode and padding
    features = encode_safe(input_features, word_dict, word_dict['<unk>'])  # map each training word to its index in the pretrained dictionary; words missing from it get the <unk> index 0
labels = encode(input_label, label_dict)
labels = list(map(lambda t: [label_dict['<start>']] + list(t), labels))
thresholds = calc_threshold_mean(features)
return construct_bucket_gd(features, labels, thresholds, word_dict['<eof>'], label_dict['<pad>'])
def construct_bucket_mean_vb(input_features, input_label, word_dict, label_dict, caseless):
"""
Construct bucket by mean for viterbi decode, word-level only
"""
# encode and padding
if caseless:
input_features = list(map(lambda t: list(map(lambda x: x.lower(), t)), input_features))
    features = encode_safe(input_features, word_dict, word_dict['<unk>'])  # words in the training set that are missing from the embedding dictionary are mapped to <unk>
    labels = encode(input_label, label_dict)  # label_dict holds every label (and its index) seen in train/dev/test; encode the training labels as indices
    labels = list(map(lambda t: [label_dict['<start>']] + list(t), labels))  # prepend the index of <start> to each sentence's label sequence
thresholds = calc_threshold_mean(features)
return construct_bucket_vb(features, labels, thresholds, word_dict['<eof>'], label_dict['<pad>'], len(label_dict))
def construct_bucket_mean_vb_wc(word_features, input_label, label_dict, char_dict, word_dict, caseless):
"""
Construct bucket by mean for viterbi decode, word-level and char-level
"""
# encode and padding
char_features = encode2char_safe(word_features, char_dict)
fea_len = [list(map(lambda t: len(t) + 1, f)) for f in char_features]
forw_features = concatChar(char_features, char_dict)
labels = encode(input_label, label_dict)
labels = list(map(lambda t: [label_dict['<start>']] + list(t), labels))
thresholds = calc_threshold_mean(fea_len)
if caseless:
word_features = list(map(lambda t: list(map(lambda x: x.lower(), t)), word_features))
word_features = encode_safe(word_features, word_dict, word_dict['<unk>'])
return construct_bucket_vb_wc(word_features, forw_features, fea_len, labels, thresholds, word_dict['<eof>'], char_dict['\n'], label_dict['<pad>'], len(label_dict))
def construct_bucket_vb_wc(word_features, forw_features, fea_len, input_labels, thresholds, pad_word_feature, pad_char_feature, pad_label, label_size):
"""
Construct bucket by thresholds for viterbi decode, word-level and char-level
"""
# construct corpus for language model pre-training
forw_corpus = [pad_char_feature] + list(reduce(lambda x, y: x + [pad_char_feature] + y, forw_features)) + [pad_char_feature]
back_corpus = forw_corpus[::-1]
# two way construct, first build the bucket, then calculate padding length, then do the padding
buckets = [[[], [], [], [], [], [], [], []] for ind in range(len(thresholds))]
# forw, forw_ind, back, back_in, label, mask
buckets_len = [0 for ind in range(len(thresholds))]
# thresholds is the padded length for fea
# buckets_len is the padded length for char
for f_f, f_l in zip(forw_features, fea_len):
cur_len_1 = len(f_l) + 1
idx = 0
while thresholds[idx] < cur_len_1:
idx += 1
tmp_concat_len = len(f_f) + thresholds[idx] - len(f_l)
if buckets_len[idx] < tmp_concat_len:
buckets_len[idx] = tmp_concat_len
# calc padding
for f_f, f_l, w_f, i_l in zip(forw_features, fea_len, word_features, input_labels):
cur_len = len(f_l)
idx = 0
cur_len_1 = cur_len + 1
while thresholds[idx] < cur_len_1:
idx += 1
padded_feature = f_f + [pad_char_feature] * (buckets_len[idx] - len(f_f)) # pad feature with <'\n'>, at least one
padded_feature_len = f_l + [1] * (thresholds[idx] - len(f_l)) # pad feature length with <'\n'>, at least one
padded_feature_len_cum = list(itertools.accumulate(padded_feature_len)) # start from 0, but the first is ' ', so the position need not to be -1
buckets[idx][0].append(padded_feature) # char
buckets[idx][1].append(padded_feature_len_cum)
buckets[idx][2].append(padded_feature[::-1])
buckets[idx][3].append([buckets_len[idx] - 1] + [buckets_len[idx] - 1 - tup for tup in padded_feature_len_cum[:-1]])
buckets[idx][4].append(w_f + [pad_word_feature] * (thresholds[idx] - cur_len)) #word
buckets[idx][5].append([i_l[ind] * label_size + i_l[ind + 1] for ind in range(0, cur_len)] + [i_l[cur_len] * label_size + pad_label] + [pad_label * label_size + pad_label] * (thresholds[idx] - cur_len_1)) # has additional start, label
buckets[idx][6].append([1] * cur_len_1 + [0] * (thresholds[idx] - cur_len_1)) # has additional start, mask
buckets[idx][7].append([len(f_f) + thresholds[idx] - len(f_l), cur_len_1])
bucket_dataset = [CRFDataset_WC(torch.LongTensor(bucket[0]), torch.LongTensor(bucket[1]),
torch.LongTensor(bucket[2]), torch.LongTensor(bucket[3]),
torch.LongTensor(bucket[4]), torch.LongTensor(bucket[5]),
torch.ByteTensor(bucket[6]), torch.LongTensor(bucket[7])) for bucket in buckets]
return bucket_dataset, forw_corpus, back_corpus
def construct_bucket_vb(input_features, input_labels, thresholds, pad_feature, pad_label, label_size):
"""
Construct bucket by thresholds for viterbi decode, word-level only
"""
buckets = [[[], [], []] for _ in range(len(thresholds))]
for feature, label in zip(input_features, input_labels):
cur_len = len(feature)
idx = 0
cur_len_1 = cur_len + 1
while thresholds[idx] < cur_len_1:
idx += 1
buckets[idx][0].append(feature + [pad_feature] * (thresholds[idx] - cur_len))
buckets[idx][1].append([label[ind] * label_size + label[ind + 1] for ind in range(0, cur_len)] +
[label[cur_len] * label_size + pad_label] +
[pad_label * label_size + pad_label] * (thresholds[idx] - cur_len_1))
buckets[idx][2].append([1] * cur_len_1 + [0] * (thresholds[idx] - cur_len_1))
bucket_dataset = [CRFDataset(torch.LongTensor(bucket[0]), torch.LongTensor(bucket[1]), torch.ByteTensor(bucket[2])) for bucket in buckets]
return bucket_dataset
def construct_bucket_gd(input_features, input_labels, thresholds, pad_feature, pad_label):
"""
Construct bucket by thresholds for greedy decode, word-level only
"""
buckets = [[[], [], []] for ind in range(len(thresholds))]
for feature, label in zip(input_features, input_labels):
cur_len = len(feature)
cur_len_1 = cur_len + 1
idx = 0
while thresholds[idx] < cur_len_1:
idx += 1
buckets[idx][0].append(feature + [pad_feature] * (thresholds[idx] - cur_len))
buckets[idx][1].append(label[1:] + [pad_label] * (thresholds[idx] - cur_len))
buckets[idx][2].append(label + [pad_label] * (thresholds[idx] - cur_len_1))
bucket_dataset = [CRFDataset(torch.LongTensor(bucket[0]), torch.LongTensor(bucket[1]), torch.LongTensor(bucket[2])) for bucket in buckets]
return bucket_dataset
def find_length_from_feats(feats, feat_to_ix):
"""
find length of unpadded features based on feature
"""
end_position = len(feats) - 1
for position, feat in enumerate(feats):
if feat.data[0] == feat_to_ix['<eof>']:
end_position = position
break
return end_position + 1
def find_length_from_labels(labels, label_to_ix):
"""
find length of unpadded features based on labels
"""
end_position = len(labels) - 1
for position, label in enumerate(labels):
if label == label_to_ix['<pad>']:
end_position = position
break
return end_position
def revlut(lut):
return {v: k for k, v in lut.items()}
# Turn a sequence of IOB chunks into single tokens
def iob_to_spans(sequence, lut, strict_iob2=False):
"""
    convert an IOB tag sequence into a set of spans
"""
iobtype = 2 if strict_iob2 else 1
chunks = []
current = None
for i, y in enumerate(sequence):
label = lut[y]
if label.startswith('B-'):
if current is not None:
chunks.append('@'.join(current))
current = [label.replace('B-', ''), '%d' % i]
elif label.startswith('I-'):
if current is not None:
base = label.replace('I-', '')
if base == current[0]:
current.append('%d' % i)
else:
chunks.append('@'.join(current))
if iobtype == 2:
print('Warning, type=IOB2, unexpected format ([%s] follows other tag type [%s] @ %d)' % (
label, current[0], i))
current = [base, '%d' % i]
else:
current = [label.replace('I-', ''), '%d' % i]
if iobtype == 2:
print('Warning, unexpected format (I before B @ %d) %s' % (i, label))
else:
if current is not None:
chunks.append('@'.join(current))
current = None
if current is not None:
chunks.append('@'.join(current))
return set(chunks)
# Turn a sequence of IOBES chunks into single tokens
def iobes_to_spans(sequence, lut, strict_iob2=False):
"""
    convert an IOBES tag sequence into a set of spans
"""
iobtype = 2 if strict_iob2 else 1
chunks = []
current = None
for i, y in enumerate(sequence):
label = lut[y]
if label.startswith('B'):
if current is not None:
chunks.append('@'.join(current))
            current = [label.replace('B-', ''), '%d' % i]
if label.startswith('S-'):
if current is not None:
chunks.append('@'.join(current))
current = None
base = label.replace('S-', '')
chunks.append('@'.join([base, '%d' % i]))
elif label.startswith('I'):
if current is not None:
                base = label.replace('I-', '')
if base == current[0]:
current.append('%d' % i)
else:
chunks.append('@'.join(current))
if iobtype == 2:
print('Warning')
current = [base, '%d' % i]
else:
                current = [label.replace('I-', ''), '%d' % i]
if iobtype == 2:
print('Warning')
elif label.startswith('E-'):
if current is not None:
base = label.replace('E-', '')
if base == current[0]:
current.append('%d' % i)
chunks.append('@'.join(current))
current = None
else:
chunks.append('@'.join(current))
if iobtype == 2:
print('Warning')
current = [base, '%d' % i]
chunks.append('@'.join(current))
current = None
else:
current = [label.replace('E-', ''), '%d' % i]
if iobtype == 2:
print('Warning')
chunks.append('@'.join(current))
current = None
else:
if current is not None:
chunks.append('@'.join(current))
current = None
if current is not None:
chunks.append('@'.join(current))
return set(chunks)
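# Illustrative example: the IOBES tag sequence  B-PER I-PER E-PER O S-LOC
# is converted to the span set {'PER@0@1@2', 'LOC@4'}.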
def fill_y(nc, yidx):
"""
fill y to dense matrix
"""
batchsz = yidx.shape[0]
siglen = yidx.shape[1]
dense = np.zeros((batchsz, siglen, nc), dtype=np.int)
for i in range(batchsz):
for j in range(siglen):
idx = int(yidx[i, j])
if idx > 0:
dense[i, j, idx] = 1
return dense
def save_checkpoint(state, track_list, filename):
"""
save checkpoint
"""
with open(filename+'.json', 'w') as f:
json.dump(track_list, f)
torch.save(state, filename+'.model')
def adjust_learning_rate(optimizer, lr):
"""
shrink learning rate for pytorch
"""
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def init_embedding(input_embedding):
"""
Initialize embedding
"""
bias = np.sqrt(3.0 / input_embedding.size(1))
nn.init.uniform(input_embedding, -bias, bias)
def init_linear(input_linear):
"""
Initialize linear transformation
"""
bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear.weight.size(1)))
nn.init.uniform(input_linear.weight, -bias, bias)
if input_linear.bias is not None:
input_linear.bias.data.zero_()
def init_lstm(input_lstm):
"""
Initialize lstm
"""
for ind in range(0, input_lstm.num_layers):
weight = eval('input_lstm.weight_ih_l'+str(ind))
bias = np.sqrt(6.0 / (weight.size(0)/4 + weight.size(1)))
nn.init.uniform(weight, -bias, bias)
weight = eval('input_lstm.weight_hh_l'+str(ind))
bias = np.sqrt(6.0 / (weight.size(0)/4 + weight.size(1)))
nn.init.uniform(weight, -bias, bias)
if input_lstm.bias:
for ind in range(0, input_lstm.num_layers):
weight = eval('input_lstm.bias_ih_l'+str(ind))
weight.data.zero_()
weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1
weight = eval('input_lstm.bias_hh_l'+str(ind))
weight.data.zero_()
weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1 | [
"[email protected]"
]
| |
c2be6b88131e68c62aa48b3786283a180db645d5 | 26e39895d20a9f6a2d3d79fc84307bf5aa4ff45e | /classes.py | f8b1e7960605b55d947ef29676dd0e354c0c013f | []
| no_license | karanpepi/oops-python | 0324dbd2f561d904eb28b961cdfaeb7cf502c1d4 | 8d4433819bd70e30b4ccd32aee098ddf3ea853c1 | refs/heads/master | 2020-04-24T22:24:13.868199 | 2019-02-24T07:52:43 | 2019-02-24T07:52:43 | 172,310,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | #Basic class and method
# class Subject:
# """docstring for ClassName"""
# def __init__(self):
# self.name = "Python"
# self.use = "web development"
# sub = Subject()
# sub2 = Subject()
# sub.use = "AI"
# print(sub.use)
# print(sub2.use)
#Instance variable and class variable/static variable
# class Car:
# mirrors = 2
# def __init__(self):
# self.mil = 10
# self.com = "BMW"
# c1 = Car()
# c2 = Car()
# c1.mil = 15
# Car.mirrors = 4
# print(c1.mil,c1.mirrors)
# print(c2.mil,c2.mirrors)
| [
"[email protected]"
]
| |
88894940e71b6a27ead4076a227999278abace79 | 142cb494e93de36045a717cb9d92a0e719cb0007 | /apibb/apibb-server.py | a39e142bc5ad93b4bc04820d98594e8f79090c76 | [
"MIT"
]
| permissive | RagnarDanneskjold/playground21 | 5a8553e93c97684c330d757f1291f1e1be2eee15 | 566d2478b0a0557934d89ebf29d0b9f3317fa76f | refs/heads/master | 2021-01-17T05:44:26.915795 | 2015-11-28T21:13:18 | 2015-11-28T21:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,070 | py | import os
import re
import json
import random
import apsw
import time
# import flask web microframework
from flask import Flask
from flask import request
# import from the 21 Developer Library
from two1.lib.wallet import Wallet
from two1.lib.bitserv.flask import Payment
connection = apsw.Connection("apibb.db")
name_re = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9-\.]*$")
app = Flask(__name__)
wallet = Wallet()
payment = Payment(app, wallet)
def expire_ads():
cursor = connection.cursor()
cursor.execute("DELETE FROM ads WHERE expires < datetime('now')")
def expire_names():
cursor = connection.cursor()
cursor.execute("DELETE FROM names WHERE expires < datetime('now')")
@app.route('/names')
@payment.required(1)
def get_names():
cursor = connection.cursor()
rv = []
for name,created,expires in cursor.execute("SELECT name,created,expires FROM names ORDER BY name"):
obj = {
"name": name,
"created": created,
"expires": expires
}
rv.append(obj)
return json.dumps(rv)
def valid_renewal(request):
name = request.args.get('name')
hours = request.args.get('hours')
if (name_re.match(name) is None or
int(hours) < 1 or
int(hours) > (24 * 30)):
return False
return True
def get_renew_price_from_req(request):
if not valid_renewal(request):
return "invalid advertisement"
hours = int(request.args.get('hours'))
price = hours * 10 # 10 satoshis per hour
if price < 10:
price = 10
return price
@app.route('/namerenew')
@payment.required(get_renew_price_from_req)
def name_renew():
if not valid_renewal(request):
return "invalid renewal"
expire_names()
name = request.args.get('name')
hours = int(request.args.get('hours'))
cursor = connection.cursor()
expires = 0
for v in cursor.execute("SELECT expires FROM names WHERE name = ?", (name,)):
expires = v[0]
print("EXPIRES " + str(expires))
if expires == 0:
cursor.execute("INSERT INTO names VALUES(?, datetime('now'), datetime('now', '+" + str(hours) + " hours'))", (name,))
else:
cursor.execute("UPDATE names SET expires = datetime(?, '+" + str(hours) + " hours') WHERE name = ?", (expires, name))
return "OK"
def valid_advertisement(cursor, request):
name = request.args.get('name')
uri = request.args.get('uri')
pubkey = request.args.get('pubkey')
hours = request.args.get('hours')
if (name_re.match(name) is None or
len(uri) < 1 or
len(uri) > 512 or
len(pubkey) < 32 or
len(pubkey) > 512 or
int(hours) < 1 or
int(hours) > (24 * 30)):
return False
expires = None
for v in cursor.execute("SELECT strftime('%s', expires) FROM names WHERE name = ? AND expires > datetime('now')", (name,)):
expires = v
if expires is None:
return False
# curtime = int(time.time())
# curtime_deltap = curtime + (int(hours) * 60 * 60)
# if curtime_deltap > expires:
# return False
return True
def get_advertise_price_from_req(request):
cursor = connection.cursor()
if not valid_advertisement(cursor, request):
return "invalid advertisement"
hours = int(request.args.get('hours'))
price = hours * 2 # 2 satoshis per hour
if price < 2:
price = 2
return price
@app.route('/advertise')
@payment.required(get_advertise_price_from_req)
def advertise():
cursor = connection.cursor()
if not valid_advertisement(cursor, request):
return "invalid advertisement"
name = request.args.get('name')
uri = request.args.get('uri')
pubkey = request.args.get('pubkey')
hours = request.args.get('hours')
cursor.execute("INSERT INTO ads VALUES(?, ?, ?, datetime('now'), datetime('now','+" + str(hours) + " hours'))", (name, uri, pubkey))
return "OK"
@app.route('/ads')
@payment.required(1)
def get_advertisements():
name = request.args.get('name')
rv = []
cursor = connection.cursor()
for uri,pk,created,expires in cursor.execute("SELECT uri,pubkey,created,expires FROM ads WHERE name = ? AND expires > datetime('now')", (name,)):
obj = {
"uri": uri,
"pubkey": pk,
"created": created,
"expires": expires
}
rv.append(obj)
return json.dumps(rv)
@app.route('/')
def get_info():
info_obj = {
"name": "apibb",
"version": 100,
"pricing": {
"/names" : {
"minimum" : 1
},
"/namerenew" : {
"minimum" : 10
},
"/advertise" : {
"minimum" : 2
},
"/ads" : {
"minimum" : 1
},
}
}
body = json.dumps(info_obj, indent=2)
return (body, 200, {
'Content-length': len(body),
'Content-type': 'application/json',
})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=12002)
| [
"[email protected]"
]
| |
9094d0aa7881214514389028e5649424dde7e59d | d180d7bea0db0aa65ee6d6112c14da903b1867b5 | /run.py | 58fee9cdd7064a02060da43ffe06f863a65d5852 | [
"CC-BY-4.0"
]
| permissive | willemsk/phdthesis-text | 694eb21b0465b67f26291d0349c3fee821562311 | 43d0f0f68bb84e6305f6b430816f8585ce4e8112 | refs/heads/main | 2023-06-22T22:17:33.632485 | 2023-06-07T12:54:06 | 2023-06-07T12:54:06 | 345,727,833 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,723 | py | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (C), 2012-2014 by Wannes Meert, KU Leuven
#
# Very naive compilation script for the ADSPHD class.
#
# No file dependency checks are performed (use TeXnicCenter, Texmaker, latexmk,
# rubber, SCons, or make if you want such a feature).
#
import glob
import os
import re
import shlex
import sys
import argparse
from collections import namedtuple
from subprocess import *
## SETTINGS ##
given_settings = {
'mainfile': 'thesis.tex',
'chaptersdir': 'chapters',
'makebibliography': True,
'makeindex': True,
'makeglossary': True,
'makenomenclature': True,
'usebiblatex': True,
'biblatexbackend': 'biber', # alternative: bibtex
'cleanext': ['.tdo','.fls','.toc','.aux','.log','.bbl','.blg','.log',
'.lof','.lot','.ilg','.out','.glo','.gls','.nlo','.nls',
'.brf','.ist','.glg','.synctexgz','.tgz','.idx','.ind',
'-blx.bib','.fdb_latexmk','.synctex.gz','.run.xml',
'.bcf','.glsdefs','.xdy']
}
derived_settings = ['basename', 'chapters', 'cleanfiles', 'pdffile']
verbose = 0
dry = False
### INITIALISATION ###
def initapplications():
"""Initialize the application commands and arguments for the different
platforms."""
global apps
# Unix and linux are the default setup
## *NIX ##
apps.pdflatex = App('pdflatex', '-interaction=nonstopmode -synctex=1 -shell-escape {basename}', verbose)
apps.bibtex = App('bibtex', '--min-crossref=100 {basename}', verbose)
apps.biber = App('biber', '{basename}', verbose)
apps.glossary = App('makeindex', '{basename}.glo -s {basename}.ist -o {basename}.gls', verbose)
apps.nomenclature = App('makeindex', '{basename}.nlo -s nomencl.ist -o {basename}.nls', verbose)
apps.pdfviewer = App('acroread', '{pdffile}', verbose)
apps.remove = App('rm', '-f {cleanfiles}', verbose)
if sys.platform == 'darwin':
## Mac OS X ##
apps.pdfviewer = App('open', '{pdffile}', verbose)
elif sys.platform == 'win32' or sys.platform == 'cygwin':
## Windows ##
## TODO: does not yet work
pass
## DERIVED SETTINGS ##
def create(*args, **kwargs):
class DictAsObj():
def __init__(self, *args, **kwargs):
self.__dict__ = kwargs
for arg in args:
self.__dict__[arg] = None
def __iter__(self):
return self.__dict__.items().__iter__()
def items(self):
return dict(self.__dict__.items())
def copy(self):
return DictAsObj(**self.__dict__)
return DictAsObj(*args, **kwargs)
settings = create(*derived_settings, **given_settings)
settings.basename = os.path.splitext(settings.mainfile)[0]
settings.chapters = [name.replace(".tex", "") for name in glob.glob('chapters/**/*.tex')]
settings.cleanfiles = " ".join([base+ext for ext in settings.cleanext for base in [settings.basename]+settings.chapters])
settings.pdffile = settings.basename+'.pdf'
apps = create('pdflatex', 'bibtex', 'biber', 'glossary', 'nomenclature', 'pdfviewer', 'remove')
## COMPILE ##
knowntargets = dict()
def target(targetname = None):
def decorate(f):
global knowntargets
name = targetname if targetname else f.__name__
knowntargets[name] = f
return f
return decorate
## TARGETS ##
@target()
def test():
"""Verify the settings in run.py"""
allok = testSettings()
if allok:
print("Your settings appear to be consistent")
if verbose > 0:
for k,v in settings:
if verbose > 1 or k not in ['cleanfiles']:
print("{}: {}".format(k, v))
else:
print("(use -v to inspect).")
@target()
def pdf():
"""Alias for compile"""
return compile()
@target()
def compile():
"""Build thesis.pdf"""
testSettings()
latex()
def latex():
global apps
rerun = False
print('#### LATEX ####')
apps.pdflatex.run(settings, 'Latex failed')
if settings.makebibliography:
rerun = True
if settings.usebiblatex and settings.biblatexbackend == 'biber':
print('#### BIBER ####')
apps.biber.run(settings, 'Biber failed')
else:
print('#### BIBTEX ####')
apps.bibtex.run(settings, 'Bibtex failed')
if settings.makeindex:
rerun = True
print('#### INDEX ####')
if settings.makeglossary:
# List of abbreviations
rerun = True
print('#### GLOSSARY ####')
apps.glossary.run(settings, 'Creating glossary failed')
if settings.makenomenclature:
# List of symbols
rerun = True
print('#### NOMENCLATURE ####')
apps.nomenclature.run(settings, 'Creating glossary failed')
if rerun:
print('#### LATEX ####')
apps.pdflatex.run(settings, 'Rerunning (1) Latex failed')
print('#### LATEX ####')
apps.pdflatex.run(settings, 'Rerunning (2) Latex failed')
@target()
def clean():
"""Remove the auxiliary files created by Latex."""
global apps
apps.remove.run(settings, 'Removing auxiliary files failed')
@target()
def realclean():
"""Remove all files created by Latex."""
global apps
clean()
newsettings = settings.copy()
    newsettings.cleanfiles += ' thesis.pdf thesis.dvi thesis.ps'
apps.remove.run(newsettings, 'Removing pdf files failed.')
@target()
def cover():
"""Generate a cover.tex file and produce a standalone cover.pdf"""
usersettings = dict()
doc_re = re.compile(r"^\\documentclass")
settings_re = [
('faculty', re.compile("faculty=([a-z]+)")),
('department', re.compile("department=([a-z]+)")),
('phddegree', re.compile("phddegree=([a-z]+)"))
]
content = []
doadd = False
with open(settings.mainfile,'r') as mf:
for line in mf:
if "documentclass" in line:
if doc_re.match(line) is not None:
for s, r in settings_re:
result = r.search(line)
if result is not None:
usersettings[s] = result.group(1)
if doadd:
content.append(line)
if "%%% COVER: Settings" in line:
doadd = True
elif "%%% COVER: End" in line:
doadd = False
if verbose > 0:
print('Recovered settings: ')
print(usersettings)
extra_usersettings = ','.join(['']+['{}={}'.format(k,v) for k,v in usersettings.items()])
with open('cover.tex','w') as cf:
cf.write("""% Cover.tex
\\documentclass[cam,cover{}]{{adsphd}}""".format(extra_usersettings))
cf.write("""
\\usepackage{printlen}
\\uselengthunit{mm}
""")
cf.write("".join(content))
cf.write("""
% Compute total page width
\\newlength{\\fullpagewidth}
\\setlength{\\fullpagewidth}{2\\adsphdpaperwidth}
\\addtolength{\\fullpagewidth}{2\\defaultlbleed}
\\addtolength{\\fullpagewidth}{2\\defaultrbleed}
\\addtolength{\\fullpagewidth}{\\adsphdspinewidth}
\\geometry{
paperwidth=\\fullpagewidth,
paperheight=\\adsphdpaperheight,
}
\\pagestyle{empty}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\\begin{document}
\\makefullcoverpage{\\adsphdspinewidth}{}
\\newlength{\\testje}
\\setlength{\\testje}{10mm}
\\mbox{}
\\newpage
\\subsection*{Used settings:}
\\begin{itemize}
\\item Spine width: \\printlength{\\adsphdspinewidth}
\\item Left bleed: \\printlength{\\lbleed}
\\item Right bleed: \\printlength{\\rbleed}
\\item Paper width: \\printlength{\\adsphdpaperwidth}
\\item Paper height: \\printlength{\\adsphdpaperheight}
\\item Text width: \\printlength{\\textwidth}
\\item Text height: \\printlength{\\textheight}
\\end{itemize}
\\end{document}
""")
print("Written cover to cover.tex")
newsettings = settings.copy()
newsettings.basename = 'cover'
apps.pdflatex.run(newsettings, 'Running Latex failed')
@target()
def newchapter():
"""Create the necessary files for a new chapter."""
chaptername = ""
validchaptername = re.compile(r'^[a-zA-Z0-9_.]+$')
while validchaptername.match(chaptername) == None:
chaptername = input("New chapter file name (only a-z, A-Z, 0-9 or _): ")
newdirpath = os.path.join(settings.chaptersdir, chaptername)
print("Creating new directory: "+newdirpath)
if not os.path.exists(newdirpath):
os.makedirs(newdirpath)
newfilepath = os.path.join(newdirpath,chaptername+".tex")
print("Creating new tex-file: "+newfilepath)
newfile = open(newfilepath, 'w')
print("% !TeX root = ../../"+settings.mainfile, file=newfile)
print("\\chapter{This is "+chaptername+"}\\label{ch:"+chaptername+"}\n", file=newfile)
print("\n\\ldots\n\n\n\n\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\
% Keep the following \\cleardoublepage at the end of this file, \n\
% otherwise \\includeonly includes empty pages.\n\
\\cleardoublepage\n", file=newfile)
newfile.close()
@target()
def view():
"""Open the generated pdf file in a pdf viewer."""
print("Opening "+settings.pdffile)
apps.pdfviewer.run(settings, 'Opening pdf failed.')
@target()
def targets():
"""Print overview of available targets."""
print("Targets:")
targetdocs = [(target,f.__doc__) for (target,f) in knowntargets.items()]
maxl = max((len(t) for (t,d) in targetdocs))
targetdocs.sort()
for (target,doc) in targetdocs:
s = "- {:<"+str(maxl)+"} {}"
if doc == None:
doc = ''
print(s.format(target,doc))
## AUXILIARY ##
def testSettings():
"""Verify whether run.py is using the expected settings based on
thesis.tex.
"""
allok = True
allok = allok and testBiblatex()
allok = allok and testNomenclature()
allok = allok and testGlossary()
return allok
def testBiblatex():
"""Test whether the main tex file includes biblatex and if this is
consistent with the settings in run.py
"""
global usebiblatex
allok = True
isusingbiblatex = False
# pattern = re.compile(r'^\\documentclass.*biblatex*.*$')
pattern = re.compile(r'^\s*[^%].*{biblatex}')
with open(settings.mainfile, 'r') as f:
for line in f:
if pattern.search(line) != None:
isusingbiblatex = True
if not settings.usebiblatex:
print("WARNING: It appears you are using biblatex while this setting in run.py is set to false.\n")
allok = False
# settings.usebiblatex = True
return allok
if not isusingbiblatex and settings.usebiblatex:
print("WARNING: It appears you are not using biblatex while this setting in run.py is set to true.\n")
# settings.usebiblatex = False
allok = False
return allok
def testNomenclature():
"""Check whether the nomenclature settings are consistent."""
allok = True
texfile = open(settings.mainfile, 'r')
pattern = re.compile(r'^\s*\\usepackage.*{nomencl}.*')
found = False
for line in texfile:
if pattern.search(line) != None:
found = True
    if not found and settings.makenomenclature:
print("\nWARNING: Trying to build the nomenclature but you have not include the nomencl Latex package.\n")
allok = False
if found and not settings.makenomenclature:
print("\nWARNING: You have included the nomencl Latex package but in the run.py script this step is not activated.\n")
allok = False
return allok
def testGlossary():
"""Check whether the glossaries settings are consistent."""
allok = True
texfile = open(settings.mainfile, 'r')
pattern = re.compile(r'^\s*\\usepackage.*{glossaries.*')
found = False
for line in texfile:
if pattern.search(line) != None:
found = True
if not found and settings.makeglossary:
print("\nWARNING: Trying to build the glossary but you have not include the glossaries Latex package.\n")
allok = False
if found and not settings.makeglossary:
print("\nWARNING: You have included the glossary Latex package but in the run.py script this step is not activated.\n")
allok = False
return allok
## APPLICATION ##
class App:
def __init__(self, b, o, v=0):
self.binary = b
self.options = o
self.verbose = v
def run(self, settings, errmsg):
""" Run the command for the given settings.
Required settings:
- basename
- cleanfiles
:returns: Return code
"""
returncode = 1
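        # Illustrative example (binary and options are assumptions, not taken
        # from this file): App('pdflatex', '{basename}.tex').run(settings, 'fail')
        # formats '{basename}.tex' with the settings values and then executes,
        # e.g., `pdflatex thesis.tex`.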
try:
cmd = self.options.format(**settings.items())
args = shlex.split(cmd)
print("Running: "+self.binary+" "+" ".join(args))
if not dry:
returncode = check_call([self.binary] + args)
except CalledProcessError as err:
print(err)
print(sys.argv[0].split("/")[-1] + ": "+errmsg+" (exitcode "+str(err.returncode)+")", file=sys.stderr)
sys.exit(1)
return returncode
## COMMAND LINE INTERFACE ##
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
global verbose
global dry
parser = argparse.ArgumentParser(
description='''
Naive compilation script for the ADSPhD class. No file dependency checks
are performed. Use TeXnicCenter, Texmaker, latexmk, rubber, SCons or
make for such a feature.''',
epilog='''
Settings: Open run.py with a text editor and change values in the settings
definition
''')
parser.add_argument('--verbose', '-v', action='count', help='Verbose output')
parser.add_argument('--targets', '-T', action='store_true', help='Print available targets')
parser.add_argument('--dry', '-d', action='store_true', help='Dry run to see commands without executing them')
parser.add_argument('target', nargs='*', help='Targets')
args = parser.parse_args(argv)
if args.verbose is not None:
verbose = args.verbose
dry = args.dry
if args.targets:
targets()
return
initapplications()
if len(args.target) == 0:
print("No targets given, using default target: compile")
compile()
for target in args.target:
print("Target: "+target)
if target in knowntargets:
knowntargets[target]()
else:
print("Unknown target")
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
]
| |
a6ece782ddf978d05bafc7a4c5e181c849ba4483 | c944b854f931cfce8cef2858e3431d470dfa9f45 | /484_find_permutation.py | ef1bf974f8d5a0a1fcc43b5eb6d28a36017b5954 | []
| no_license | Klose6/Leetcode | c6d190ea254505efb15ce001312cf721e775b565 | 338da8147461ff445067818ea8fa42fa76ebf23f | refs/heads/master | 2021-07-15T16:30:58.354881 | 2021-06-24T18:43:53 | 2021-06-24T18:46:00 | 129,567,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | """
484 find permutation
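Greedy idea: start from the identity permutation [1, 2, ..., n+1] and reverse
every maximal block of positions covered by a run of 'D' characters; this
gives the lexicographically smallest permutation matching the pattern.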
"""
def findPermutation(s):
n = len(s)
nums = [i+1 for i in range(n+1)]
i = 0
while i < n:
if s[i] != "D":
i += 1
else: # find the continuous Ds and reverse them
j = i+1
while j < n and s[j] == "D":
j += 1
# print(f"{i}, {j}")
nums[i:j+1] = nums[i:j+1][::-1]
i = j
return nums
print(findPermutation("ID")) # 132
print(findPermutation("ID")) # 213
print(findPermutation("DDIIDI")) # 3214657 | [
"[email protected]"
]
| |
11b93de78ab70c6b13026e2928e725308a40b872 | 498f06792cf33c7925808ff2ab77374eec77e2f0 | /test.py | 85f4eb863ce68753257fbf2751ed5f9c400326bd | []
| no_license | NameLacker/hello-world | dcdd0e273a2c494c6a8e0cb49cdff635015b0e5a | a7627473ec9f5fb1a762aeeff49b8eca6cced9a0 | refs/heads/master | 2020-04-04T01:00:21.826230 | 2018-11-01T05:31:47 | 2018-11-01T05:31:47 | 155,664,299 | 0 | 0 | null | 2018-11-01T05:30:07 | 2018-11-01T05:03:04 | null | UTF-8 | Python | false | false | 653 | py | import numpy as np
# import heapq
# a = np.array([[1,np.nan,2.1,5],[1.3,2,3,2],[1,2,6,2],[2, 1,7,2]], dtype='float32')
##### Handling nan, inf ######
# nan = np.isnan(a)
# print(nan)
# a[nan] = 0
# print(a)
##### Handling nan, inf ######
##### Finding indices of the largest values ######
# a = np.array([2,34,1,5,5])
# c = a.argsort()[-3:][::-1]
# c = (-a).argsort()[:2]
# print(c)
# a = np.random.randint(0, 10, 6)
# b = np.random.randint(0, 10, 6)
# c = np.random.randint(0, 10, 6)
# d = np.row_stack((a, b))
# d = np.row_stack((c ,d))
##### Handling nan, inf ######
a = np.array([
[1,2,3],
[2,3,4],
[2,3,5]
])
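# Basic slicing: a[:, 2] selects the third column of a, i.e. [3 4 5].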
print(a[:,2]) | [
"[email protected]"
]
| |
0ad9fe4925f4b28b1e4d34b71d268ddf45ccb8a2 | 301e55ee3990b2daf135197eac81e1cc244e6cd3 | /python/search-in-rotated-sorted-array.py | ac8d648b579b72e53fc06f4ec7a23090d95a213e | [
"MIT"
]
| permissive | alirezaghey/leetcode-solutions | 74b1b645c324ea7c1511d9ce3a97c8d622554417 | c32b786e52dd25ff6e4f84242cec5ff1c5a869df | refs/heads/master | 2022-08-22T16:28:05.459163 | 2022-08-18T12:02:51 | 2022-08-18T12:02:51 | 203,028,081 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | from typing import List
class Solution:
# Time complexity: O(log n) where n is the length of nums
# Space complexity: O(1)
def search(self, nums: List[int], target: int) -> int:
left, right = 0, len(nums)-1
while left <= right:
mid = left + (right - left) // 2
if nums[mid] < target:
if nums[mid] < nums[-1]: # right part
if target > nums[-1]:
right = mid-1
else:
left = mid+1
else: # left part
left = mid+1
elif nums[mid] > target:
if nums[mid] < nums[-1]: # right part
right = mid-1
else: # left part
if target < nums[0]:
left = mid+1
else:
right = mid-1
else:
return mid
return -1 | [
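# Example usage (LeetCode 33):
# print(Solution().search([4, 5, 6, 7, 0, 1, 2], 0))  # -> 4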
"[email protected]"
]
| |
09e0f6c4d47782c5b4710ab60f4a4e5b02c290a1 | 314de118562454e7b72406905af4906c6f528794 | /xor.py | 00e42149ed2c304906bab3bb6f57eee37250aa8e | []
| no_license | AkhilSinghania/XOR_Backprop | bc03b0e153d4fb058ec3cb463e810b45e4ca3891 | 539e287c050c04c1be8ede3cef71d06ada456112 | refs/heads/master | 2021-01-23T00:09:41.475833 | 2017-03-22T08:50:45 | 2017-03-22T08:50:45 | 85,703,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py |
import sys
import numpy as np
def sigmoid(x):
#Activation Function used in forward propagation
return 1/(1+np.exp(-x))
def sigmoidDash(x):
#Derivative of sigmoid function
#Activation function used in back propagation
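    #Note: x is assumed to already be a sigmoid output s = sigmoid(z), so
    #x*(1-x) equals the derivative d(sigmoid)/dz evaluated at that z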
return x*(1-x)
#Given Data
x = np.array(([0,0],[0,1],[1,0],[1,1])) #4x2 matrix
#Actual Output (The Output expected by the result of our neural network)
y = np.array(([0],[1],[1],[0]))     #4x1 matrix (XOR truth table)
#Command for generating the same random numbers every time
#Makes it easy for Debugging
np.random.seed(1)
#Intializing random synapse weights
W1 = np.random.randn(2,4) #2x4 matrix
W2 = np.random.randn(4,1) #4x1 vector
for i in range(500000):
#Forward propagation
layer1 = x #input layer
layer2 = sigmoid(np.dot(layer1,W1)) #4x4 matrix, Hidden layer
layer3 = sigmoid(np.dot(layer2,W2)) #4x1 vector, Output layer
#^In Forward propgation we first multiply the
#values of each node with weights of the synapses
#and then use the activation function to get the
#value for the node in next layer
#Calculating Error
layer3_error = y - layer3 #4x1 vector
#Backward propagation
layer3_Delta = layer3_error*sigmoidDash(layer3) #4x1 vector
layer2_error = layer3_Delta.dot(W2.T) #4x4 matrix
layer2_Delta = layer2_error*sigmoidDash(layer2) #4x4 matrix
#^In Backward propgation we first use the derivative
#(Derivative - slope of the Activation Function)
#of activation function and then multiply the error
#of that particular layer to get a value Delta for
#that particular layer. This Delta value is then
#multiplied with the weight of the synapses to get
#the error in the previous layer. This goes till the
#second layer as there is no error in the input layer.
#Performing Gradient Descent To change the weights accordingly
W2 += layer2.T.dot(layer3_Delta) #4x1 vector
W1 += layer1.T.dot(layer2_Delta) #2x4 matrix
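    #Note: the updates above apply an implicit learning rate of 1 to the full
    #batch; scaling the deltas (e.g. W2 += lr * layer2.T.dot(layer3_Delta))
    #would make the step size explicit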
#Printing the Output
print ("Output:")
print (layer3)
| [
"[email protected]"
]
| |
e0c7dd836f868d77b664a7a7d8b6cb4c6b8ce3e2 | 2d33afa6c666d839828473c65c9800df7ff40bec | /resume/urls.py | 2544270ce66dba35c78124edce5ccb3c212356b5 | []
| no_license | raphaelmulenda/cv_template | 5ee1d10a462b3694556bd3ecb07591557df7151a | b9fc98d246f1efb50fd2cc404d3511b7214109b2 | refs/heads/main | 2023-08-25T02:39:43.169105 | 2021-10-19T12:15:28 | 2021-10-19T12:15:28 | 418,229,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from django.urls import path
from . import views
urlpatterns = [
path("", views.HomePage.as_view(), name="home-page")
]
| [
"[email protected]"
]
| |
452f415224a2bf6bed7e7911ecdbbe8b15249042 | 4904b6b2ac9f760e7e9bf15ab72f22ff86e67ecb | /firstapp/apiurls.py | f46b83fbf888e8967c77b90bfbf1526159946a2f | []
| no_license | mudong1991/md_django_xadmin | d04dcb7515cf31a1f081809c5cd13a81e447235f | 83ee41678ea00993756a22eefcce13a2b5a2b2b1 | refs/heads/master | 2021-01-10T23:54:38.532606 | 2016-10-13T09:10:03 | 2016-10-13T09:10:03 | 70,788,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: UTF-8 -*-
__author__ = 'MD'
from django.conf.urls import url, patterns
from firstapp import apiviews
from rest_framework import routers
urlpatterns = [
url(r"^systeminfo", apiviews.systeminfo),
url(r'^sync_time/', apiviews.GetSystemTime.as_view()),
]
route = routers.DefaultRouter()
route.register(r"^bookborrowinfo", apiviews.BookBorrowInfoSet)
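# DefaultRouter generates the standard list/detail routes for the registered
# viewset as well as a browsable API root view.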
urlpatterns += route.urls
| [
"Administrator@2013-20160229XQ"
]
| Administrator@2013-20160229XQ |
23d8589bbfe5f9b76e56d2548b4f80fb857464f5 | 7c413e5355663de13abd2584cb25939292c9faea | /src/Temporal.py | 4166b26a26d83cf23db28e5fe834aed807ce5c64 | []
| no_license | Zumbalamambo/Deep-Activity-Recognition | 83b39cd0ea497be5897dfd4a13e48daa418a4484 | 83078f329b7336b485a5f640ebfeb52d6d69f3a6 | refs/heads/master | 2020-03-30T13:21:32.935369 | 2018-09-03T00:14:30 | 2018-09-03T00:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,259 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
import Layers
import os
class Temporal:
def __init__( self,
dim = 224,
inp_channels = 2,
timesteps = 5,
n_actions = 101,
modelsPath = '../models/',
metaGraphName = 'model.meta',
summaryPath = None,
restoreModel = False,
seed = None):
if seed:
tf.set_random_seed( seed )
self.dim = dim
self.inp_channels = inp_channels
self.timesteps = timesteps
self.n_actions = n_actions
self.modelsPath = modelsPath
self.sess = tf.InteractiveSession()
if not restoreModel:
self.buildGraph()
self.sess.run( tf.global_variables_initializer() )
tf.train.export_meta_graph( filename = os.path.join( modelsPath,
metaGraphName ) )
self.saver = tf.train.Saver( max_to_keep = 2 )
else:
self.saver = tf.train.import_meta_graph( os.path.join( modelsPath,
metaGraphName ) )
self.saver.restore( self.sess,
tf.train.latest_checkpoint( modelsPath ) )
if summaryPath is not None:
writer = tf.summary.FileWriter( summaryPath + 'tmp/net' )
writer.add_graph( self.sess.graph )
def buildGraph( self ):
layers = Layers.Layers()
# Placeholders for input and output
self.y = tf.placeholder( tf.float32 ,
shape = [ None,
self.n_actions ],
name = 'y' )
self.x = tf.placeholder( tf.float32,
shape = [ None,
self.dim * self.dim * self.inp_channels * self.timesteps ],
name = 'x' )
# Phase placeholder for batch normalization
phase = tf.placeholder( tf.bool , name = 'phase' )
# Dropout placeholders
dropout1 = tf.placeholder( tf.float32 , name = 'dropout1' )
dropout2 = tf.placeholder( tf.float32 , name = 'dropout2' )
# Preparing network input
btc = tf.shape( self.x )[0]
fold = tf.reshape( self.x , [ btc , self.dim , self.dim , self.inp_channels * self.timesteps ] )
# Convolution and pooling layers
layers.setDefaultInput( fold , self.inp_channels * self.timesteps )
conv1 = layers.conv2d( ksize_conv = 7 , stride_conv = 2,
ksize_pool = 2 , out_channels = 96,
bn_phase = phase , scope = 'conv1' )
#conv1 = layers.lrn( out_channels = 96, scope = 'conv1' )
conv2 = layers.conv2d( ksize_conv = 5 , stride_conv = 2,
ksize_pool = 2 , out_channels = 256,
bn_phase = phase , scope = 'conv2' )
conv3 = layers.conv2d( ksize_conv = 3 , stride_conv = 1,
ksize_pool = None , out_channels = 512,
bn_phase = phase , scope = 'conv3' )
conv4 = layers.conv2d( ksize_conv = 3 , stride_conv = 1,
ksize_pool = None , out_channels = 512,
bn_phase = phase , scope = 'conv4' )
conv5 = layers.conv2d( ksize_conv = 3 , stride_conv = 1,
ksize_pool = 2 , out_channels = 512,
bn_phase = phase , scope = 'conv5' )
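        # Spatial sizes per side (assuming 'SAME' padding inside Layers):
        # 224 -> conv1 (stride 2, pool 2) -> 56 -> conv2 (stride 2, pool 2) -> 14
        #     -> conv3/conv4 (stride 1, no pool) -> 14 -> conv5 (pool 2) -> 7,
        # which matches the 7 * 7 * 512 flatten below.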
flatten = tf.reshape( conv5 , [ btc , 7 * 7 * 512 ] )
layers.setDefaultInput( flatten , 7 * 7 * 512 )
# Fully connected layers
fully1 = layers.fully( out_channels = 4096 , dropout = dropout1,
bn_phase = None , scope = 'fully1' )
fully2 = layers.fully( out_channels = 2048 , dropout = dropout2,
bn_phase = None , scope = 'fully2' )
# Readout layer
self.y_ = layers.fully( out_channels = self.n_actions,
activation = 'softmax',
scope = 'y_' )
# Number of steps trained so far
global_step = tf.Variable( 0,
name = 'global_step',
trainable = False )
# Define operations and related tensors
self.defineOperations()
def defineOperations( self ):
learning_rate = tf.placeholder( tf.float32, name = 'learning_rate' )
w_fc1 = tf.get_default_graph().get_tensor_by_name( 'fully1/weights:0' )
w_fc2 = tf.get_default_graph().get_tensor_by_name( 'fully2/weights:0' )
w_out = tf.get_default_graph().get_tensor_by_name( 'y_/weights:0' )
l2_loss = 1e-3 * ( tf.nn.l2_loss( w_fc1 ) +
tf.nn.l2_loss( w_fc2 ) +
tf.nn.l2_loss( w_out ) )
#l2_loss = 0
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits( labels = self.y,
logits = self.y_,
name = 'cross_entropy') + l2_loss,
name = 'loss' )
# Train step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies( update_ops ):
optimizer = tf.train.MomentumOptimizer( learning_rate = learning_rate,
momentum = 0.9,
name = 'optimizer' )
self.train_step = optimizer.minimize( loss,
global_step = self.getGlobalStep(),
name = 'train_step' )
# Checks whether prediction is correct
correct_prediction = tf.equal( tf.argmax( self.y_ , 1 ),
tf.argmax( self.y , 1 ),
name = 'correct_prediction' )
# Calculates accuracy
accuracy = tf.reduce_mean( tf.cast( correct_prediction , tf.float32 ),
name = 'accuracy')
# Builds confusion matrix
confusion = tf.confusion_matrix( labels = tf.argmax( self.y , 1 ) ,
predictions = tf.argmax( self.y_ , 1 ) ,
num_classes = self.n_actions )
def getGlobalStep( self ):
return tf.get_default_graph().get_tensor_by_name( 'global_step:0' )
def saveModel( self ):
print( 'Saving model...' )
self.saver.save( self.sess,
self.modelsPath,
write_meta_graph = False,
global_step = self.getGlobalStep().eval() )
print( 'Model saved!' )
def trainBatch( self, x, y,
dropout1=0.5, dropout2=0.5,
in_drop=1.0, out_drop=1.0,
learning_rate = 1e-2):
graph = tf.get_default_graph()
train_step = graph.get_operation_by_name( 'train_step' )
accuracy = graph.get_tensor_by_name( 'accuracy:0' )
loss = graph.get_tensor_by_name( 'loss:0' )
return self.sess.run( [ train_step, accuracy, loss ],
feed_dict = { 'x:0': x ,
'y:0': y,
'phase:0': 1,
'dropout1:0': dropout1,
'dropout2:0': dropout2,
'learning_rate:0': learning_rate } )
def evaluateBatch( self, x, y ):
graph = tf.get_default_graph()
accuracy = graph.get_tensor_by_name( 'accuracy:0' )
loss = graph.get_tensor_by_name( 'loss:0' )
return self.sess.run( [ accuracy, loss ],
feed_dict = { 'x:0': x,
'y:0': y,
'phase:0': 0,
'dropout1:0': 1.0,
'dropout2:0': 1.0 } )
def evaluateActivs( self, x, y ):
graph = tf.get_default_graph()
y_ = graph.get_tensor_by_name( 'y_/Softmax:0' )
return self.sess.run( [ y_ ],
feed_dict = { 'x:0': x,
'y:0': y,
'phase:0': 0,
'dropout1:0': 1.0,
'dropout2:0': 1.0 } )
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
net = Temporal()
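    # Minimal (hypothetical) usage sketch; batch_x / batch_y loading is assumed
    # to be handled elsewhere and is not part of this file:
    # _, acc, loss = net.trainBatch( batch_x, batch_y, learning_rate = 1e-2 )
    # net.saveModel()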
| [
"[email protected]"
]
|