blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-283) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-41) | license_type (stringclasses 2 values) | repo_name (stringlengths 7-96) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 12.7k-662M ⌀) | star_events_count (int64 0-35.5k) | fork_events_count (int64 0-20.6k) | gha_license_id (stringclasses 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 43 values) | src_encoding (stringclasses 9 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 7-5.88M) | extension (stringclasses 30 values) | content (stringlengths 7-5.88M) | authors (sequencelengths 1-1) | author (stringlengths 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1db25bdeb177e7112fd4df6cf720162c9a9839ce | 422019d363c222c87b72f1d66e6ce6162440d08c | /wego/src/googlesheet.py | 7c5681d078de408b7923a4de6a0a632d5378e2b1 | [] | no_license | miteshgala92/applicationscraping | 6a5b9e4efa956e19312c91a7d06d4e68463f1d47 | e82ce1f3d3619d9042cfac5b30d248fd1960e1fb | refs/heads/main | 2023-05-08T03:03:18.607843 | 2021-06-01T10:48:20 | 2021-06-01T10:48:20 | 301,666,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py | import json
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import pandas as pd
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
with open('wego_properties') as json_file:
    data = json.load(json_file)
    SAMPLE_SPREADSHEET_ID = data.get("SAMPLE_SPREADSHEET_ID")
    SAMPLE_RANGE_NAME = data.get("SAMPLE_RANGE_NAME")


def routes_data():
    """Shows basic usage of the Sheets API.
    Prints values from a sample spreadsheet.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists(data.get("tokenpickel_location")):
        with open(data.get("tokenpickel_location"), 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(data.get('credentials_location'), SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open(data.get('tokenpickel_location'), 'wb') as token:
            pickle.dump(creds, token)

    service = build('sheets', 'v4', credentials=creds)

    # Call the Sheets API
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                                range=SAMPLE_RANGE_NAME).execute()
    values = result.get('values')
    if not values:
        # check for an empty sheet before building the DataFrame;
        # indexing values[1:] on None would raise a TypeError otherwise
        print('No data found.')
        return ''
    df = pd.DataFrame(values[1:], columns=values[0])
    df = df.loc[df['Active'] == 'YES']
    return df
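# Minimal usage sketch (illustrative: assumes 'wego_properties' supplies a
# valid spreadsheet id/range plus token and credentials paths):
# if __name__ == '__main__':
#     routes = routes_data()
#     print(routes.head())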
| [
"[email protected]"
] | |
32113ab189b118bdbaf0079eb26ec17a64dd23ff | b23369a5067dd7db9660c303bb2099ae3fe55638 | /algorithm/solutions/offer/test.py | 6e1fd5c9341f39bf19a968486082cf9b91f6f1f6 | [] | no_license | blackholemedia/writings | f65153f760c469764a9acbed12f0dbf37234f6f6 | 7d54bb2b37066c454990a11d1ca72bdd39d1122c | refs/heads/master | 2021-06-11T17:07:38.773129 | 2021-03-28T10:16:44 | 2021-03-28T10:16:44 | 160,291,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | #-*- coding=utf-8 -*-
from functools import reduce
import sys
if sys.platform == 'linux':
    sys.path.append('/home/alta/ds')
    from mytree.binarytree import BinaryTree, TreeNode
else:
    sys.path.append('c:\\users\\alta')
    from datastructure.mytree.binarytree import BinaryTree

if __name__ == '__main__':
    b = BinaryTree()
    if b:
        print(1)
    else:
        print(2)
| [
"[email protected]"
] | |
5710ed4d4a5418f66fe349adb978a247caf11b55 | 35e6376063265f3aebbe2d2147da9e61b5bb3ef3 | /rod_align/_ext/rod_align/__init__.py | f73c1cc3eaee375d531e87ecb437d370d043bd2c | [
"MIT"
] | permissive | notantony/Grid-Anchor-based-Image-Cropping-Pytorch | 2e6fdd4280726089b2e500cc48fa14254558bf5d | 32a2dea9151c123c8e589bd196450f56cf3ef7d1 | refs/heads/master | 2021-05-25T19:29:33.876827 | 2020-09-17T17:22:58 | 2020-09-17T17:22:58 | 253,891,360 | 0 | 0 | MIT | 2020-04-07T19:23:56 | 2020-04-07T19:23:55 | null | UTF-8 | Python | false | false | 383 | py |
from torch.utils.ffi import _wrap_function
from ._rod_align import lib as _lib, ffi as _ffi
__all__ = []
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
if callable(fn):
locals[symbol] = _wrap_function(fn, _ffi)
else:
locals[symbol] = fn
__all__.append(symbol)
_import_symbols(locals())
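# After this call, every symbol exported by the compiled _rod_align extension
# is bound at module level and recorded in __all__.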
| [
"[email protected]"
] | |
dea6f94ad5d677551c991b4885863988669f1af5 | 44846980df148e1a0621e8a359a7fd357482fd74 | /05-Inheritance/problem-5-Restaurant/project/food/cake.py | 9c455dce1d126fdbf08463332d2acb717c199373 | [
"MIT"
] | permissive | Beshkov/Python_OOP | 2dbd3383126f226895b652c4feaf8d79d867d4f8 | 297edadb3e7801dfeee5752a20aae6aead8da610 | refs/heads/main | 2023-04-03T14:24:59.218856 | 2021-04-18T15:13:11 | 2021-04-18T15:13:11 | 341,330,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from project.food.dessert import Desert
class Cake(Desert):
    PRICE = 5
    GRAMS = 250
    CALORIES = 1000

    def __init__(self, name):
        super().__init__(name, Cake.PRICE, Cake.GRAMS, Cake.CALORIES)
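# Usage sketch (assumes the project's Desert base class accepts
# name, price, grams and calories, as the super() call implies):
# cake = Cake("Chocolate")  # fixed price/grams/calories from the class constants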
| [
"[email protected]"
] | |
9cebe181063772a441f7159a79ae936585a7fcd4 | fdd70388fe6189aeba7c6a2a06313666906ea264 | /gsConfigManager.py | fdf3a869fc6a6f741a55184be0d5ab5e6eb798de | [
"MIT"
] | permissive | accidentalrebel/gsConfigManager | e0312637304c2b52370875d204a2acc73a9dee32 | 03e5db7e80a41e82bce3291cf408fd0a2c8c9b8c | refs/heads/master | 2020-04-04T14:03:03.278262 | 2018-04-02T12:37:05 | 2018-04-02T12:37:05 | 155,984,874 | 1 | 0 | null | 2018-11-03T13:18:35 | 2018-11-03T13:18:34 | null | UTF-8 | Python | false | false | 2,690 | py | #!/usr/bin/env python3
# Steve Callaghan <scalla[at]amazon.com>
# 2018/03/16
import sys
import auth
import json
import export
import config
def print_json(jsonMsg):
    print(json.dumps(jsonMsg, indent=4, sort_keys=False))


def fail_with_error(msg):
    print('Fatal Error: ' + msg)
    sys.exit()


def print_help(filename):
    print('--- ' + filename + ' usage guide.')
    print('--- Configure and Help')
    print(' - ' + filename + ' configure')
    print(' - ' + filename + ' configApi -- Configure ApiKey.')
    print(' - ' + filename + ' configAuth -- Configure Credentials.')
    print(' - ' + filename + ' help -- Print this help screen.')
    print('--- Export Options')
    print(' - ' + filename + ' exportScripts -- Export Scripts')
    print(' - ' + filename + ' exportManagementSnapshot -- Export the Management Screens')
    print(' - ' + filename + ' exportAll -- Export the entire current configuration')
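# Example invocations (illustrative, per the help text above;
# <shortCode> is a placeholder for a real snippet short code):
#   python gsConfigManager.py exportAll
#   python gsConfigManager.py getManagementSnippet <shortCode>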
# Main
if len(sys.argv) <= 1 or sys.argv[1] == 'help':
    print_help(sys.argv[0])
elif sys.argv[1] == 'configure':
    auth.configure()
elif sys.argv[1] == 'configApi':
    auth.configureApi()
elif sys.argv[1] == 'configAuth':
    auth.configureAuth()
elif sys.argv[1] == 'sanitize':
    print('Sanitize not yet implemented.')
else:
    if sys.argv[1] == 'getSnapshot':  # Get Snapshot
        if len(sys.argv) <= 2:
            fail_with_error('Invalid parameters. Missing parameter snapshotId.')
        print(config.get_snapshot(auth.get_configured_apikey(), sys.argv[2], auth.get_gs_access_token()))
    elif sys.argv[1] == 'getManagementScreens':  # Get Management Screens
        print_json(config.get_management_screens(auth.get_configured_apikey(), auth.get_gs_access_token()))
    elif sys.argv[1] == 'getManagementSnippets':  # Get Management Snippets
        print_json(config.get_management_snippets(auth.get_configured_apikey(), auth.get_gs_access_token()))
    elif sys.argv[1] == 'getManagementSnippet':  # Get Management Snippet
        if len(sys.argv) <= 2:
            fail_with_error('Invalid parameters. Missing parameter snippet shortCode.')
        print_json(config.get_management_snippet(auth.get_configured_apikey(), sys.argv[2], auth.get_gs_access_token()))
    elif sys.argv[1] == 'exportScripts':
        export.export_scripts(auth.get_configured_apikey(), auth.get_gs_access_token())
    elif sys.argv[1] == 'exportManagementSnapshot':  # Export Management Screens
        export.export_management_snapshot(auth.get_configured_apikey(), auth.get_gs_access_token())
    elif sys.argv[1] == 'exportAll':
        export.export_all(auth.get_configured_apikey(), auth.get_gs_access_token())
    else:
        print_help(sys.argv[0])
| [
"[email protected]"
] | |
426a56fc1b17445626f5aa5bf3af52c6f0fa5620 | 280a7abee07f47d96932ac1f7a67aa1950278db4 | /kouchan-blog/urls.py | ad9c17afcabc450f5c38247198da7e785a4de91e | [
"MIT"
] | permissive | KoukiNAGATA/kouchan-blog | 7bdb93f68cc0db87c6ceffed4c4ce72cb4c2ff82 | 51808388135eca32077ad05241a0587df0e08c25 | refs/heads/main | 2023-08-07T19:18:04.045900 | 2021-09-03T14:07:52 | 2021-09-03T14:07:52 | 366,347,065 | 0 | 0 | MIT | 2021-09-01T13:33:53 | 2021-05-11T10:46:12 | JavaScript | UTF-8 | Python | false | false | 1,506 | py | """kouchan-blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.conf import settings
from blog.views import PostDetailView, PostListView, NewsListView, BlogListView, AboutView
urlpatterns = [
    path('admin/', admin.site.urls),
    path('posts/<post_id>', PostDetailView.as_view(), name="post_detail"),
    path('', PostListView.as_view(), name="post_list"),
    path('news/', NewsListView.as_view(), name="news_list"),
    path('blog/', BlogListView.as_view(), name="blog_list"),
    path('about/', AboutView.as_view(), name="about"),
    url(r'mdeditor/', include('mdeditor.urls'))
]

if settings.DEBUG:
    # static files (images, css, javascript, etc.)
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
8f426dfbfdc1c9bebf1194576c9f3d6d77e7d800 | ba14294ecd07a6c5f06a8204b298e1dfeeef7938 | /S14Q02_school_marks_with_read.py | 60717b489a107565850d102137a9c8d5d0cc05dc | [] | no_license | deepak261/python-exe | 50fd87f022d9e1b8e90661b1ce69df4ac53fbfa9 | 0bf575eebdbe4da0e3957b06aa7f0854286b73d1 | refs/heads/master | 2020-09-12T08:33:43.493309 | 2019-12-17T09:48:17 | 2019-12-17T09:48:17 | 222,369,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | '''
S14Q02
Create a text file called “students.txt”.
Each line should be of the form
“student_name : student_marks”
- Write a Python program to read the contents from this file.
- Print the names and marks of all students
who have scored more than 90% marks,
in ascending order of their marks.
'''
def user_input():
    name = input("enter the student's name:")
    marks = input("enter the total marks:")
    return name, marks


def sec(x):
    return int(x[1])


with open('students.txt') as file:
    FH = file.readlines()

a_grade = []
for line in FH:
    slp_line = line.strip()
    print(slp_line)
    slp_line = slp_line.split(':')
    if int(slp_line[1]) >= 90:
        a_grade.append(slp_line)

for i in a_grade:
    print(i)

a_grade.sort(key=sec)
print("sorted student marks list")
for i in a_grade:
    print(i)
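# Illustrative students.txt contents (format from the docstring above;
# names are made up):
# Rahul : 95
# Priya : 88
# Aman : 97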
| [
"[email protected]"
] | |
dc468ce1b9be09aaa942aaaf98afe386e2ed2f59 | d20aa624e04cbd0c426a6b7281f3cc1ed88a75cc | /calculator-python/tests/test_parser.py | 5a86d714431e1ead59048d5c74ccbb1052e303ea | [] | no_license | kenfj/calculators | 8a0710818f030ee919908d85839e1d9228b3f9e2 | 287ccc46d9c3a63caae7ef1543c21cbf998853c7 | refs/heads/master | 2022-12-09T23:32:41.446162 | 2020-09-06T15:26:22 | 2020-09-06T15:29:17 | 293,289,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,903 | py | from tokens import Token, TokenType
from parser_ import Parser
from nodes import BinNode, NumberNode, UnaryNode
def test_empty():
    tokens = []
    node = Parser(tokens).parse()
    assert node is None


def test_number():
    tokens = [Token(TokenType.NUMBER, 51.2)]
    node = Parser(tokens).parse()
    assert node == NumberNode(51.2)


def test_plus():
    tokens = [
        Token(TokenType.NUMBER, 1),
        Token(TokenType.PLUS),
        Token(TokenType.NUMBER, 2),
    ]
    node = Parser(tokens).parse()
    assert node == BinNode(NumberNode(1), TokenType.PLUS, NumberNode(2))


def test_unary():
    tokens = [
        Token(TokenType.PLUS),
        Token(TokenType.NUMBER, 1),
    ]
    node = Parser(tokens).parse()
    assert node == UnaryNode(TokenType.PLUS, NumberNode(1))

    tokens = [
        Token(TokenType.MINUS),
        Token(TokenType.NUMBER, 1),
    ]
    node = Parser(tokens).parse()
    assert node == UnaryNode(TokenType.MINUS, NumberNode(1))


def test_individual_operations():
    tokens = [
        Token(TokenType.NUMBER, 27),
        Token(TokenType.PLUS),
        Token(TokenType.NUMBER, 14),
    ]
    node = Parser(tokens).parse()
    assert node == BinNode(NumberNode(27), TokenType.PLUS, NumberNode(14))

    tokens = [
        Token(TokenType.NUMBER, 27),
        Token(TokenType.MINUS),
        Token(TokenType.NUMBER, 14),
    ]
    node = Parser(tokens).parse()
    assert node == BinNode(NumberNode(27), TokenType.MINUS, NumberNode(14))

    tokens = [
        Token(TokenType.NUMBER, 27),
        Token(TokenType.MULTIPLY),
        Token(TokenType.NUMBER, 14),
    ]
    node = Parser(tokens).parse()
    assert node == BinNode(NumberNode(27), TokenType.MULTIPLY, NumberNode(14))

    tokens = [
        Token(TokenType.NUMBER, 27),
        Token(TokenType.DIVIDE),
        Token(TokenType.NUMBER, 14),
    ]
    node = Parser(tokens).parse()
    assert node == BinNode(NumberNode(27), TokenType.DIVIDE, NumberNode(14))


def test_full_expression():
    # 27 + (43 / 36 - 48) * 51
    tokens = [
        Token(TokenType.NUMBER, 27),
        Token(TokenType.PLUS),
        Token(TokenType.LPAREN),
        Token(TokenType.NUMBER, 43),
        Token(TokenType.DIVIDE),
        Token(TokenType.NUMBER, 36),
        Token(TokenType.MINUS),
        Token(TokenType.NUMBER, 48),
        Token(TokenType.RPAREN),
        Token(TokenType.MULTIPLY),
        Token(TokenType.NUMBER, 51),
    ]
    node = Parser(tokens).parse()
    assert node == BinNode(
        NumberNode(27),
        TokenType.PLUS,
        BinNode(
            BinNode(
                BinNode(
                    NumberNode(43),
                    TokenType.DIVIDE,
                    NumberNode(36),
                ),
                TokenType.MINUS,
                NumberNode(48),
            ),
            TokenType.MULTIPLY,
            NumberNode(51),
        )
    )
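# A natural extra case one might add (sketch, assuming parentheses simply
# unwrap to the inner node as in test_full_expression):
# def test_paren_unary():
#     tokens = [Token(TokenType.MINUS), Token(TokenType.LPAREN),
#               Token(TokenType.NUMBER, 1), Token(TokenType.RPAREN)]
#     assert Parser(tokens).parse() == UnaryNode(TokenType.MINUS, NumberNode(1))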
| [
"[email protected]"
] | |
8b2d77afecc99f3f0398cd4507fc5842784c22be | c6ff580f5642b26e1c29bca11b1c8f78522289a5 | /aulaCanal/core/views.py | 684da9e536b900712e2b162fa62ab995ea0662cd | [] | no_license | cpaivaj/estadoPensante_projetoDjango | aa12126a1d14f7540f34bb5228ef83d6761a3784 | 55a28e904ab511217dfa521316e461c0b8c00252 | refs/heads/master | 2022-10-21T09:22:05.532670 | 2020-06-19T22:13:00 | 2020-06-19T22:13:00 | 273,595,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from django.shortcuts import render
# Create your views here.
def home(request):
    template_name = 'home.html'
    context = {}
    return render(request, template_name, context)
| [
"[email protected]"
] | |
c4015e599f027ed19397facf0d40c886fce19fe9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_235/ch5_2019_09_30_20_38_17_649024.py | b8de2e39ea7eef630ab86123a0bf54f25a4961ab | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from math import *
def testa_primo(x):  # returns True if the number is prime
    d = 2
    primo = True
    while d <= int(sqrt(x)):
        if x % d == 0:
            primo = False
            break
        elif x < 3:
            primo = False
        else:
            primo = True
        d = d + 1
    return primo


def maior_primo(x):
    num = int(x)
    if int(x) <= 1:
        return -1
    while testa_primo(num) == False:
        num = num - 1
    return num
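# Quick check (illustrative): maior_primo(10) walks down from 10 and
# returns 7, the largest prime <= 10.
# print(maior_primo(10))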
"[email protected]"
] | |
4c4a941d44a281ad1162e35cf84b018d8526ec3a | 818cb255f3f00080a7aa68282e65f4c1d0310c77 | /django_flask_samples/try-django-19-master/src/posts/models.py | 2199bb6459b2c49a295f14443e40c119e815f209 | [
"MIT"
] | permissive | pmnyc/Data_Engineering_Collections | fdca0f9a3de71f5c9855e5bbb45c574d1062077d | b7d29cd4c134cb1252e5c45dd500d969fe0f6029 | refs/heads/master | 2021-06-24T22:15:32.913229 | 2020-11-08T10:12:04 | 2020-11-08T10:12:04 | 153,053,634 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.text import slugify
# Create your models here.
# MVC MODEL VIEW CONTROLLER
#Post.objects.all()
#Post.objects.create(user=user, title="Some time")
class PostManager(models.Manager):
    def active(self, *args, **kwargs):
        # Post.objects.all() = super(PostManager, self).all()
        return super(PostManager, self).filter(draft=False).filter(publish__lte=timezone.now())


def upload_location(instance, filename):
    # filebase, extension = filename.split(".")
    # return "%s/%s.%s" %(instance.id, instance.id, extension)
    PostModel = instance.__class__
    new_id = PostModel.objects.order_by("id").last().id + 1
    """
    instance.__class__ gets the model Post. We must use this method because the model is defined below.
    Then create a queryset ordered by the "id"s of each object,
    Then we get the last object in the queryset with `.last()`
    Which will give us the most recently created Model instance
    We add 1 to it, so we get what should be the same id as the post we are creating.
    """
    return "%s/%s" % (new_id, filename)


class Post(models.Model):
    user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
    title = models.CharField(max_length=120)
    slug = models.SlugField(unique=True)
    image = models.ImageField(upload_to=upload_location,
                              null=True,
                              blank=True,
                              width_field="width_field",
                              height_field="height_field")
    height_field = models.IntegerField(default=0)
    width_field = models.IntegerField(default=0)
    content = models.TextField()
    draft = models.BooleanField(default=False)
    publish = models.DateField(auto_now=False, auto_now_add=False)
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)

    objects = PostManager()

    def __unicode__(self):
        return self.title

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse("posts:detail", kwargs={"slug": self.slug})

    class Meta:
        ordering = ["-timestamp", "-updated"]


def create_slug(instance, new_slug=None):
    slug = slugify(instance.title)
    if new_slug is not None:
        slug = new_slug
    qs = Post.objects.filter(slug=slug).order_by("-id")
    exists = qs.exists()
    if exists:
        new_slug = "%s-%s" % (slug, qs.first().id)
        return create_slug(instance, new_slug=new_slug)
    return slug


def pre_save_post_receiver(sender, instance, *args, **kwargs):
    if not instance.slug:
        instance.slug = create_slug(instance)


pre_save.connect(pre_save_post_receiver, sender=Post)
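# Illustrative slug behaviour (hypothetical data): saving a second Post titled
# "Hello" yields slug "hello-<id of the existing 'hello' post>" via the
# recursive create_slug call above.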
| [
"[email protected]"
] | |
8b86f4a685ad52d85f4bb1ffabcb5cf186388c82 | 0a8026a21808540a31d3e5699657df744a8a0268 | /euler/euler019.py | ccd99ff4cc8d9ae8e9cdc1c5e7569a84fe6bb95d | [] | no_license | wubek/ProjectEuler | 10208dfd1b17da3408f9cb3e973a53172c88a955 | 7ce04b80136a77bfbe9c0bf4b7a6af3c560e8b19 | refs/heads/master | 2016-09-05T14:53:58.728372 | 2015-02-10T23:04:58 | 2015-02-10T23:04:58 | 23,163,013 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | # author wukat
'''
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
def create_months_list():
    return ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]


def create_length_of_months_dict():
    return {"Jan": 31, "Feb": 28, "Mar": 31, "Apr": 30, "May": 31, "Jun": 30, "Jul": 31, "Aug": 31, "Sep": 30, "Oct": 31, "Nov": 30, "Dec": 31}


def check_if_leap_year(year):
    # per the rule in the problem statement: divisible by 4, except
    # centuries, unless the century is divisible by 400 (e.g. 2000 is leap)
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


def get_feb_length(year):
    return 29 if check_if_leap_year(year) else 28


def count_sundays_on_first_day_of_month_between_dates():
    sundays = 0
    first_day = 366 % 7
    lengths = create_length_of_months_dict()
    for year in range(1901, 2001):
        lengths["Feb"] = get_feb_length(year)
        for month in create_months_list():
            first_day += lengths[month] % 7
            if first_day % 7 == 0:
                sundays += 1
    if first_day % 7 == 0:  # we've checked also 1 Jan 2001
        sundays -= 1
    return sundays
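# Sanity check: the published answer to Project Euler problem 19 is 171.
# assert count_sundays_on_first_day_of_month_between_dates() == 171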
if __name__ == "__main__":
    print(count_sundays_on_first_day_of_month_between_dates())
| [
"[email protected]"
] | |
065a9bd04669a93db0587677142de9f98bddb0f5 | 1e37b2c02b462689e7468d58cacff1167951294a | /Exam statistics dobre pratyki.py | 46b00a8c68b6281480d3af508ac0c4f388f18946 | [] | no_license | MProMikolajczyk/Python-script | 43f00706959996b1e97980adcba19d47103f4480 | 7512c8b1aac65ba6a652d81dfed301bf2fb7b830 | refs/heads/master | 2023-04-12T23:17:39.195642 | 2019-04-02T13:04:54 | 2019-04-02T13:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #liczenie sumy
grades = [100, 100, 90, 40, 80, 100, 85, 70, 90, 65, 90, 85, 50.5]


def grades_sum(scores):
    count = 0
    for grade in scores:
        count += grade
    return count


print(grades_sum(grades))
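# Expected output: 1045.5 for the sum and ~80.42 for the average below (1045.5 / 13).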
# computing the average by calling one function inside another
def grades_average(grades_input):
    average = grades_sum(grades_input) / float(len(grades_input))  # calls the sum helper on the same collection
    return average


print(grades_average(grades))
| [
"[email protected]"
] | |
523cb48fc3b9e7896248ac276d639d62564eebd6 | 710c2f79b3327e35fce095bb7589bdb3485bb7e4 | /vam.py | ed5ec229f582fca7fe7de87bc3edc8c7ff14eb89 | [] | no_license | Tavisca-vvinod/Transportation-Problem | 7408e9713c4ad0804da9012869feb45d9139ab60 | 6040eb6a2c34e166adfc78023de99e3e860d43aa | refs/heads/master | 2022-03-08T07:12:18.807707 | 2019-11-02T20:45:58 | 2019-11-02T20:45:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,194 | py | import sys
import numpy as np
requirement = [25, 35, 105, 20]
availability = [50, 70, 30, 50]
cost = [[4, 6, 8, 13], [13, 11, 10, 8], [14, 4, 10, 13], [9, 11, 13, 8]]
r = 4
c = 4
total_requirement = 0
total_availability = 0


def p_cost(cost):
    for i in range(r):
        print(cost[i])


for i in range(0, len(requirement)):
    total_requirement += requirement[i]
for i in range(0, len(availability)):
    total_availability += availability[i]

# balance the problem with a dummy column/row if supply and demand differ
if total_requirement < total_availability:
    for i in range(r):
        cost[i].append(0)
    requirement.append(total_availability - total_requirement)
    c += 1
if total_requirement > total_availability:
    temp = [0] * c
    cost.append(temp)
    availability.append(total_requirement - total_availability)
    r += 1

df = np.zeros((r, c))
print("Updated cost matrix ")
print(cost)

res = 0
while total_requirement > 0:
    row = [float('-inf')] * r
    col = [float('-inf')] * c
    # penalty for each row/column: spread between the largest and smallest
    # remaining (not crossed-out) cost
    for i in range(r):
        mi = sys.maxsize
        ma = float('-inf')
        for j in range(c):
            if cost[i][j] != sys.maxsize:
                ma = max(ma, cost[i][j])
                mi = min(mi, cost[i][j])
        row[i] = ma - mi
    for j in range(c):
        mi = sys.maxsize
        ma = float('-inf')
        for i in range(r):
            if cost[i][j] != sys.maxsize:
                ma = max(ma, cost[i][j])
                mi = min(mi, cost[i][j])
        col[j] = ma - mi
    print("Calculated value of diff in max and min element in a row ", row)
    print("Calculated value of diff in max and min element in a col", col)

    ma = max(row)
    ma = max(ma, max(col))
    rind = -1
    cind = -1
    if ma in row:
        rind = row.index(max(row))
        cind = cost[rind].index(min(cost[rind]))
    else:
        cind = col.index(max(col))
        temp = []
        for i in range(r):
            temp.append(cost[i][cind])
        rind = temp.index(min(temp))
    print("Index chosen ", rind, cind)

    diff = min(availability[rind], requirement[cind])
    print("Allocated value to the index ", diff)
    res += (cost[rind][cind] * diff)
    df[rind][cind] = diff
    # cross out the exhausted row or column with sys.maxsize so it is
    # skipped when penalties are recomputed
    if diff == availability[rind]:
        for j in range(c):
            cost[rind][j] = sys.maxsize
    else:
        for i in range(r):
            cost[i][cind] = sys.maxsize
    total_requirement -= diff
    availability[rind] -= diff
    requirement[cind] -= diff
    # p_cost(cost)

print("Total cost of transportation is ", res)
print("Initial basic feasible solution is ")
print(df)
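# A run on the data above prints each iteration's row/column penalties, the
# chosen cell and its allocation, then the accumulated cost `res` and the
# allocation matrix `df`.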
| [
"[email protected]"
] | |
50b72c5c85059bdeb4f6483199c42f82c27c6a4a | baf3996414315ffb60470c40c7ad797bf4e6897f | /12_back_dev/Flask/Udemy, Flask, Advanced REST API, codes/section04/56_creating_simple_translation_lib/start/schemas/confirmation.py | a417a6761f440dc49f74e7b019c97f6112032b71 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 310 | py | from ma import ma
from models.confirmation import ConfirmationModel
class ConfirmationSchema(ma.SQLAlchemyAutoSchema):
    class Meta:
        model = ConfirmationModel
        load_instance = True
        load_only = ("user",)
        dump_only = ("id", "expired_at", "confirmed")
        include_fk = True
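# Usage sketch (hypothetical ConfirmationModel instance):
# schema = ConfirmationSchema()
# schema.dump(confirmation)  # "user" is load-only, so it is omitted on dump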
| [
"[email protected]"
] | |
a7323c90e60c0cf20b56805548934dc7ff65e394 | e138ae16efe8fcc87491972cd38377112b56570c | /fedex_selenium_headless.py | b716e5a069c71997da9919ecdb7445a18764b9a8 | [] | no_license | Vatsalya-singhi/Web-Automation-Fedex | c9d5c9a4affddba2c33bab62ca788adabbaad856 | b41b084018f62309b5dcd2f85b2ceff419996b8e | refs/heads/master | 2021-09-10T21:16:01.850110 | 2018-04-02T09:15:33 | 2018-04-02T09:15:33 | 126,003,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,413 | py | import time,datetime,calendar
import ezodf,pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from time import strptime
from pandas import ExcelWriter
from pandas import ExcelFile
path='C:\\Users\\Samridi\\Downloads\\sample_records.ods'
def getpd(path):
    doc = ezodf.opendoc(path)
    sheet = doc.sheets[0]
    df_dict = {}
    for i, row in enumerate(sheet.rows()):
        # row is a list of cells
        # assume the header is on the first row
        if i == 0:
            # columns as lists in a dictionary
            df_dict = {cell.value: [] for cell in row}
            # create index for the column headers
            col_index = {j: cell.value for j, cell in enumerate(row)}
            continue
        for j, cell in enumerate(row):
            # use header instead of column index
            df_dict[col_index[j]].append(cell.value)
    df = pd.DataFrame(df_dict)
    return df


toaddarr = []
ndf = getpd(path)
df = ndf.dropna()
# keep only the needed columns from the cleaned frame
df = df[['trackingnumber', 'shipdate', 'country', 'shipcountry', 'pincode', 'shippincode', 'servicetype']].copy()
count = 0
df = df.head(50)

for index, row in df.iterrows():
    try:
        inDate = row['shipdate']  # get input
        # getting date from string to datetime format
        d = datetime.datetime.strptime(inDate, '%Y-%m-%d %H:%M:%S')
        shipdate = '/'.join(str(x) for x in (d.day, d.month, d.year))
        print("shipdate - " + str(inDate))
        oo = [int(d.year), int(d.month), int(d.day)]

        # getting next day no
        dno = (datetime.datetime(oo[0], oo[1], oo[2])).weekday()

        def nextdate(no):
            d = datetime.date.today()
            while d.weekday() != no:
                d += datetime.timedelta(1)
            return d

        dno = nextdate(dno)
        dno = str(dno.strftime("%m-%d-%Y")).replace('-', '/')
        print("next date with same dno - " + str(dno))

        # scraping for data
        chromedriver = 'D:\\webdrivers\\chromedriver'
        options = Options()
        options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        browser = webdriver.Chrome(chromedriver, chrome_options=options)
        browser.get('https://www.fedex.com/ratefinder/home')
        fromcountry = browser.find_element_by_name("origCountry")
        destCountry = browser.find_element_by_name("destCountry")
        el = browser.find_element_by_id('origCountryId')
        for option in el.find_elements_by_tag_name('option'):
            fromctry = row['country']  # 'IN' # get input
            if option.get_attribute("value") == fromctry:
                option.click()
                break
        ll = browser.find_element_by_id('destCountryId')
        for option in ll.find_elements_by_tag_name('option'):
            toctry = row['shipcountry']  # 'IN' # get input
            if option.get_attribute("value") == toctry:
                option.click()
                break
        try:  # try zipcode else by city
            zplid = browser.find_element_by_id("origZipId")
            destzpid = browser.find_element_by_id("destZipId")
            zpin = str(row['pincode'])
            zpin = zpin.replace('.0', '')
            destpin = str(row['shippincode'])
            destpin = destpin.replace('.0', '')
            # send keys
            zplid.send_keys(zpin)  # get input for pin
            destzpid.send_keys(destpin)  # get input for pin
        except:
            try:
                ogcity = browser.find_element_by_id("origCityId")
                for option in ogcity.find_elements_by_tag_name('option'):
                    toctry = row['city']  # get shipcity input
                    if option.get_attribute("value") == toctry:
                        option.click()
                        break
                descity = browser.find_element_by_id("destCityId")
                for option in descity.find_elements_by_tag_name('option'):
                    toctry = row['shipcity']  # get city input
                    if option.get_attribute("value") == toctry:
                        option.click()
                        break
            except:
                pass
        no = browser.find_element_by_id("NumOfPackages")
        weigh = browser.find_element_by_id("totalPackageWeight")
        no.clear()
        no.send_keys("1")
        weigh.send_keys("10")
        pk = browser.find_element_by_name('receivedAtCode')
        for option in pk.find_elements_by_tag_name('option'):
            if option.text == 'Drop off at FedEx location':
                option.click()
                break
        browser.execute_script("document.getElementById('shipCalendarDate._date').value ='" + dno + "' ;")
        browser.find_element_by_id("ttTime").click()
        try:
            ll = browser.find_element_by_name('shipmentPurpose')
            for option in ll.find_elements_by_tag_name('option'):
                if option.text == 'Personal (Not sold)':
                    option.click()
                    break
            ll = browser.find_element_by_name('freightOnValue')
            for option in ll.find_elements_by_tag_name('option'):
                if option.text == 'Own risk':
                    option.click()
                    break
        except:
            pass
        try:
            tiv = browser.find_element_by_name("customsValue")
            tiv.send_keys("10")
        except:
            print("failed to set custom value")
        lfg = browser.find_element_by_name('packageForm.packageList[0].packageType')
        for option in lfg.find_elements_by_tag_name('option'):
            if option.text == "Your Packaging":
                option.click()
                break
        browser.find_element_by_class_name("buttonpurple").click()

        svtype = str(row['servicetype'])
        print(svtype)
        servicetype = svtype  # get service type
        arr = []
        flag = 0
        if browser.find_element_by_xpath("//td[@style='border-right: 1px solid #CFCFCF; vertical-align: middle;']//font[contains(@id,'" + servicetype + "')]"):
            ao = browser.find_element_by_xpath("//td[@style='border-right: 1px solid #CFCFCF; vertical-align: middle;']//font[contains(@id,'" + servicetype + "')]").text
            if "End of day" in ao:
                try:
                    flag = int(ao[11])
                except:
                    flag = 2
            else:
                arr = str(ao).replace(',', '').split(' ')
                arr = arr[1:4]
        if flag == 0:
            arr[0] = str(strptime(arr[0], '%b').tm_mon)
            finaldate = '/'.join(arr)
            print("date calculated from dno - " + str(finaldate))
            date_format = "%m/%d/%Y"
            a = datetime.datetime.strptime(dno, date_format)
            b = datetime.datetime.strptime(finaldate, date_format)
            delta = b - a
            print("No of days taken = " + str(delta.days))
            # calculation for the estimated date
            erp = d + datetime.timedelta(days=int(delta.days))
            print("estimation date= " + str(erp))
            toaddarr.append(str(erp))
            print("------------------------------------------")
        else:
            erp = d + datetime.timedelta(days=int(flag))
            print("estimation date= " + str(erp))
            toaddarr.append(str(erp))
            print("------------------------------------------")
        browser.quit()
    except:
        print("error case failed")
        browser.quit()
        count += 1
        toaddarr.append("error occurred")
        print("------------------------------------------")
        pass

print("no of exceptions= " + str(count))
print(df.shape)
df['estimated date'] = toaddarr
print(df.shape)
writer = ExcelWriter('fedex_estimate_date_cal.xlsx')
df.to_excel(writer, 'Sheet1', index=False)
writer.save()
| [
"[email protected]"
] | |
0e15d2c51b2d7c3b97dcf953ec8c6c90cb55246b | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/google/s5_getparser_20210215174545.py | 37f5cae91e71af167eb6607e6180fc9f178c3b26 | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,689 | py | import shutil
from fonduer.parser.preprocessors import html_doc_preprocessor
from sqlalchemy import exc
import pdftotree
import re
from sen_parser_usable import *
from config import config
import json
import os
import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import mimetypes
from io import BytesIO
import uuid
import sys
import logging
import errno
from fonduer.parser.models import Document, Sentence, Table
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser import Parser
from pprint import pprint
from fonduer import Meta, init_logging
from fonduer.candidates import CandidateExtractor
from fonduer.candidates import MentionNgrams
from fonduer.candidates import MentionExtractor
from fonduer.candidates.models import Mention
from fonduer.candidates.models import mention_subclass
from fonduer.candidates.models import candidate_subclass
from fonduer.candidates.matchers import RegexMatchSpan, DictionaryMatch, LambdaFunctionMatcher, Intersect, Union
from fonduer.features import Featurizer
import inspect
import matchers as matchers
from extract_html import *
PII_KEYLIST = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/model/pii-keylist.json'
PARALLEL = 4 # assuming a quad-core machine
# ATTRIBUTE = "ns8s_invoice_poc_stage"
# check that the databases mentioned below already exist
getdbref = __import__('s1_2_getdbref')
# Will return <module '1_2_getdbref' from '/home/dsie/Developer/sandbox/3ray/server/backend/python/kbc_process/1_2_getdbref.py'>
# pdf_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/'
# docs_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/'
# pdf_path = json.loads(sys.argv[1])['pdf_path']
# docs_path = json.loads(sys.argv[1])['html_path']
# job_id = json.loads(sys.argv[1])['job_id']
# exc_context = 'email_id'
# doc_context = 'mock'
# exc_context = json.loads(sys.argv[1])['context'] if len(sys.argv) > 0 and json.loads(sys.argv[1])['context'] is not None else None
# doc_context = json.loads(sys.argv[1])['doc_name'] if len(sys.argv) > 0 and json.loads(sys.argv[1])['doc_name'] is not None else None
# exc_context = 'phone_number'
pdf_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/efca2facee5f8df9/pdf/'
docs_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/efca2facee5f8df9/html/'
job_id = 'efca2facee5f8df9'
exc_context = None
doc_context = None
# Configure logging for Fonduer
init_logging(log_dir="logs", level=logging.ERROR)
max_docs = 1000
PARALLEL = 4
doc_preprocessor = None
execution_stack = ["1. Get session object..."]
try:
    session = getdbref.get_session()
    sessType = type(session)  # Will return <class 'sqlalchemy.orm.session.Session'>
    execution_stack.append("Done.")
    execution_stack.append("2. Processing layout...")
except exc.SQLAlchemyError as sql_exception:
    # the narrower SQLAlchemy handler must precede the generic one to be reachable
    logging.error(f'{execution_stack}, session = getdbref.get_session(), {sql_exception}')
except Exception as session_exception:
    logging.error(f'{execution_stack}, session = getdbref.get_session(), {session_exception}')


def do_prepare_mentions_batch(candidate_mentions, config):
    # for index, data in enumerate(config):
    for index, data in config.items():
        mention_subclass_list = list()
        max_ngrams = None
        for key in data.keys():
            if key == 'Candidates':
                for c in data.get(key):
                    if c not in candidate_mentions.keys():  # TODO verify this condition
                        candidate_mentions[c] = {
                            "mention_names": [],
                            "mention_ngrams": [],
                            "mention_matchers": [],
                            "mention_subclass": [],
                            "max_ngrams": [],
                            "throttler_function": []
                        }
                    candidate_mentions[c]['mention_names'].append(data['MentionName'])
                    candidate_mentions[c]['mention_ngrams'].append(data['MentionNGrams'])
                    candidate_mentions[c]['mention_matchers'].append(matchers.matcher[data.get('Context')])
                    if 'mention_subclass' in candidate_mentions[c].keys():
                        candidate_mentions[c]['mention_subclass'].append(mention_subclass(data['MentionName']))
                    else:
                        candidate_mentions[c]['mention_subclass'] = [mention_subclass(data['MentionName'])]
                    if 'max_ngrams' in candidate_mentions[c].keys():
                        candidate_mentions[c]['max_ngrams'].append(MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams')))
                    else:
                        candidate_mentions[c]['max_ngrams'] = [MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams'))]
                    # candidate_mentions[c]['throttler_function'] = data.get('ThrottlerFunctions')[0].get('tf')
                    candidate_mentions[c]['throttler_function'] = [{data.get('ThrottlerFunctions')[0].get('tf')}]
    return candidate_mentions


def do_prepare_mentions(candidate_mentions, config, context):
    mention_subclass_list = list()
    max_ngrams = None
    ctx = {
        "mention_names": [],
        "mention_ngrams": [],
        "mention_matchers": [],
        "mention_subclass": [],
        "max_ngrams": [],
        "throttler_function": None
    }
    ctx['mention_names'].append(config[context].get('MentionName'))
    ctx['mention_ngrams'].append(config[context]['MentionNGrams'])
    ctx['mention_matchers'].append(matchers.matcher[config[context].get('Context')])
    ctx['mention_subclass'].append(mention_subclass(config[context]['MentionName']))
    ctx['max_ngrams'].append(MentionNgrams(n_max=config[context].get('MaxNGrams')))
    ctx['throttler_function'] = config[context].get('ThrottlerFunctions')[0].get('tf')
    candidate_mentions[context] = ctx
    return candidate_mentions


def do_train(candidate_mentions):
    from sqlalchemy import desc
    docs = session.query(Document).order_by(Document.name).all()
    # docs = session.query(Document).order_by(desc(Document.id)).limit(1)
    total_mentions = session.query(Mention).count()
    splits = (1, 0.0, 0.0)
    train_cands = []
    for candidate_key in candidate_mentions.keys():
        train_docs = set()
        dev_docs = set()
        test_docs = set()
        '''print('Mention Subclass {}, Ngrams {} and Matchers {}'
        .format(candidate_mentions[candidate_key]['mention_subclass'],
        candidate_mentions[candidate_key]['max_ngrams'],
        candidate_mentions[candidate_key]['mention_matchers']))
        '''
        mention_extractor = MentionExtractor(session, candidate_mentions[candidate_key]['mention_subclass'], candidate_mentions[candidate_key]['max_ngrams'], candidate_mentions[candidate_key]['mention_matchers'])
        print(f'in do_train 171: docs {docs}')
        mention_extractor.apply(docs, parallelism=PARALLEL, progress_bar=False)
        print(f'in do_train 173: candidate_mentions {candidate_mentions}')
        candidate_mentions[candidate_key]['candidate_subclass'] = candidate_subclass(candidate_key, candidate_mentions[candidate_key].get('mention_subclass'), table_name=candidate_mentions[candidate_key]['mention_names'][0])
        candidate_extractor = CandidateExtractor(session, [candidate_mentions[candidate_key]['candidate_subclass']], throttlers=[candidate_mentions[candidate_key]['throttler_function']])
        data = [(doc.name, doc) for doc in docs]
        data.sort(key=lambda x: x[0])
        for i, (doc_name, doc) in enumerate(data):
            train_docs.add(doc)
        for i, docs in enumerate([train_docs, dev_docs, test_docs]):
            candidate_extractor.apply(docs, split=i, parallelism=PARALLEL)
        # train_cands = candidate_extractor.get_candidates(split = 0)
        # train_cands.append(candidate_extractor.get_candidates(split = 0))
        candidate_mentions[candidate_key]['train_cands'] = candidate_extractor.get_candidates(split=0)
        for index, item in enumerate(candidate_mentions[candidate_key]['train_cands']):
            if len(item) > 0:
                featurizer = Featurizer(session, [candidate_mentions[candidate_key]['candidate_subclass']])
                featurizer.apply(split=0, train=True, parallelism=PARALLEL)
                # %time featurizer.apply(split=0, train=True, parallelism=PARALLEL)
                # %time F_train = featurizer.get_feature_matrices(candidate_mentions[candidate_key]['train_cands'])
            else:
                candidate_mentions[candidate_key]['train_cands'].pop(index)
    # candidate[candidate_key]['train_cands'] = train_cands
    return candidate_mentions


def do_process_get_candidates(candidate_mentions=None):
    train_cands = do_train(candidate_mentions)
    return train_cands


def handle_return(generator, func):
    contextInfoDict = yield from generator
    func(contextInfoDict)


def get_context_async(sm, document_context='', search_context=''):
    pass
    # star_char_index = sm.char_start
    # end_char_index = sm.char_end
    # star_char_index = sm['applicant_name_context'].char_start
    # end_char_index = sm['applicant_name_context'].char_end
    # contextInfoDictionary = {
    #     'label': {
    #         # 'spanMention': sm['applicant_name_context'],
    #         'document': sm[search_context].sentence.document.name,
    #         'documentId': sm[search_context].sentence.document.id,
    #         'sentence': sm[search_context].sentence.text,
    #         'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
    #         'startChar': star_char_index,
    #         'endChar': end_char_index
    #     },
    #     'value': {
    #         # 'spanMention': sm['applicant_name_context'],
    #         'document': sm[search_context].sentence.document.name,
    #         'documentId': sm[search_context].sentence.document.id,
    #         'sentence': sm[search_context].sentence.text,
    #         'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
    #         'startChar': star_char_index,
    #         'endChar': end_char_index
    #     }
    # }
    # yield contextInfoDictionary


def print_values(value):
    print('returned: {}'.format(json.dumps(value)))


def do_get_docs_values(candidates=None, document_context=None, search_context=None):
    '''
    "<class 'fonduer.parser.models.document.Document'>"
    "<class 'fonduer.parser.models.section.Section'>"
    "<class 'fonduer.parser.models.sentence.Sentence'>"
    "<class 'fonduer.candidates.models.span_mention.SpanMention'>"
    "<class 'fonduer.candidates.models.mention.ApplicationNameLabel'>"
    '''
    train_cands = None
    docs_and_values = []
    all_docs_and_values = []
    # f-strings tolerate None in the optional contexts, so building the
    # labels never raises on missing arguments
    search_types = ['all_docs_and_pii', f'all_doc_and_{search_context}', f'all_pii_for_{document_context}', f'{search_context}_for_{document_context}']
    # search_type holds the index into search_types so the integer
    # comparisons below line up with the labels
    if document_context is None and search_context is None:
        # entire KB
        search_type = 0
    elif document_context is None and search_context is not None:
        # send entire KB
        search_type = 1
    elif document_context is not None and search_context is None:
        # send KB for document
        search_type = 2
    else:
        # send KB for match in doc
        search_type = 3
    for index, item in enumerate(candidates):
        train_cands = candidates.get(item).get('train_cands')
        if train_cands is not None:
            for instances in train_cands:
                for candidate in instances:
                    for key, value in enumerate(candidate):
                        all_docs_and_values.append({
                            "documentName": value.context.sentence.document.name,
                            "page": value.context.sentence.page,
                            "piiFound": value.context.sentence.text
                        })
    for item in all_docs_and_values:
        if search_type == 0:
            docs_and_values.append(item)
        elif search_type == 1:
            # search_context is already filtered, hence do not filter any document
            docs_and_values.append(item)
        elif search_type == 2:
            # only filter document name
            if item.get("documentName") in document_context:
                docs_and_values.append(item)
        else:
            # search_type is 3: search_context is already filtered,
            # hence only filter document_name
            if item.get("documentName") in document_context:
                docs_and_values.append(item)
    # logging.info(f'docs_and_values: {docs_and_values}')
    return docs_and_values


def train_and_test_experiment(document_context=None, context_label='', user=0, pdf_path=''):
    '''
    context_value:
    context_label:
    user:
    pdf_path:
    '''
    candidate_mentions = do_prepare_mentions({}, config, context_label)
    candidates = do_process_get_candidates(candidate_mentions)
    results = []
    if candidates is not None:
        span_mention = None
        span_mention_list = do_get_docs_values(candidates, document_context, context_label)
        if len(span_mention_list) > 0:
            span_mention = span_mention_list[0]
            returned_contexts = handle_return(get_context_async(span_mention, document_context, context_label), print_values)
            for x in returned_contexts:
                results.append(x)
        else:
            # TODO
            pass
    return results


def train_and_test(document_context=None, context_label='', user=0, pdf_path=''):
    '''
    context_value:
    context_label:
    user:
    pdf_path:
    '''
    # candidate_mentions = do_prepare_mentions({}, config, context_label)
    candidate_mentions = do_prepare_mentions_batch({}, config)
    candidates = do_process_get_candidates(candidate_mentions)
    results = []
    if candidates is not None:
        results = do_get_docs_values(candidates, document_context, context_label)
    return results
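# Illustrative invocation (hypothetical paths, matching the commented-out
# sys.argv parsing near the top of this script):
# python s5_getparser.py '{"pdf_path": ".../pdf/", "html_path": ".../html/", "job_id": "efca2facee5f8df9", "context": null, "doc_name": null}'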
print(json.dumps({"result": train_and_test(document_context=doc_context, context_label=exc_context), "job_id": job_id}))
| [
"{[email protected]}"
] | |
975465b086d16cb27f6ba2cd45f2c3a046e8636c | 48527cb443a1d5129ddfc230feb6a66c2ffa5f92 | /virtusa/twostrings.py | 2917397d9e6f4666168bab74b5bfa1ba231dbee2 | [] | no_license | arthtyagi/dailycode | 13c557c76ccb4855bd12f17e543151addeea9523 | ec026b438a484b26b467807822e059ba019fc8b4 | refs/heads/master | 2021-07-25T19:25:01.554898 | 2021-01-04T11:12:40 | 2021-01-04T11:12:40 | 233,267,702 | 2 | 0 | null | 2020-05-18T00:21:12 | 2020-01-11T17:12:30 | Python | UTF-8 | Python | false | false | 304 | py | def twoStrings(s1, s2):
    m1 = set(s1)
    m2 = set(s2)
    if set.intersection(m1, m2):
        return "YES"
    return "NO"


if __name__ == '__main__':
    t = int(input())
    for _ in range(t):
        first = input()
        second = input()
        print(twoStrings(first, second))
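# Example: twoStrings("hello", "world") -> "YES" (they share 'o' and 'l');
# twoStrings("hi", "zz") -> "NO".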
| [
"[email protected]"
] | |
4edafb11a5ecab33e8f8e447c50016d11e29f1aa | 223861dd0dda7a2e4c94c81171cd2d7fb4979659 | /azure/mgmt/resource/subscriptions/operations/tenants_operations.py | 24063db6f23095981c05cbf62a64edf3ba0195a1 | [] | no_license | pexip/os-python-azure-mgmt-resource | 99bc515e3ff4c1a07458fcd2de96bb48daab1eee | f09919899900adf389baf8f96ae7117ccdbd7f1a | refs/heads/master | 2023-08-28T05:02:15.070287 | 2017-02-28T22:33:49 | 2017-02-28T22:33:49 | 54,351,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,665 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class TenantsOperations(object):
    """TenantsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets the tenants for your account.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`TenantIdDescriptionPaged
         <azure.mgmt.resource.subscriptions.models.TenantIdDescriptionPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/tenants'

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.TenantIdDescriptionPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.TenantIdDescriptionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
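# Typical usage through a subscription client (hypothetical `client` variable):
# for tenant in client.tenants.list():
#     print(tenant.tenant_id)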
| [
"[email protected]"
] | |
5a3e12e7fb539f1bbd4918c5e539c2908bb07470 | f73ca5563e8900e1fb7b9f29c94e695402ecb051 | /relationships/exposure_nomination_relationship.py | 232c1f96213d6743dffa68af4a6638cd78201862 | [] | no_license | IceBucketScience/analysis | ad5b29307da714827a7970e55328a34fa4dda295 | d08d2951e8ed0cb75b67125cf9296552cf097565 | refs/heads/master | 2021-01-20T07:47:23.730105 | 2015-05-29T06:39:54 | 2015-05-29T06:39:54 | 35,286,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | from util.graph import Graph
import pandas as pd
import statsmodels.api as sm
import numpy as np
from util.significance import test_significance
from util.plot import add_binary_jitter, get_binary_distribution, plot_binary_distribution, plot_normal_distributions
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.interactive(False)
g = Graph()
def compute_exposure_pcts(g):
    participants = g.get_challenge_participants()
    participants_w_min_friends = [participant for participant in participants if len(participant.get_friends()) >= 50]

    raw_coord_pairs = []
    for participant in participants_w_min_friends:
        coord_pair = {'exposure_pct': participant.get_exposure_pct(), 'participated': 1 if participant.completed_challenge() else 0}
        raw_coord_pairs.append(coord_pair)

    return pd.DataFrame(raw_coord_pairs)


exposure_pcts = compute_exposure_pcts(g)

binary_distribution = get_binary_distribution(exposure_pcts, 'exposure_pct', 'participated', 'completed', 'didnt_complete', 15)
print(binary_distribution)
# plot_binary_distribution(binary_distribution, 'exposure_pct', 'exposure_pct', 'pct_completed')

ax = add_binary_jitter(exposure_pcts, 'exposure_pct', 'participated').plot(x='exposure_pct', y='participated', kind='scatter', alpha=0.3)

# logit = sm.Logit(exposure_pcts['exposure_pct'], exposure_pcts['participated'])
# reg_result = logit.fit()
# print(reg_result.summary())

# x_vals = np.linspace(exposure_pcts['exposure_pct'].min(), exposure_pcts['exposure_pct'].max(), 20)
# pd.DataFrame({'x': x_vals, 'y': logit.cdf(x_vals)}).plot(x='x', y='y', ax=ax)

didnt_participate = exposure_pcts[exposure_pcts['participated'] == 0].loc[:, 'exposure_pct']
participated = exposure_pcts[exposure_pcts['participated'] == 1].loc[:, 'exposure_pct']

# plot_normal_distributions(participated, didnt_participate, 'participated', 'didnt_participate', 'exposure_pct', 'frequency')

test_significance(didnt_participate, participated)

plt.show()
| [
"[email protected]"
] | |
1f14d36ff7919bb1e0fe13f29300a18c34b3c7df | 5e20392dc4487c75e1e3061c1416a895317ca7c2 | /navmenu/menus.py | c201e820b7ccfbfda8a750af2181a3d3001b545d | [
"MIT"
] | permissive | rashidsh/navmenu | 7072f3cee55469ccbe7f79eb5ce81c86a3aab4bb | ec67b820462cc102417e214cd74eb7b1b97ad1f1 | refs/heads/master | 2023-08-18T02:20:26.021801 | 2021-10-03T16:56:54 | 2021-10-03T16:56:54 | 366,816,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,112 | py | import collections.abc
from abc import ABC, abstractmethod
from typing import Iterator, Optional, Sequence
from .actions import Action
from .contents import BaseContent
from .items import BaseItem
from .responses import Message
from .keyboard import KeyboardButton, Keyboard
class BaseMenu(ABC):
"""A generic menu.
Args:
aliases: A sequence of strings that act as shortcuts to the menu.
"""
__slots__ = 'aliases',
def __init__(self, aliases: Optional[Sequence[str]] = None):
if aliases is None:
aliases = []
self.aliases = aliases
@abstractmethod
def select(self, action: str, payload=None) -> Optional[Iterator[Message]]:
"""Select an item based on action and payload and optionally return one or multiple messages.
Args:
action: A string indicating selected menu button.
payload: An incoming message payload.
Returns:
None or a list of messages.
"""
pass
@abstractmethod
def get_message(self, payload: Optional[dict] = None) -> Message:
"""Get a message representing the menu.
Args:
payload: An incoming message payload.
Returns:
A message representing the menu.
"""
pass
@abstractmethod
def serialize(self) -> dict:
"""Serialize the class instance to a dictionary.
Returns:
A serialized class instance.
"""
res = {}
if self.aliases:
res['aliases'] = self.aliases
return res
def enter(self, payload: Optional[dict] = None) -> Optional[Message]:
"""Enter the menu and optionally return a message.
Args:
payload: An incoming message payload.
Returns:
None or a message.
"""
pass
class Menu(BaseMenu):
"""A menu with fixed content and list of items.
Args:
content: The menu content.
items: A sequence of menu items.
default_action: The action to select when the provided action does not exist.
aliases: A sequence of strings that act as shortcuts to the menu.
"""
__slots__ = 'content', 'items', 'default_action'
def __init__(
self,
content: BaseContent,
items: Optional[Sequence[BaseItem]] = None,
default_action: Optional[Action] = None,
aliases: Optional[Sequence[str]] = None,
) -> None:
super().__init__(aliases)
if items is None:
items = []
self.content = content
self.items = items
self.default_action = default_action
def __repr__(self) -> str:
return f'Menu({self.content}, {self.items}, {repr(self.default_action)}, {self.aliases})'
def select(self, action: str, payload: Optional[dict] = None) -> Optional[Iterator[Message]]:
target_item = next((
i for i in self.items if (isinstance(i, BaseItem) and i.name == action and i.is_available(payload))
), None)
if target_item is None:
if self.default_action:
return self.default_action.process(payload),
return
return target_item.on_select(payload)
def get_message(self, payload: Optional[dict] = None) -> Message:
if payload is None:
payload = {}
keyboard = Keyboard()
for item in self.items:
if item.is_available(payload):
kwargs = item.get_content()
if kwargs['type'] == 'button':
keyboard.add_button(KeyboardButton(
kwargs['payload'],
kwargs['text'].format(**payload),
kwargs['color'],
))
elif kwargs['type'] == 'line_break':
keyboard.add_line()
return Message(self.content, keyboard, payload)
def add_item(self, item: BaseItem) -> None:
"""Add the item to the menu.
Args:
item: The item to add.
Raises:
RuntimeError: The menu's item list is immutable.
"""
try:
self.items.append(item)
except AttributeError:
raise RuntimeError('The menu\'s item list is immutable')
def serialize(self) -> dict:
res = {
**super().serialize(),
'content': {
'type': self.content.__class__.__name__,
**self.content.serialize(),
}
}
if self.items:
res['items'] = [{
'type': item.__class__.__name__,
**item.serialize(),
} for item in self.items]
if self.default_action:
res['default_action'] = {
'type': self.default_action.__class__.__name__,
**self.default_action.serialize(),
}
return res
class CustomMenu(BaseMenu):
"""A menu that is controlled by a custom class.
Args:
handler: A class containing "select", "get_message" and "enter" methods.
aliases: A sequence of strings that act as shortcuts to the menu.
"""
__slots__ = 'handler',
def __init__(self, handler, aliases: Optional[Sequence[str]] = None) -> None:
super().__init__(aliases)
self.handler = handler
def __repr__(self) -> str:
return f'CustomMenu({self.handler})'
def select(self, action: str, payload: Optional[dict] = None) -> Optional[Iterator[Message]]:
res = self.handler.select(action, payload)
return res if isinstance(res, collections.abc.Sequence) else (res, )
def get_message(self, payload: Optional[dict] = None) -> Optional[Message]:
return self.handler.get_message(payload)
def serialize(self) -> dict:
res = {
**super().serialize(),
'handler': self.handler.__name__,
}
return res
def enter(self, payload: Optional[dict] = None) -> Optional[Message]:
return self.handler.enter(payload)
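# Editor's sketch (assumption, not part of the original module): a minimal
# handler satisfying the select/get_message/enter contract that CustomMenu
# documents above. The name ExampleHandler is hypothetical, and returning None
# everywhere is enough because CustomMenu.select wraps non-sequence results in
# a tuple.
class ExampleHandler:
    @staticmethod
    def select(action, payload=None):
        return None  # CustomMenu.select turns this into (None,)
    @staticmethod
    def get_message(payload=None):
        return None  # would normally build and return a Message for this menu
    @staticmethod
    def enter(payload=None):
        return None  # optional greeting message shown on entering the menu
# usage: custom_menu = CustomMenu(ExampleHandler, aliases=['example'])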
| [
"[email protected]"
] | |
6c292a191edbad614fba3c6a8a5e4449118a6ce3 | 07e23e5fc0abbbf6aa19162545321562a0fd056a | /Basic Python/Basic_python/27_assert.py | bf30490f715333860c35512278a1cd02c27543d7 | [] | no_license | Meet57/programming | 12ed9f1da93b5aa09bd5eb7f03437b39ce6aabcc | 65ee6468e592bacec13aa6eb381666350c24500a | refs/heads/master | 2021-06-15T11:29:37.530931 | 2021-05-29T21:18:21 | 2021-05-29T21:18:21 | 195,511,057 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | # Python 3 code to demonstrate
# working of assert
# initializing number
a = 4
b = 0
# using assert to check for 0
print ("The value of a / b is : ")
assert b != 0, "Divide by 0 error"
print (a / b)
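# Editor's note: because b == 0, the assert above raises AssertionError with
# the message "Divide by 0 error", so the final print never runs. A variant
# that survives the failed check (assumption, not part of the original demo):
# try:
#     assert b != 0, "Divide by 0 error"
#     print (a / b)
# except AssertionError as err:
#     print (err)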
| [
"[email protected]"
] | |
c3a326c658642bc0f0f992e7f5184c0e1f41e2dc | 6cbfa2d9fc719ce73c2fff6696569170cf6a2cfe | /mp2/node_modules/bcrypt/build-tmp-napi-v3/config.gypi | 033aefdb875e6d80a76ef216f13860a8f8e427ba | [
"MIT"
] | permissive | unisse-courses/s14-mp15 | a68ea1f3936e6521d5e196df3e3847cd4a1664c2 | 9d92e78b8c266f1920b6ae0cacb16660973e4796 | refs/heads/master | 2022-12-26T01:03:54.074100 | 2021-03-19T12:21:27 | 2021-03-19T12:21:27 | 242,083,885 | 0 | 0 | null | 2022-12-07T13:45:32 | 2020-02-21T07:53:07 | CSS | UTF-8 | Python | false | false | 6,469 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": [],
"msbuild_toolset": "v141",
"msvs_windows_target_platform_version": "10.0.17763.0"
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt67l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "67",
"is_debug": 0,
"napi_build_version": "3",
"nasm_version": "2.14",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "so.72",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\Marius\\AppData\\Local\\node-gyp\\Cache\\12.19.0",
"standalone_static_library": 1,
"msbuild_path": "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\BuildTools\\MSBuild\\15.0\\Bin\\MSBuild.exe",
"fallback_to_build": "true",
"module": "C:\\Users\\Marius\\Documents\\GitHub\\s14-mp15\\mp2\\node_modules\\bcrypt\\lib\\binding\\napi-v3\\bcrypt_lib.node",
"module_name": "bcrypt_lib",
"module_path": "C:\\Users\\Marius\\Documents\\GitHub\\s14-mp15\\mp2\\node_modules\\bcrypt\\lib\\binding\\napi-v3",
"napi_version": "7",
"node_abi_napi": "napi",
"node_napi_label": "napi-v3",
"access": "",
"allow_same_version": "",
"also": "",
"always_auth": "",
"audit": "true",
"audit_level": "low",
"auth_type": "legacy",
"before": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\Marius\\AppData\\Roaming\\npm-cache",
"cache_lock_retries": "10",
"cache_lock_stale": "60000",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cert": "",
"cidr": "",
"color": "true",
"commit_hooks": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"dry_run": "",
"editor": "notepad.exe",
"engine_strict": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_maxtimeout": "60000",
"fetch_retry_mintimeout": "10000",
"force": "",
"format_package_lock": "true",
"fund": "true",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\Marius\\AppData\\Roaming\\npm\\etc\\npmrc",
"globalignorefile": "C:\\Users\\Marius\\AppData\\Roaming\\npm\\etc\\npmignore",
"global_style": "",
"group": "",
"ham_it_up": "",
"heading": "npm",
"https_proxy": "",
"if_present": "",
"ignore_prepublish": "",
"ignore_scripts": "",
"init_author_email": "",
"init_author_name": "",
"init_author_url": "",
"init_license": "ISC",
"init_module": "C:\\Users\\Marius\\.npm-init.js",
"init_version": "1.0.0",
"json": "",
"key": "",
"legacy_bundling": "",
"link": "",
"local_address": "",
"logs_max": "10",
"long": "",
"maxsockets": "50",
"message": "%s",
"metrics_registry": "https://registry.npmjs.org/",
"node_gyp": "C:\\Program Files\\nodejs\\node_modules\\npm\\node_modules\\node-gyp\\bin\\node-gyp.js",
"node_options": "",
"node_version": "12.19.0",
"offline": "",
"onload_script": "",
"only": "",
"optional": "true",
"otp": "",
"package_lock": "true",
"package_lock_only": "",
"parseable": "",
"prefer_offline": "",
"prefer_online": "",
"prefix": "C:\\Users\\Marius\\AppData\\Roaming\\npm",
"preid": "",
"production": "",
"progress": "true",
"read_only": "",
"rebuild_bundle": "true",
"registry": "https://registry.npmjs.org/",
"rollback": "true",
"save": "true",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"save_prod": "",
"scope": "",
"scripts_prepend_node_path": "warn-only",
"script_shell": "",
"searchexclude": "",
"searchlimit": "20",
"searchopts": "",
"searchstaleness": "900",
"send_metrics": "",
"shell": "C:\\WINDOWS\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_commit": "",
"sign_git_tag": "",
"sso_poll_frequency": "500",
"sso_type": "oauth",
"strict_ssl": "true",
"tag": "latest",
"tag_version_prefix": "v",
"timing": "",
"tmp": "C:\\Users\\Marius\\AppData\\Local\\Temp",
"umask": "0000",
"unicode": "",
"unsafe_perm": "true",
"update_notifier": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\Marius\\.npmrc",
"user_agent": "npm/6.14.8 node/v12.19.0 win32 x64",
"version": "",
"versions": "",
"viewer": "browser"
}
}
| [
"[email protected]"
] | |
50f43bb978b74c849a33e544f6af9ebca0ac7070 | c43109f27fc0432b8592f76106f9b9cfd971bf19 | /game_functions.py | b93cb7251469b05db38a349da3ea3cf2ecb464ba | [] | no_license | Stark-Xue/alien_invasion | ca6d8cb76ac47a24d5482613fad7970347af0014 | 80e42448f613b98f9ac6de5687119beab8ad7d2e | refs/heads/master | 2020-07-17T18:08:26.927976 | 2019-09-08T09:21:35 | 2019-09-08T09:21:35 | 206,065,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,160 | py | import sys
import pygame
from time import sleep
from Bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, ship, bullets):
if event.key == pygame.K_RIGHT:
        # move the ship to the right
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_q:
sys.exit()
def check_keyup_events(event, ship):
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
"""监视键盘事件和鼠标事件"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens,
bullets, mouse_x, mouse_y)
def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens,
bullets, mouse_x, mouse_y):
"""玩家单击按钮开始游戏"""
    button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
    if button_clicked and not stats.game_active:
        # reset the game settings
        ai_settings.initialize_dynamic_settings()
        # hide the mouse cursor
        pygame.mouse.set_visible(False)
        # reset the game statistics
        stats.reset_stats()
        stats.game_active = True
        # reset the scoreboard images
        sb.prep_score()
        sb.prep_high_score()
        sb.prep_level()
        sb.prep_ships()
        # empty the lists of aliens and bullets
        aliens.empty()
        bullets.empty()
        # create a new fleet and center the ship at the bottom
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets,
play_button):
"""更新屏幕上的图像,并切换到新屏幕"""
#每次循环时都重绘屏幕
screen.fill(ai_settings.bg_color)
for bullet in bullets:
bullet.draw_bullet()
ship.blitme()
#alien.blitme()
aliens.draw(screen)
    # draw the score information
sb.show_score()
if not stats.game_active:
play_button.draw_button()
    # make the most recently drawn screen visible
pygame.display.flip()
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
"""更新子弹的位置,删除已消失的子弹"""
#更新子弹的位置
bullets.update()
#删除已消失的子弹
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
#print(len(bullets))
check_bullets_aliens_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_bullets_aliens_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
    # check whether any bullets have hit aliens
    # if so, remove the bullet and the alien
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
for alien in collisions.values():
stats.score += ai_settings.alien_points * len(alien)
sb.prep_score()
check_high_score(stats, sb)
if len(aliens) == 0:
        # destroy existing bullets and create a new fleet
bullets.empty()
ai_settings.increase_speed()
stats.level += 1
sb.prep_level()
create_fleet(ai_settings, screen, ship, aliens)
def fire_bullet(ai_settings, screen, ship, bullets):
"""如果还没有达到限制,就发射一颗子弹"""
if len(bullets) < ai_settings.bullet_allowed:
        # create a new bullet and add it to the bullets group
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def get_number_alien_x(ai_settings, alien_width):
available_space_x = ai_settings.screen_width - 2 * alien_width
number_alien_x = int(available_space_x / (2 * alien_width))
return number_alien_x
def get_number_rows(ai_settings, ship_height, alien_height):
"""计算屏幕可以容耐多少行外星人"""
available_space_y = ai_settings.screen_height - 3 * alien_height - ship_height
number_alien_rows = int(available_space_y / (2 * alien_height))
return number_alien_rows
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
alien = Alien(ai_settings, screen)
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
"""创建外星人群"""
#创建一个外星人,并计算一行可以容耐多少个外星人
#外星人之间的间距就是外星人的宽度
alien = Alien(ai_settings, screen)
#alien_width = alien.rect.width
number_alien_x = get_number_alien_x(ai_settings, alien.rect.width)
number_rows = get_number_rows(ai_settings, ship.rect.height,
alien.rect.height)
    # create the fleet of aliens
    for number_row in range(number_rows):
        # create one row of aliens
for alien_number in range(number_alien_x):
create_alien(ai_settings, screen, aliens, alien_number,
number_row)
def check_fleet_edges(ai_settings, aliens):
"""有外行人到达边缘时采取相应的措施"""
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def change_fleet_direction(ai_settings, aliens):
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def update_aliens(ai_settings, stats, screen, sb, ship, aliens, bullets):
"""更新外星人群中所有外星人的位置"""
"""检查是否有外星人位于屏幕边缘"""
check_fleet_edges(ai_settings, aliens)
aliens.update()
    # look for alien-ship collisions
    if pygame.sprite.spritecollideany(ship, aliens):
        #print("ship hit!!!")
ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)
    # look for aliens hitting the bottom of the screen
check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets)
def check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets):
    # check whether any aliens have reached the bottom of the screen
screen_rect = screen.get_rect()
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
            # treat this the same as the ship being hit
ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)
break
def ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets):
    # respond to the ship being hit by an alien
    # decrement ship_left
if stats.ship_left > 1:
stats.ship_left -= 1
        # update the scoreboard
sb.prep_ships()
#子弹和外星人清空
aliens.empty()
bullets.empty()
        # create a new fleet and center the ship at the bottom
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
        # pause for half a second
sleep(0.5)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
def check_high_score(stats, sb):
"""检查是否诞生类最高分"""
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
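# Editor's sketch (assumption, not part of the original module): the main loop
# in alien_invasion.py that these helpers are written against typically reads:
# while True:
#     gf.check_events(ai_settings, screen, stats, sb, play_button, ship,
#                     aliens, bullets)
#     if stats.game_active:
#         ship.update()
#         gf.update_bullets(ai_settings, screen, stats, sb, ship, aliens,
#                           bullets)
#         gf.update_aliens(ai_settings, stats, screen, sb, ship, aliens,
#                          bullets)
#     gf.update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets,
#                      play_button)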
| [
"[email protected]"
] | |
2350975098b4d2adf5f678bb5e030ce99685ae81 | 2b55b0b4b153e1235cef267bc4f68040ced69398 | /node_modules/uws/build/config.gypi | e49284cf7390d90c9daf9573fec4b4dd50558f8e | [
"Zlib"
] | permissive | mfps/chatApp | 25107be7d6810973e63683ab10e2c0b1c2f8f36e | 6b0701e7ef58abaa1f3ee60b421fc1590dcbf07b | refs/heads/master | 2021-05-07T00:25:49.245260 | 2017-11-09T15:09:43 | 2017-11-09T15:09:43 | 110,129,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,462 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt59l.dat",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt59l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "59",
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.57",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"nodedir": "C:\\Users\\zim\\.node-gyp\\8.5.0",
"standalone_static_library": 1,
"ignore_optional": "",
"ignore_scripts": "",
"init_license": "MIT",
"init_version": "1.0.0",
"registry": "https://registry.yarnpkg.com",
"save_prefix": "^",
"strict_ssl": "true",
"user_agent": "yarn/1.0.2 npm/? node/v8.5.0 win32 x64",
"version_git_message": "v%s",
"version_git_sign": "",
"version_git_tag": "true",
"version_tag_prefix": "v"
}
}
| [
"[email protected]"
] | |
d22cbea1818d3165db14b5717ba1432b0f7a2fda | 4059573793d0ee5b74c9dd919aa2945dad2fe426 | /Practise_stacks_queues/animal_shelter.py | 4bcc4ed0250752698c46cbac64b5eac41e6d2528 | [] | no_license | nayanika2304/DataStructuresPractice | 04ea6d9248a63983abdd2b983632ba5907eed9d4 | f3c815ff113ce3977cc743360b77fb21c9f9b383 | refs/heads/master | 2022-12-08T05:28:22.897414 | 2020-08-29T18:17:57 | 2020-08-29T18:17:57 | 282,513,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,898 | py | '''
An animal shelter, which holds only dogs and cats, operates on a strictly"first in, first
out" basis. People must adopt either the "oldest" (based on arrival time) of all animals at the shelter,
or they can select whether they would prefer a dog or a cat (and will receive the oldest animal of
that type). They cannot select which specific animal they would like. Create the data structures to
maintain this system and implement operations such as enqueue, dequeueAny, dequeueDog,
and dequeueCat. You may use the built-in Linked List data structure.
'''
class Animal(object):
def __init__(self, animalName=None, animalType=None, next=None):
self.animalName = animalName
self.animalType = animalType
self.next = next
self.timestamp = 0 # to keep track of the order of arrival of the animals
class AnimalShelter(object):
def __init__(self):
self.headCat = None
self.tailCat = None
self.headDog = None
self.tailDog = None
self.animalNumber = 0
def enqueue(self, animalName, animalType):
self.animalNumber += 1
newAnimal = Animal(animalName, animalType)
newAnimal.timestamp = self.animalNumber
if animalType == 'cat':
if not self.headCat:
self.headCat = newAnimal
if self.tailCat:
self.tailCat.next = newAnimal
self.tailCat = newAnimal
elif animalType == 'dog':
if not self.headDog:
self.headDog = newAnimal
if self.tailDog:
self.tailDog.next = newAnimal
self.tailDog = newAnimal
    def dequeueCat(self):
        if self.headCat:
            newAnimal = self.headCat
            self.headCat = newAnimal.next
            if self.headCat is None:
                # keep the tail pointer consistent once the queue empties
                self.tailCat = None
            return str(newAnimal.animalName)
        else:
            return "No cat left!"
    def dequeueDog(self):
        if self.headDog:
            newAnimal = self.headDog
            self.headDog = newAnimal.next
            if self.headDog is None:
                # keep the tail pointer consistent once the queue empties
                self.tailDog = None
            return str(newAnimal.animalName)
        else:
            return "No dog left!"
def dequeueAny(self):
if self.headCat and not self.headDog:
return self.dequeueCat()
elif not self.headCat and self.headDog:
return self.dequeueDog()
elif self.headCat:
if self.headCat.timestamp < self.headDog.timestamp:
return self.dequeueCat()
else:
return self.dequeueDog()
else:
return "No animal left!"
def display(self):
print('The list of cats : ')
cats = self.headCat
countCat = 1
while cats != None:
print("#{} : {}.".format(countCat,cats.animalName))
cats = cats.next
countCat += 1
print('\nThe list of dogs : ')
dogs = self.headDog
countDog = 1
while dogs:
print("#{} : {}.".format(countDog,dogs.animalName))
dogs = dogs.next
countDog += 1
if __name__ == "__main__":
AS = AnimalShelter()
AS.enqueue('mia', 'cat')
AS.enqueue('tommy', 'dog')
AS.enqueue('lisa', 'cat')
AS.enqueue('bruno', 'dog')
AS.enqueue('brando', 'dog')
AS.enqueue('molly', 'cat')
AS.display()
print("\nSelect a cat")
print(AS.dequeueCat())
print("\nSelect a dog")
print(AS.dequeueDog())
print("\nSelect any animal")
print(AS.dequeueAny()) | [
"[email protected]"
] | |
199ddbd7a5a1fd27e5f784db9d5a6bfdb6c25fe0 | 2086a897538311fe25acfec950e68a447d12cb24 | /Elementary/secret_message.py | 538a8d0ddbdbf291db1aaaf4fdbee78c49bd3f5a | [] | no_license | ahrechanychenko/checkio | a9d1e154c7cd86bcad689cfabf2adc7f689d3f40 | fbbe7f6681bfa420d90a6b117f427c1adcdc6bd8 | refs/heads/master | 2021-10-16T00:19:43.413789 | 2015-10-15T18:31:06 | 2015-10-15T18:31:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | __author__ = 'levor23'
def find_message(text):
return ''.join([x for x in text if x.isupper()])
if __name__ == '__main__':
# These "asserts" using only for self-checking
# and not necessary for auto-testing
assert find_message(u"How are you? "
u"Eh, ok. Low or Lower? Ohhh.") == "HELLO", "hello"
assert find_message(u"hello world!") == "", "Nothing"
assert find_message(u"HELLO WORLD!!!") == "HELLOWORLD", "Capitals"
| [
"[email protected]"
] | |
dec31f9189b180e0f74278eb02c491a584c5b4c6 | 549270020f6c8724e2ef1b12e38d11b025579f8d | /recipes/openscenegraph/all/conanfile.py | 5d6b6d3a14882b9a8393542db25bcd7838642b33 | [
"MIT"
] | permissive | conan-io/conan-center-index | 1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43 | 3b17e69bb4e5601a850b6e006e44775e690bac33 | refs/heads/master | 2023-08-31T11:34:45.403978 | 2023-08-31T11:13:23 | 2023-08-31T11:13:23 | 204,671,232 | 844 | 1,820 | MIT | 2023-09-14T21:22:42 | 2019-08-27T09:43:58 | Python | UTF-8 | Python | false | false | 22,040 | py | from conan import ConanFile
from conan.tools.files import get, rmdir, rm, apply_conandata_patches
from conan.tools.build import cross_building
from conan.tools.scm import Version
from conan.tools.apple import is_apple_os
from conan.errors import ConanInvalidConfiguration
from conans import CMake
import os
import functools
required_conan_version = ">=1.50.0"
class OpenSceneGraphConanFile(ConanFile):
name = "openscenegraph"
description = "OpenSceneGraph is an open source high performance 3D graphics toolkit"
topics = ("openscenegraph", "graphics")
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://www.openscenegraph.org"
license = "LGPL-2.1-only", "WxWindows-exception-3.1"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"build_applications": [True, False],
"enable_notify": [True, False],
"enable_deprecated_api": [True, False],
"enable_readfile": [True, False],
"enable_ref_ptr_implicit_output_conversion": [True, False],
"enable_ref_ptr_safe_dereference": [True, False],
"enable_envvar_support": [True, False],
"enable_windowing_system": [True, False],
"enable_deprecated_serializers": [True, False],
"use_fontconfig": [True, False],
"with_asio": [True, False],
"with_curl": [True, False],
"with_dcmtk": [True, False],
"with_freetype": [True, False],
"with_gdal": [True, False],
"with_gif": [True, False],
"with_gta": [True, False],
"with_jasper": [True, False],
"with_jpeg": [True, False],
"with_openexr": [True, False],
"with_png": [True, False],
"with_tiff": [True, False],
"with_zlib": [True, False],
"opengl_profile": ["gl1", "gl2", "gl3", "glCore", "gles1", "gles2", "gles3", "gles2+gles3"],
}
default_options = {
"shared": False,
"fPIC": True,
"build_applications": False,
"enable_notify": True,
"enable_deprecated_api": False,
"enable_readfile": True,
"enable_ref_ptr_implicit_output_conversion": True,
"enable_ref_ptr_safe_dereference": True,
"enable_envvar_support": True,
"enable_windowing_system": True,
"enable_deprecated_serializers": False,
"use_fontconfig": True,
"with_asio": False,
"with_curl": False,
"with_dcmtk": False,
"with_freetype": True,
"with_gdal": False,
"with_gif": True,
"with_gta": False,
"with_jasper": False,
"with_jpeg": True,
"with_openexr": False,
"with_png": True,
"with_tiff": True,
"with_zlib": True,
"opengl_profile": "gl2",
}
short_paths = True
exports_sources = "CMakeLists.txt", "patches/*.patch"
generators = "cmake", "cmake_find_package"
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
del self.options.with_asio
# Default to false with fontconfig until it is supported on Windows
self.options.use_fontconfig = False
if is_apple_os(self):
# osg uses imageio on Apple platforms
del self.options.with_gif
del self.options.with_jpeg
del self.options.with_png
# imageio supports tiff files so the tiff plugin isn't needed on Apple platforms
self.options.with_tiff = False
def configure(self):
if self.options.shared:
del self.options.fPIC
if not self.options.with_zlib:
# These require zlib support
del self.options.with_openexr
del self.options.with_png
del self.options.with_dcmtk
def validate(self):
if self.options.get_safe("with_asio", False):
raise ConanInvalidConfiguration("ASIO support in OSG is broken, see https://github.com/openscenegraph/OpenSceneGraph/issues/921")
if hasattr(self, "settings_build") and cross_building(self):
raise ConanInvalidConfiguration("openscenegraph recipe cannot be cross-built yet. Contributions are welcome.")
def requirements(self):
if self.options.enable_windowing_system and self.settings.os == "Linux":
self.requires("xorg/system")
self.requires("opengl/system")
if self.options.use_fontconfig:
self.requires("fontconfig/2.14.2")
if self.options.get_safe("with_asio", False):
# Should these be private requires?
self.requires("asio/1.22.1")
self.requires("boost/1.81.0")
if self.options.with_curl:
self.requires("libcurl/8.0.1")
if self.options.get_safe("with_dcmtk"):
self.requires("dcmtk/3.6.6")
if self.options.with_freetype:
self.requires("freetype/2.13.0")
if self.options.with_gdal:
self.requires("gdal/3.4.3")
if self.options.get_safe("with_gif"):
self.requires("giflib/5.2.1")
if self.options.with_gta:
self.requires("libgta/1.2.1")
if self.options.with_jasper:
self.requires("jasper/2.0.33")
if self.options.get_safe("with_jpeg"):
self.requires("libjpeg/9e")
if self.options.get_safe("with_openexr"):
self.requires("openexr/3.1.7")
if self.options.get_safe("with_png"):
self.requires("libpng/1.6.40")
if self.options.with_tiff:
self.requires("libtiff/4.5.1")
if self.options.with_zlib:
self.requires("zlib/1.2.13")
def source(self):
get(self, **self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def _patch_sources(self):
apply_conandata_patches(self)
for package in ("Fontconfig", "Freetype", "GDAL", "GIFLIB", "GTA", "Jasper", "OpenEXR"):
# Prefer conan's find package scripts over osg's
os.unlink(os.path.join(self._source_subfolder, "CMakeModules", "Find{}.cmake".format(package)))
@functools.lru_cache(1)
def _configured_cmake(self):
cmake = CMake(self)
cmake.definitions["USE_3RDPARTY_BIN"] = False
cmake.definitions["DYNAMIC_OPENSCENEGRAPH"] = self.options.shared
cmake.definitions["DYNAMIC_OPENTHREADS"] = self.options.shared
cmake.definitions["BUILD_OSG_APPLICATIONS"] = self.options.build_applications
cmake.definitions["BUILD_OSG_EXAMPLES"] = False
cmake.definitions["OSG_NOTIFY_DISABLED"] = not self.options.enable_notify
cmake.definitions["OSG_USE_DEPRECATED_API"] = self.options.enable_deprecated_api
cmake.definitions["OSG_PROVIDE_READFILE"] = self.options.enable_readfile
cmake.definitions["OSG_USE_REF_PTR_IMPLICIT_OUTPUT_CONVERSION"] = self.options.enable_ref_ptr_implicit_output_conversion
cmake.definitions["OSG_USE_REF_PTR_SAFE_DEREFERENCE"] = self.options.enable_ref_ptr_safe_dereference
cmake.definitions["OSG_ENVVAR_SUPPORTED"] = self.options.enable_envvar_support
if not self.options.enable_windowing_system:
cmake.definitions["OSG_WINDOWING_SYSTEM"] = None
cmake.definitions["BUILD_OSG_DEPRECATED_SERIALIZERS"] = self.options.enable_deprecated_serializers
cmake.definitions["OSG_TEXT_USE_FONTCONFIG"] = self.options.use_fontconfig
cmake.definitions["OPENGL_PROFILE"] = str(self.options.opengl_profile).upper()
# Disable option dependencies unless we have a package for them
cmake.definitions["OSG_WITH_FREETYPE"] = self.options.with_freetype
cmake.definitions["OSG_WITH_OPENEXR"] = self.options.get_safe("with_openexr", False)
cmake.definitions["OSG_WITH_INVENTOR"] = False
cmake.definitions["OSG_WITH_JASPER"] = self.options.with_jasper
cmake.definitions["OSG_WITH_OPENCASCADE"] = False
cmake.definitions["OSG_WITH_FBX"] = False
cmake.definitions["OSG_WITH_ZLIB"] = self.options.with_zlib
cmake.definitions["OSG_WITH_GDAL"] = self.options.with_gdal
cmake.definitions["OSG_WITH_GTA"] = self.options.with_gta
cmake.definitions["OSG_WITH_CURL"] = self.options.with_curl
cmake.definitions["OSG_WITH_LIBVNCSERVER"] = False
cmake.definitions["OSG_WITH_DCMTK"] = self.options.get_safe("with_dcmtk", False)
cmake.definitions["OSG_WITH_FFMPEG"] = False
cmake.definitions["OSG_WITH_DIRECTSHOW"] = False
cmake.definitions["OSG_WITH_SDL"] = False
cmake.definitions["OSG_WITH_POPPLER"] = False
cmake.definitions["OSG_WITH_RSVG"] = False
cmake.definitions["OSG_WITH_NVTT"] = False
cmake.definitions["OSG_WITH_ASIO"] = self.options.get_safe("with_asio", False)
cmake.definitions["OSG_WITH_ZEROCONF"] = False
cmake.definitions["OSG_WITH_LIBLAS"] = False
cmake.definitions["OSG_WITH_GIF"] = self.options.get_safe("with_gif", False)
cmake.definitions["OSG_WITH_JPEG"] = self.options.get_safe("with_jpeg", False)
cmake.definitions["OSG_WITH_PNG"] = self.options.get_safe("with_png", False)
cmake.definitions["OSG_WITH_TIFF"] = self.options.with_tiff
if self.settings.os == "Windows":
# osg has optional quicktime support on Windows
cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_QuickTime"] = True
cmake.definitions["OSG_MSVC_VERSIONED_DLL"] = False
cmake.configure()
return cmake
def build(self):
self._patch_sources()
self._configured_cmake().build()
def package(self):
self._configured_cmake().install()
self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rm(self, "*.pdb", self.package_folder, True)
def package_info(self):
# FindOpenSceneGraph.cmake is shipped with cmake and is a traditional cmake script
# It doesn't setup targets and only provides a few variables:
# - OPENSCENEGRAPH_FOUND
# - OPENSCENEGRAPH_VERSION
# - OPENSCENEGRAPH_INCLUDE_DIRS
# - OPENSCENEGRAPH_LIBRARIES
# Unfortunately, the cmake_find_package generators don't currently allow directly setting variables,
# but it will set the last three of these if the name of the package is OPENSCENEGRAPH (it uses
# the filename for the first, so OpenSceneGraph_FOUND gets set, not OPENSCENEGRAPH_FOUND)
# TODO: set OPENSCENEGRAPH_FOUND in cmake_find_package and cmake_find_package_multi
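        # Editor's note (assumption, not from this recipe): a consumer relying
        # on those variables would typically write, in CMake:
        #   find_package(OpenSceneGraph REQUIRED)
        #   target_include_directories(app PRIVATE ${OPENSCENEGRAPH_INCLUDE_DIRS})
        #   target_link_libraries(app PRIVATE ${OPENSCENEGRAPH_LIBRARIES})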
self.cpp_info.filenames["cmake_find_package"] = "OpenSceneGraph"
self.cpp_info.filenames["cmake_find_package_multi"] = "OpenSceneGraph"
self.cpp_info.names["cmake_find_package"] = "OPENSCENEGRAPH"
self.cpp_info.names["cmake_find_package_multi"] = "OPENSCENEGRAPH"
if self.settings.build_type == "Debug":
postfix = "d"
elif self.settings.build_type == "RelWithDebInfo":
postfix = "rd"
elif self.settings.build_type == "MinSizeRel":
postfix = "s"
else:
postfix = ""
def setup_plugin(plugin):
lib = "osgdb_" + plugin
plugin_library = self.cpp_info.components[lib]
plugin_library.libs = [] if self.options.shared else [lib + postfix]
plugin_library.requires = ["OpenThreads", "osg", "osgDB", "osgUtil"]
if not self.options.shared:
plugin_library.libdirs = [os.path.join("lib", "osgPlugins-{}".format(self.version))]
return plugin_library
def setup_serializers(lib):
plugins = []
if lib not in ("osgDB", "osgWidget", "osgPresentation"):
plugins.append("serializers_{}".format(lib.lower()))
if self.options.enable_deprecated_serializers:
if lib not in ("osgUtil", "osgDB", "osgGA", "osgManipulator", "osgUI", "osgPresentation"):
plugins.append("deprecated_{}".format(lib.lower()))
for plugin in plugins:
setup_plugin(plugin).requires.append(lib)
def setup_library(lib):
library = self.cpp_info.components[lib]
library.libs = [lib + postfix]
library.names["pkg_config"] = "openscenegraph-{}".format(lib)
setup_serializers(lib)
return library
# Core libraries
# requires obtained from osg's source code
# TODO: FindOpenThreads.cmake is shipped with CMake, so we should generate separate
# files for it with cmake_find_package and cmake_find_package_multi
library = self.cpp_info.components["OpenThreads"]
library.libs = ["OpenThreads" + postfix]
library.names["pkg_config"] = "openthreads"
if self.settings.os == "Linux":
library.system_libs = ["pthread"]
library = setup_library("osg")
library.requires = ["OpenThreads", "opengl::opengl"]
if self.settings.os == "Linux":
library.system_libs = ["m", "rt", "dl"]
if not self.options.shared:
library.defines.append("OSG_LIBRARY_STATIC")
library = setup_library("osgDB")
library.requires = ["osg", "osgUtil", "OpenThreads"]
if self.settings.os == "Linux":
library.system_libs = ["dl"]
elif self.settings.os == "Macos":
library.frameworks = ["Carbon", "Cocoa"]
if self.options.with_zlib:
library.requires.append("zlib::zlib")
setup_library("osgUtil").requires = ["osg", "OpenThreads"]
setup_library("osgGA").requires = ["osgDB", "osgUtil", "osg", "OpenThreads"]
library = setup_library("osgText")
library.requires = ["osgDB", "osg", "osgUtil", "OpenThreads"]
if self.options.use_fontconfig:
library.requires.append("fontconfig::fontconfig")
library = setup_library("osgViewer")
library.requires = ["osgGA", "osgText", "osgDB", "osgUtil", "osg"]
if self.options.enable_windowing_system:
if self.settings.os == "Linux":
library.requires.append("xorg::xorg")
elif is_apple_os(self):
library.frameworks = ["Cocoa"]
if self.settings.os == "Windows":
library.system_libs = ["gdi32"]
setup_library("osgAnimation").requires = ["osg", "osgText", "osgGA", "osgViewer", "OpenThreads"]
setup_library("osgFX").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgManipulator").requires = ["osgViewer", "osgGA", "osgUtil", "osg", "OpenThreads"]
setup_library("osgParticle").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgUI").requires = ["osgDB", "osgGA", "osgUtil", "osgText", "osgViewer", "osg", "OpenThreads"]
setup_library("osgVolume").requires = ["osgGA", "osgDB", "osgUtil", "osg", "OpenThreads"]
setup_library("osgShadow").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgSim").requires = ["osgText", "osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgTerrain").requires = ["osgUtil", "osgDB", "osg", "OpenThreads"]
setup_library("osgWidget").requires = ["osgText", "osgViewer", "osgDB", "osg", "OpenThreads"]
setup_library("osgPresentation").requires = ["osgViewer", "osgUI", "osgWidget", "osgManipulator", "osgVolume", "osgFX", "osgText", "osgGA", "osgUtil", "osgDB", "osg", "OpenThreads"]
# Start of plugins
# NodeKit/Psudo loader plugins
setup_plugin("osga")
setup_plugin("rot")
setup_plugin("scale")
setup_plugin("trans")
setup_plugin("normals")
setup_plugin("revisions")
setup_plugin("osgviewer").requires.append("osgViewer")
setup_plugin("osgshadow").requires.append("osgShadow")
setup_plugin("osgterrain").requires.append("osgTerrain")
# Main native plugins
setup_plugin("osg")
plugin = setup_plugin("ive")
plugin.requires.extend(("osgSim", "osgFX", "osgText", "osgTerrain", "osgVolume"))
if self.options.with_zlib:
plugin.requires.append("zlib::zlib")
# Viewer plugins
setup_plugin("cfg").requires.append("osgViewer")
# Shader plugins
setup_plugin("glsl")
# Image plugins
setup_plugin("rgb")
setup_plugin("bmp")
setup_plugin("pnm")
setup_plugin("dds")
setup_plugin("tga")
setup_plugin("hdr")
setup_plugin("dot")
setup_plugin("vtf")
setup_plugin("ktx")
if self.options.get_safe("with_jpeg"):
setup_plugin("jpeg").requires.append("libjpeg::libjpeg")
if self.options.with_jasper:
setup_plugin("jp2").requires.append("jasper::jasper")
if self.options.get_safe("with_openexr"):
setup_plugin("exr").requires.append("openexr::openexr")
if self.options.get_safe("with_gif"):
setup_plugin("gif").requires.append("giflib::giflib")
if self.options.get_safe("with_png"):
setup_plugin("png").requires.extend(("libpng::libpng", "zlib::zlib"))
if self.options.with_tiff:
setup_plugin("tiff").requires.append("libtiff::libtiff")
if self.options.with_gdal:
setup_plugin("gdal").requires.extend(("osgTerrain", "gdal::gdal"))
setup_plugin("ogr").requires.append("gdal::gdal")
if self.options.with_gta:
setup_plugin("gta").requires.append("libgta::libgta")
# 3D Image plugins
if self.options.get_safe("with_dcmtk"):
plugin = setup_plugin("dicom")
plugin.requires.extend(("osgVolume", "dcmtk::dcmtk"))
if self.settings.os == "Windows":
plugin.system_libs = ["wsock32", "ws2_32"]
# 3rd party 3d plugins
setup_plugin("3dc")
setup_plugin("p3d").requires.extend(("osgGA", "osgText", "osgVolume", "osgFX", "osgViewer", "osgPresentation"))
if self.options.with_curl:
plugin = setup_plugin("curl")
plugin.requires.append("libcurl::libcurl")
if self.options.with_zlib:
plugin.requires.append("zlib::zlib")
if self.options.with_zlib:
setup_plugin("gz").requires.append("zlib::zlib")
# with_inventor
# setup_plugin("iv")
# with_collada
# setup_plugin("dae")
# with_fbx
# setup_plugin("fbx")
# with_opencascade
# setup_plugin("opencascade")
setup_plugin("bvh").requires.append("osgAnimation")
setup_plugin("x")
setup_plugin("dxf").requires.append("osgText")
setup_plugin("openflight").requires.append("osgSim")
setup_plugin("obj")
setup_plugin("pic")
setup_plugin("stl")
setup_plugin("3ds")
setup_plugin("ac")
setup_plugin("pov")
setup_plugin("logo")
setup_plugin("lws")
setup_plugin("md2")
setup_plugin("osgtgz")
setup_plugin("tgz")
setup_plugin("shp").requires.extend(("osgSim", "osgTerrain"))
setup_plugin("txf").requires.append("osgText")
setup_plugin("bsp")
setup_plugin("mdl")
setup_plugin("gles").requires.extend(("osgUtil", "osgAnimation"))
setup_plugin("osgjs").requires.extend(("osgAnimation", "osgSim"))
setup_plugin("lwo").requires.append("osgFX")
setup_plugin("ply")
setup_plugin("txp").requires.extend(("osgSim", "osgText"))
# with_ffmpeg
# setup_plugin("ffmpeg")
# with_gstreamer
# setup_plugin("gstreamer")
# with_directshow
# setup_plugin("directshow")
if is_apple_os(self):
setup_plugin("imageio").frameworks = ["Accelerate"]
if ((self.settings.os == "Macos" and self.settings.os.version and Version(self.settings.os.version) >= "10.8")
or (self.settings.os == "iOS" and Version(self.settings.os.version) >= "6.0")):
plugin = setup_plugin("avfoundation")
plugin.requires.append("osgViewer")
plugin.frameworks = ["AVFoundation", "Cocoa", "CoreVideo", "CoreMedia", "QuartzCore"]
if self.settings.os == "Macos" and self.settings.os.version and Version(self.settings.os.version) <= "10.6" and self.settings.arch == "x86":
setup_plugin("qt").frameworks = ["QuickTime"]
if self.settings.os == "Macos" and self.settings.arch == "x86":
plugin = setup_plugin("QTKit")
plugin.requires.append("osgViewer")
plugin.frameworks = ["QTKit", "Cocoa", "QuickTime", "CoreVideo"]
# with_nvtt
# setup_plugin("nvtt")
if self.options.with_freetype:
setup_plugin("freetype").requires.extend(("osgText", "freetype::freetype"))
if self.options.with_zlib:
setup_plugin("zip")
# with_svg
# setup_plugin("svg")
# with_pdf/poppler
# setup_plugin("pdf")
# with_vnc
# setup_plugin("vnc")
setup_plugin("pvr")
plugin = setup_plugin("osc")
plugin.requires.append("osgGA")
if self.settings.os == "Windows":
plugin.system_libs = ["ws2_32", "winmm"]
setup_plugin("trk")
setup_plugin("tf")
# with_blas
# setup_plugin("las")
setup_plugin("lua")
# with_sdl
# setup_plugin("sdl")
if self.options.get_safe("with_asio", False):
setup_plugin("resthttp").requires.extend(("osgPresentation", "asio::asio", "boost::boost"))
# with_zeroconf
# setup_plugin("zeroconf")
| [
"[email protected]"
] | |
d4637ca674ee9122988a540c520dfb1108b8baeb | 7473827589ddfc0e2ad8ccf19029f5666fedffc4 | /Lab8.py | 9a6a955c7e73ef4048a52b699a7d3274b81ffc28 | [
"MIT"
] | permissive | aft0903/IA241 | 475cea78a24294bf32a2b433bd7775e1de7d6415 | c8927706cc9eed40dcaa37f51c5ac6a3994c9334 | refs/heads/main | 2023-04-19T09:06:27.789028 | 2021-05-06T04:19:31 | 2021-05-06T04:19:31 | 331,729,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | #Aileen Towner
#IA 241
#Lab 8
#3.1
def count_words (input_str):
return len(input_str.split())
# TEST: print(count_words('a string'))
#3.2
demo_str = 'hello world'
print(count_words(demo_str))
#3.3
def min_num (num_list): #function
    min_item = num_list[0]  # start with the first item as the current minimum
for num in num_list: #for every number in the list
if type(num) is not str:
            if min_item >= num:  # if the current minimum is >= this number, it becomes the new minimum
min_item = num
return (min_item)
#3.4
demo_list = [1,2,3,4,5,6]
print(min_num(demo_list))
#3.5
mix_list = [1,2,3,4,'a',5,6]
print(min_num(mix_list))
| [
"[email protected]"
] | |
a18cbd39063799664162c48d9bf060db4e12c07f | cafefb0b182567e5cabe22c44578bb712385e9f5 | /lib/gcloud/bigtable/row.py | cb9ce2e67e3dd9f880a237df21a5ef52625a64e6 | [
"BSD-3-Clause"
] | permissive | gtaylor/evennia-game-index | fe0088e97087c0aaa0c319084e28b2c992c2c00b | b47f27f4dff2a0c32991cee605d95911946ca9a5 | refs/heads/master | 2022-11-25T20:28:23.707056 | 2022-11-07T17:47:25 | 2022-11-07T17:47:25 | 55,206,601 | 2 | 2 | BSD-3-Clause | 2018-04-19T05:41:12 | 2016-04-01T05:40:15 | Python | UTF-8 | Python | false | false | 34,378 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Row."""
import struct
import six
from gcloud._helpers import _datetime_from_microseconds
from gcloud._helpers import _microseconds_from_datetime
from gcloud._helpers import _to_bytes
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
bigtable_service_messages_pb2 as messages_pb2)
_PACK_I64 = struct.Struct('>q').pack
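# For example, _PACK_I64(1) == b'\x00\x00\x00\x00\x00\x00\x00\x01': the
# big-endian, 8-byte signed encoding used for integer cell values in set_cell.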
MAX_MUTATIONS = 100000
"""The maximum number of mutations that a row can accumulate."""
class Row(object):
"""Base representation of a Google Cloud Bigtable Row.
This class has three subclasses corresponding to the three
RPC methods for sending row mutations:
* :class:`DirectRow` for ``MutateRow``
* :class:`ConditionalRow` for ``CheckAndMutateRow``
* :class:`AppendRow` for ``ReadModifyWriteRow``
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
def __init__(self, row_key, table):
self._row_key = _to_bytes(row_key)
self._table = table
class _SetDeleteRow(Row):
"""Row helper for setting or deleting cell values.
Implements helper methods to add mutations to set or delete cell contents:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
ALL_COLUMNS = object()
"""Sentinel value used to indicate all columns in a column family."""
def _get_mutations(self, state):
"""Gets the list of mutations for a given state.
This method intended to be implemented by subclasses.
``state`` may not need to be used by all subclasses.
:type state: bool
:param state: The state that the mutation should be
applied in.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always.
"""
raise NotImplementedError
def _set_cell(self, column_family_id, column, value, timestamp=None,
state=None):
"""Helper for :meth:`set_cell`
Adds a mutation to set the value in a specific cell.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
will be interpreted as a 64-bit big-endian signed
integer (8 bytes).
:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`.
"""
column = _to_bytes(column)
if isinstance(value, six.integer_types):
value = _PACK_I64(value)
value = _to_bytes(value)
if timestamp is None:
# Use -1 for current Bigtable server time.
timestamp_micros = -1
else:
timestamp_micros = _microseconds_from_datetime(timestamp)
# Truncate to millisecond granularity.
timestamp_micros -= (timestamp_micros % 1000)
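            # e.g. 1234567 microseconds is truncated to 1234000, since only
            # millisecond precision is kept.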
mutation_val = data_pb2.Mutation.SetCell(
family_name=column_family_id,
column_qualifier=column,
timestamp_micros=timestamp_micros,
value=value,
)
mutation_pb = data_pb2.Mutation(set_cell=mutation_val)
self._get_mutations(state).append(mutation_pb)
def _delete(self, state=None):
"""Helper for :meth:`delete`
Adds a delete mutation (for the entire row) to the accumulated
mutations.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`.
"""
mutation_val = data_pb2.Mutation.DeleteFromRow()
mutation_pb = data_pb2.Mutation(delete_from_row=mutation_val)
self._get_mutations(state).append(mutation_pb)
def _delete_cells(self, column_family_id, columns, time_range=None,
state=None):
"""Helper for :meth:`delete_cell` and :meth:`delete_cells`.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type columns: :class:`list` of :class:`str` /
:func:`unicode <unicode>`, or :class:`object`
:param columns: The columns within the column family that will have
cells deleted. If :attr:`ALL_COLUMNS` is used then
the entire column family will be deleted from the row.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`.
"""
mutations_list = self._get_mutations(state)
if columns is self.ALL_COLUMNS:
mutation_val = data_pb2.Mutation.DeleteFromFamily(
family_name=column_family_id,
)
mutation_pb = data_pb2.Mutation(delete_from_family=mutation_val)
mutations_list.append(mutation_pb)
else:
delete_kwargs = {}
if time_range is not None:
delete_kwargs['time_range'] = time_range.to_pb()
to_append = []
for column in columns:
column = _to_bytes(column)
# time_range will never change if present, but the rest of
# delete_kwargs will
delete_kwargs.update(
family_name=column_family_id,
column_qualifier=column,
)
mutation_val = data_pb2.Mutation.DeleteFromColumn(
**delete_kwargs)
mutation_pb = data_pb2.Mutation(
delete_from_column=mutation_val)
to_append.append(mutation_pb)
# We don't add the mutations until all columns have been
# processed without error.
mutations_list.extend(to_append)
class DirectRow(_SetDeleteRow):
"""Google Cloud Bigtable Row for sending "direct" mutations.
These mutations directly set or delete cell contents:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
These methods can be used directly::
>>> row = table.row(b'row-key1')
>>> row.set_cell(u'fam', b'col1', b'cell-val')
>>> row.delete_cell(u'fam', b'col2')
.. note::
A :class:`DirectRow` accumulates mutations locally via the
:meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and
:meth:`delete_cells` methods. To actually send these mutations to the
Google Cloud Bigtable API, you must call :meth:`commit`.
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
def __init__(self, row_key, table):
super(DirectRow, self).__init__(row_key, table)
self._pb_mutations = []
def _get_mutations(self, state): # pylint: disable=unused-argument
"""Gets the list of mutations for a given state.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type state: bool
:param state: The state that the mutation should be
applied in.
:rtype: list
:returns: The list to add new mutations to (for the current state).
"""
return self._pb_mutations
def set_cell(self, column_family_id, column, value, timestamp=None):
"""Sets a value in this row.
The cell is determined by the ``row_key`` of this :class:`DirectRow`
and the ``column``. The ``column`` must be in an existing
:class:`.ColumnFamily` (as determined by ``column_family_id``).
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
will be interpreted as a 64-bit big-endian signed
integer (8 bytes).
:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.
"""
self._set_cell(column_family_id, column, value, timestamp=timestamp,
state=None)
def delete(self):
"""Deletes this row from the table.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
"""
self._delete(state=None)
def delete_cell(self, column_family_id, column, time_range=None):
"""Deletes cell in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family that will have a
cell deleted.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
"""
self._delete_cells(column_family_id, [column], time_range=time_range,
state=None)
def delete_cells(self, column_family_id, columns, time_range=None):
"""Deletes cells in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type columns: :class:`list` of :class:`str` /
:func:`unicode <unicode>`, or :class:`object`
:param columns: The columns within the column family that will have
cells deleted. If :attr:`ALL_COLUMNS` is used then
the entire column family will be deleted from the row.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
"""
self._delete_cells(column_family_id, columns, time_range=time_range,
state=None)
def commit(self):
"""Makes a ``MutateRow`` API request.
If no mutations have been created in the row, no request is made.
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations to an empty list.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
mutations_list = self._get_mutations(None)
num_mutations = len(mutations_list)
if num_mutations == 0:
return
if num_mutations > MAX_MUTATIONS:
raise ValueError('%d total mutations exceed the maximum allowable '
'%d.' % (num_mutations, MAX_MUTATIONS))
request_pb = messages_pb2.MutateRowRequest(
table_name=self._table.name,
row_key=self._row_key,
mutations=mutations_list,
)
# We expect a `google.protobuf.empty_pb2.Empty`
client = self._table._cluster._client
client._data_stub.MutateRow(request_pb, client.timeout_seconds)
self.clear()
def clear(self):
"""Removes all currently accumulated mutations on the current row."""
del self._pb_mutations[:]
class ConditionalRow(_SetDeleteRow):
"""Google Cloud Bigtable Row for sending mutations conditionally.
Each mutation has an associated state: :data:`True` or :data:`False`.
When :meth:`commit`-ed, the mutations for the :data:`True`
state will be applied if the filter matches any cells in
the row, otherwise the :data:`False` state will be applied.
A :class:`ConditionalRow` accumulates mutations in the same way a
:class:`DirectRow` does:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
with the only change the extra ``state`` parameter::
>>> row_cond = table.row(b'row-key2', filter_=row_filter)
>>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True)
>>> row_cond.delete_cell(u'fam', b'col', state=False)
.. note::
As with :class:`DirectRow`, to actually send these mutations to the
Google Cloud Bigtable API, you must call :meth:`commit`.
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
:type filter_: :class:`.RowFilter`
:param filter_: Filter to be used for conditional mutations.
"""
def __init__(self, row_key, table, filter_):
super(ConditionalRow, self).__init__(row_key, table)
self._filter = filter_
self._true_pb_mutations = []
self._false_pb_mutations = []
def _get_mutations(self, state):
"""Gets the list of mutations for a given state.
Over-ridden so that the state can be used in:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
:type state: bool
:param state: The state that the mutation should be
applied in.
:rtype: list
:returns: The list to add new mutations to (for the current state).
"""
if state:
return self._true_pb_mutations
else:
return self._false_pb_mutations
def commit(self):
"""Makes a ``CheckAndMutateRow`` API request.
If no mutations have been created in the row, no request is made.
The mutations will be applied conditionally, based on whether the
filter matches any cells in the :class:`ConditionalRow` or not. (Each
method which adds a mutation has a ``state`` parameter for this
purpose.)
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations.
:rtype: bool
:returns: Flag indicating if the filter was matched (which also
indicates which set of mutations were applied by the server).
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
true_mutations = self._get_mutations(state=True)
false_mutations = self._get_mutations(state=False)
num_true_mutations = len(true_mutations)
num_false_mutations = len(false_mutations)
if num_true_mutations == 0 and num_false_mutations == 0:
return
if (num_true_mutations > MAX_MUTATIONS or
num_false_mutations > MAX_MUTATIONS):
raise ValueError(
                'Exceeded the maximum allowable mutations (%d). Had %d true '
'mutations and %d false mutations.' % (
MAX_MUTATIONS, num_true_mutations, num_false_mutations))
request_pb = messages_pb2.CheckAndMutateRowRequest(
table_name=self._table.name,
row_key=self._row_key,
predicate_filter=self._filter.to_pb(),
true_mutations=true_mutations,
false_mutations=false_mutations,
)
# We expect a `.messages_pb2.CheckAndMutateRowResponse`
client = self._table._cluster._client
resp = client._data_stub.CheckAndMutateRow(
request_pb, client.timeout_seconds)
self.clear()
return resp.predicate_matched
# pylint: disable=arguments-differ
def set_cell(self, column_family_id, column, value, timestamp=None,
state=True):
"""Sets a value in this row.
The cell is determined by the ``row_key`` of this
:class:`ConditionalRow` and the ``column``. The ``column`` must be in
an existing :class:`.ColumnFamily` (as determined by
``column_family_id``).
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
will be interpreted as a 64-bit big-endian signed
integer (8 bytes).
:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._set_cell(column_family_id, column, value, timestamp=timestamp,
state=state)
def delete(self, state=True):
"""Deletes this row from the table.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._delete(state=state)
def delete_cell(self, column_family_id, column, time_range=None,
state=True):
"""Deletes cell in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family that will have a
cell deleted.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._delete_cells(column_family_id, [column], time_range=time_range,
state=state)
def delete_cells(self, column_family_id, columns, time_range=None,
state=True):
"""Deletes cells in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type columns: :class:`list` of :class:`str` /
:func:`unicode <unicode>`, or :class:`object`
:param columns: The columns within the column family that will have
cells deleted. If :attr:`ALL_COLUMNS` is used then the
entire column family will be deleted from the row.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._delete_cells(column_family_id, columns, time_range=time_range,
state=state)
# pylint: enable=arguments-differ
def clear(self):
"""Removes all currently accumulated mutations on the current row."""
del self._true_pb_mutations[:]
del self._false_pb_mutations[:]
class AppendRow(Row):
"""Google Cloud Bigtable Row for sending append mutations.
These mutations are intended to augment the value of an existing cell
and uses the methods:
* :meth:`append_cell_value`
* :meth:`increment_cell_value`
The first works by appending bytes and the second by incrementing an
integer (stored in the cell as 8 bytes). In either case, if the
cell is empty, assumes the default empty value (empty string for
    bytes and 0 for integers).
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
def __init__(self, row_key, table):
super(AppendRow, self).__init__(row_key, table)
self._rule_pb_list = []
def clear(self):
"""Removes all currently accumulated modifications on current row."""
del self._rule_pb_list[:]
def append_cell_value(self, column_family_id, column, value):
"""Appends a value to an existing cell.
.. note::
This method adds a read-modify rule protobuf to the accumulated
read-modify rules on this row, but does not make an API
request. To actually send an API request (with the rules) to the
Google Cloud Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes
:param value: The value to append to the existing value in the cell. If
the targeted cell is unset, it will be treated as
containing the empty string.
"""
column = _to_bytes(column)
value = _to_bytes(value)
rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id,
column_qualifier=column,
append_value=value)
self._rule_pb_list.append(rule_pb)
def increment_cell_value(self, column_family_id, column, int_value):
"""Increments a value in an existing cell.
Assumes the value in the cell is stored as a 64 bit integer
serialized to bytes.
.. note::
This method adds a read-modify rule protobuf to the accumulated
read-modify rules on this row, but does not make an API
request. To actually send an API request (with the rules) to the
Google Cloud Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type int_value: int
:param int_value: The value to increment the existing value in the cell
by. If the targeted cell is unset, it will be treated
as containing a zero. Otherwise, the targeted cell
must contain an 8-byte value (interpreted as a 64-bit
big-endian signed integer), or the entire request
will fail.
"""
column = _to_bytes(column)
rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id,
column_qualifier=column,
increment_amount=int_value)
self._rule_pb_list.append(rule_pb)
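    # Usage sketch (hedged; ``table`` is assumed to be an existing Table):
    #
    #     row = AppendRow(b'row-key', table)
    #     row.append_cell_value(u'fam', b'col', b'-suffix')
    #     row.increment_cell_value(u'fam', b'counter', 1)
    #     modified_cells = row.commit()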
def commit(self):
"""Makes a ``ReadModifyWriteRow`` API request.
This commits modifications made by :meth:`append_cell_value` and
:meth:`increment_cell_value`. If no modifications were made, makes
no API request and just returns ``{}``.
Modifies a row atomically, reading the latest existing
timestamp / value from the specified columns and writing a new value by
appending / incrementing. The new cell created uses either the current
server time or the highest timestamp of a cell in that column (if it
exceeds the server time).
After committing the accumulated mutations, resets the local mutations.
.. code:: python
>>> append_row.commit()
{
u'col-fam-id': {
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
},
u'col-fam-id2': {
b'col-name3-but-other-fam': [
(b'foo', datetime.datetime(...)),
],
},
}
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
num_mutations = len(self._rule_pb_list)
if num_mutations == 0:
return {}
if num_mutations > MAX_MUTATIONS:
raise ValueError('%d total append mutations exceed the maximum '
'allowable %d.' % (num_mutations, MAX_MUTATIONS))
request_pb = messages_pb2.ReadModifyWriteRowRequest(
table_name=self._table.name,
row_key=self._row_key,
rules=self._rule_pb_list,
)
# We expect a `.data_pb2.Row`
client = self._table._cluster._client
row_response = client._data_stub.ReadModifyWriteRow(
request_pb, client.timeout_seconds)
# Reset modifications after commit-ing request.
self.clear()
# NOTE: We expect row_response.key == self._row_key but don't check.
return _parse_rmw_row_response(row_response)
def _parse_rmw_row_response(row_response):
"""Parses the response to a ``ReadModifyWriteRow`` request.
:type row_response: :class:`.data_pb2.Row`
:param row_response: The response row (with only modified cells) from a
``ReadModifyWriteRow`` request.
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell. For example:
.. code:: python
{
u'col-fam-id': {
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
},
u'col-fam-id2': {
b'col-name3-but-other-fam': [
(b'foo', datetime.datetime(...)),
],
},
}
"""
result = {}
for column_family in row_response.families:
column_family_id, curr_family = _parse_family_pb(column_family)
result[column_family_id] = curr_family
return result
def _parse_family_pb(family_pb):
"""Parses a Family protobuf into a dictionary.
:type family_pb: :class:`._generated.bigtable_data_pb2.Family`
:param family_pb: A protobuf
:rtype: tuple
:returns: A string and dictionary. The string is the name of the
column family and the dictionary has column names (within the
family) as keys and cell lists as values. Each cell is
represented with a two-tuple with the value (in bytes) and the
timestamp for the cell. For example:
.. code:: python
{
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
}
"""
result = {}
for column in family_pb.columns:
result[column.qualifier] = cells = []
for cell in column.cells:
val_pair = (
cell.value,
_datetime_from_microseconds(cell.timestamp_micros),
)
cells.append(val_pair)
return family_pb.name, result
| [
"[email protected]"
] | |
15c810e41d2e35212936b8338a7b64880501a161 | 65686dde7560955b2b30011304df350bfb2c7674 | /publishconf.py | 9800b4c9633b7964bd468cd94f69b25655eacf16 | [
"MIT"
] | permissive | BenDoan/Ben-Doan-Portfolio | 35bde70d1b4209d80058e5cdc67f8a09dbfc7311 | 38e9fd503907deea4d3a2dcc036a72057474af02 | refs/heads/master | 2016-09-01T07:37:21.607951 | 2014-02-19T04:40:47 | 2014-02-19T04:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
import sys
sys.path.append('.')
from pelicanconf import *
SITEURL = 'http://www.bendoan.me'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
# Uncomment following line for absolute URLs in production:
#RELATIVE_URLS = False
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
| [
"[email protected]"
] | |
20308af37baed98882205bef8b536668b1c942b4 | 0edd5fc4975c112c010267ba235350d8e57e696b | /10_mongo/mongo.py | b069bb37efd5b9ffc1288ad8a35c8cd778d1289a | [] | no_license | hliu01/Softdev2019 | 33963111f502197a25bf300c066b3968e361e473 | 766d89a19cc679a68f6ff23ded7728abecbdc674 | refs/heads/master | 2022-12-10T07:00:21.025185 | 2020-04-21T12:11:16 | 2020-04-21T12:11:16 | 207,879,805 | 0 | 2 | null | 2022-12-08T07:25:08 | 2019-09-11T18:28:06 | HTML | UTF-8 | Python | false | false | 2,807 | py | #Peihua Huang, Henry Liu (Team Computers)
#SoftDev1 pd1
#K10 -- Import/Export Bank
#2020-02-28
# Name of Dataset: Current US Senators
# Description: The dataset contains basic information on all the current US Senators
# Hyperlink: https://www.govtrack.us/api/v2/role?current=true&role_type=senator
# Brief Summary: We imported the dataset by first opening the file and then using loads from bson.json_util to convert the JSON file into a dictionary. We then inserted each of the senator entries into MongoDB one line at a time.
from bson.json_util import loads
from pymongo import MongoClient
client = MongoClient()
db = client.computers
db.senators.drop()
senators = db.senators
file = open("primer-dataset.json", "r")
content = loads(file.read())["objects"]
for i in range(len(content)):
senators.insert_one(content[i])
# for item in senators.find({}, {"person.name" : 1}):
# print(item)
def find_state(state):
'''Returns all senators representing the specified state'''
return senators.find({"state" : state}, {"_id" : 0, "person.name" : 1})
def find_party(party):
'''Returns all senators in specified party'''
return senators.find({"party" : party}, {"_id" : 0, "person.name" : 1})
def find_gender(gender):
'''Returns all senators of specified gender'''
return senators.find({"person.gender" : gender}, {"_id" : 0, "person.name" : 1})
def find_website(firstname):
'''Returns website of all senators with a given first name'''
return senators.find({"person.firstname" : firstname}, {"_id" : 0, "person.name" : 1, "website" : 1})
def find_description(lastname):
'''Returns description of all senators with a given last name'''
return senators.find({"person.lastname" : lastname}, {"_id" : 0, "person.name" : 1, "description" : 1})
def find_num_gender(gender, number):
'''Return num number of senators of specified gender'''
return senators.find({"person.gender" : gender}, {"_id" : 0, "person.name" : 1}).limit(number)
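# Example of a compound query combining two fields already used above
# (an added illustration; same projection convention as the helpers):
def find_state_party(state, party):
    '''Returns all senators representing a given state who belong to a given party'''
    return senators.find({"state": state, "party": party}, {"_id": 0, "person.name": 1})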
print("-----FINDING ALL SENATORS IN NY-----")
for item in find_state("NY"):
print(item["person"]["name"])
print("-----FINDING ALL DEMOCRATIC SENATORS-----")
for item in find_party("Democrat"):
print(item["person"]["name"])
print("----FINDING ALL FEMALE SENATORS-----")
for item in find_gender("female"):
print(item["person"]["name"])
print("-----FINDING WEBSITE OF SENATORS WHOSE FIRST NAME IS KEVIN-----")
for item in find_website("Kevin"):
print(item["person"]["name"],":", item["website"])
print("-----FINDING DESCRIPTION OF SENATORS WHOSE LAST NAME IS ALEXANDER-----")
for item in find_description("Alexander"):
print(item["person"]["name"], ":", item["description"])
print("-----FINDING 5 MALE SENATORS-----")
for item in find_num_gender("male", 5):
print(item["person"]["name"])
| [
"[email protected]"
] | |
287049a01cdc9d370dabf60d5f00c362488fd3f5 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/simulation/server_commands/crafting_commands.py | bda36e7403b1b53df74c775823d29c97068e6327 | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,637 | py | from carry.carry_postures import CarryingObject
from crafting import recipe
from crafting.crafting_interactions import create_craftable
from crafting.crafting_process import CRAFTING_QUALITY_LIABILITY
from crafting.recipe import Recipe
from server_commands.argument_helpers import OptionalTargetParam, get_optional_target, TunableInstanceParam
import crafting.crafting_process
import services
import sims4.commands
from sims4.resources import Types
from tag import Tag
@sims4.commands.Command('crafting.shorten_phases', command_type=sims4.commands.CommandType.Automation)
def shorten_phases(enabled:bool=None, _connection=None):
output = sims4.commands.Output(_connection)
if enabled is None:
do_enabled = not crafting.crafting_process.shorten_all_phases
else:
do_enabled = enabled
crafting.crafting_process.shorten_all_phases = do_enabled
if enabled is None:
if do_enabled:
output('Crafting phases are shortened.')
else:
output('Crafting phases are normal length.')
return True
@sims4.commands.Command('crafting.get_recipes_with_tag', command_type=sims4.commands.CommandType.Automation)
def get_recipes_with_tag(tag:Tag, _connection=None):
output = sims4.commands.Output(_connection)
automation_output = sims4.commands.AutomationOutput(_connection)
recipes = services.get_instance_manager(sims4.resources.Types.RECIPE).get_ordered_types(only_subclasses_of=Recipe)
automation_output('CraftingGetRecipesWithTag; Status:Begin')
for (i, recipe) in enumerate(recipes):
if tag not in recipe.recipe_tags:
pass
elif recipe.final_product.definition is None:
pass
else:
automation_output('CraftingGetRecipesWithTag; Status:Data, RecipeId:{}, Recipe:{}, ProductId:{}'.format(recipe.guid64, recipe.__name__, recipe.final_product_definition_id))
output('{}:{}'.format(recipe.guid64, recipe.__name__))
automation_output('CraftingGetRecipesWithTag; Status:End')
return True
@sims4.commands.Command('crafting.create_recipe', command_type=sims4.commands.CommandType.Automation)
def create_recipe(recipe:TunableInstanceParam(Types.RECIPE), opt_sim:OptionalTargetParam=None, _connection=None):
output = sims4.commands.Output(_connection)
automation_output = sims4.commands.AutomationOutput(_connection)
sim = get_optional_target(opt_sim, _connection)
if sim is None:
output('No sim for recipe creation')
automation_output('CraftingCreateRecipe; Status:No Sim')
return False
craftable = create_craftable(recipe, sim)
if craftable is None:
output('Failed To Create Craftable')
automation_output('CraftingCreateRecipe; Status:Failed To Create Craftable')
return False
CarryingObject.snap_to_good_location_on_floor(craftable, starting_transform=sim.transform, starting_routing_surface=sim.routing_surface)
automation_output('CraftingCreateRecipe; Status:Success, ObjectId:{}'.format(craftable.id))
return True
@sims4.commands.Command('crafting.show_quality')
def show_quality(opt_sim:OptionalTargetParam=None, _connection=None):
sim = get_optional_target(opt_sim, _connection)
if sim is None:
sims4.commands.output('No sim for crafting.show_quality', _connection)
return False
crafting_liability = None
for si in sim.si_state:
crafting_liability = si.get_liability(CRAFTING_QUALITY_LIABILITY)
if crafting_liability is not None:
break
if crafting_liability is None:
sims4.commands.output('Sim {} is not doing any crafting interaction'.format(sim), _connection)
return False
(quality_state, quality_stats_value) = crafting_liability.get_quality_state_and_value()
quality_state_strings = ['None', 'Poor', 'Normal', 'Outstanding']
quality_state = quality_state or 0
sims4.commands.output('Sim {} current crafting quality is {}({})'.format(sim, quality_state_strings[quality_state], quality_stats_value), _connection)
return True
@sims4.commands.Command('crafting.ingredients_required_toggle', command_type=sims4.commands.CommandType.Cheat)
def toggle_ingredients_required(_connection=None):
recipe.debug_ingredient_requirements = not recipe.debug_ingredient_requirements
if recipe.debug_ingredient_requirements:
message = 'Ingredient requirements have been enabled.'
else:
message = 'Ingredient requirements disabled. Craft at will.'
sims4.commands.output(message, _connection)
| [
"[email protected]"
] | |
d640bf3aac532615b91484e1cd51ded14014ffbe | 17f1811abda6c828460b77f460671f9c2f464204 | /30DaysLC/min_stack.py | a81298fa266de49863efd9b54f793c1d53e3772a | [] | no_license | rishabhranawat/challenge | f10f69fc30881a0571c4321b466a89aeeb06e568 | e836343be5185f8843bb77197fccff250e9a77e3 | refs/heads/master | 2021-01-21T15:13:47.590675 | 2020-04-25T15:26:42 | 2020-04-25T15:26:42 | 91,833,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.data = []
self.size = 0
def push(self, x):
"""
:type x: int
:rtype: None
"""
self.data.append(x)
self.size += 1
def pop(self):
"""
:rtype: None
"""
self.size -= 1
return self.data.pop(self.size)
    def top(self):
        """
        :rtype: int
        """
        # The most recently pushed element sits at index size - 1.
        return self.data[self.size - 1]
def getMin(self):
"""
:rtype: int
"""
return min(self.data)
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin() | [
"[email protected]"
] | |
b8d525e33f60b7ccc184e2c1f10ad0dfe3ea2c1d | 47ef11c80f1147940d5bd17d03bdf54a0cfb8648 | /rundoozer/setup.py | 0ed0a4580f83c3783ab85e48eb1022653514e4fc | [
"Apache-2.0"
] | permissive | jupierce/doozer | 71b5f686437c802e775f3137e7b494fed1d756b8 | 564ad02f01ebdb55ed176c07e36986a8b0262857 | refs/heads/master | 2023-08-27T08:17:42.634261 | 2019-10-17T14:57:09 | 2019-10-17T14:57:09 | 215,615,398 | 0 | 0 | Apache-2.0 | 2019-10-16T18:19:40 | 2019-10-16T18:19:40 | null | UTF-8 | Python | false | false | 714 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
def _get_version():
from os.path import abspath, dirname, join
filename = join(dirname(abspath(__file__)), '../doozerlib', 'VERSION')
return open(filename).read().strip()
setup(
name="rundoozer",
author="AOS ART Team",
author_email="[email protected]",
version=_get_version(),
description="CLI tool for managing and automating Red Hat software releases",
url="https://github.com/openshift/doozer",
license="Red Hat Internal",
packages=[],
include_package_data=True,
scripts=[
'rundoozer'
],
install_requires=['pydotconfig>=0.1.5'],
dependency_links=[]
)
| [
"[email protected]"
] | |
5cb34e4746ad5de4d2bb25315c334f9b429ceef2 | da4e01fd4e15e835a2cef4a3b9dee9ef582f1333 | /mosquitto_byte.py | 5b9f856d2dc02827d1f7a122e6a548c17dea4c1b | [
"Apache-2.0"
] | permissive | PBearson/MosquittoByte | f7a4708ce9f0c17c46cab925fce62003d0b892b3 | f93684dc660382db91a6f9dd781fe65d0b902957 | refs/heads/master | 2023-07-07T14:26:42.882500 | 2021-04-24T22:06:15 | 2021-04-24T22:06:15 | 342,062,663 | 1 | 0 | null | 2021-04-07T16:17:19 | 2021-02-24T23:18:27 | Python | UTF-8 | Python | false | false | 32,446 | py | import socket
import random
import time
import sys
import argparse
import math
import os
import os.path
import select
import subprocess
import difflib
import threading
from os import path
from datetime import datetime
from difflib import SequenceMatcher
# Remove bytes in a string
# f : the fuzzable object
# nb : the number of bytes to remove in f
def remove(f, nb):
    for n in range(nb):
        if not f:
            break
        base = random.randrange(len(f))  # pick a valid byte position to drop
        f = f[0:base] + f[base + 1:]
    return f
# Add bytes in a string
# f : the fuzzable object
# nb : the number of bytes to add to f
def add(f, nb):
for n in range(nb):
base = random.randint(0, len(f))
byte = random.getrandbits(8).to_bytes(1, sys.byteorder)
f = f[0:base] + byte + f[base:]
return f
# Mutate bytes in a string
# f : the fuzzable object
# nb : the number of bytes to mutate in f
def mutate(f, nb):
bits = random.sample(range(len(f)), min(nb, len(f)))
for b in bits:
byte = random.getrandbits(8).to_bytes(1, sys.byteorder)
f = f[0:b] + byte + f[b + 1:]
return f
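# Quick illustration of the three operators above (positions and byte values
# are random, but the length arithmetic always holds; the hex is an example):
#   payload = bytes.fromhex("101000044d515454")
#   len(mutate(payload, 2)) == len(payload)      # mutate preserves length
#   len(add(payload, 3)) == len(payload) + 3     # add grows the payload
#   len(remove(payload, 1)) == len(payload) - 1  # remove shrinks it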
def get_payload(file):
f = open(file, "r")
packets = f.read().splitlines()
selection = random.choice(packets)
f.close()
return bytearray.fromhex(selection)
def get_all_payloads():
all_payloads = {
"connect": get_payload("mqtt_corpus/CONNECT"),
"connack": get_payload("mqtt_corpus/CONNACK"),
"pingreq": get_payload("mqtt_corpus/PINGREQ"),
"pingresp": get_payload("mqtt_corpus/PINGRESP"),
"auth": get_payload("mqtt_corpus/AUTH"),
"publish": get_payload("mqtt_corpus/PUBLISH"),
"puback": get_payload("mqtt_corpus/PUBACK"),
"pubrec": get_payload("mqtt_corpus/PUBREC"),
"pubrel": get_payload("mqtt_corpus/PUBREL"),
"pubcomp": get_payload("mqtt_corpus/PUBCOMP"),
"subscribe": get_payload("mqtt_corpus/SUBSCRIBE"),
"suback": get_payload("mqtt_corpus/SUBACK"),
"unsubscribe": get_payload("mqtt_corpus/UNSUBSCRIBE"),
"unsuback": get_payload("mqtt_corpus/UNSUBACK"),
"disconnect": get_payload("mqtt_corpus/DISCONNECT"),
"reserved": get_payload("mqtt_corpus/RESERVED")
}
return all_payloads
# Return c / 100 * len(f), where c is a random number between a and b
# a : a number between 0 and 100
# b : a number between a and 100
# f : the fuzzable object
def select_param_value(f, a, b):
if a == b:
c = round(a / 100 * len(f))
else:
c = random.choice(range(a, b))
c = round(c / 100 * len(f))
return c
def fuzz_target(f, params):
# Get number of bytes to mutate
num_mutate_bytes = select_param_value(f, params["min_mutate"], params["max_mutate"])
# Get number of bytes to add
if params["super_add_enable"] == 0:
num_add_bytes = random.randint(params["super_add_min"], params["super_add_max"])
else:
num_add_bytes = select_param_value(f, params["min_add"], params["max_add"])
# Get number of bytes to remove
num_remove_bytes = select_param_value(f, params["min_remove"], params["max_remove"])
# Randomize which operations we do
fuzz_opts = ["mutate", "add", "remove"]
fuzz_rounds = random.randint(params["min_fuzz_rounds"], params["max_fuzz_rounds"])
for fr in range(fuzz_rounds):
fuzz_selection = random.sample(fuzz_opts, random.randint(1, 3))
for s in fuzz_selection:
if s == "mutate":
f = mutate(f, num_mutate_bytes)
elif s == "add":
f = add(f, num_add_bytes)
elif s == "remove":
f = remove(f, num_remove_bytes)
return f
def source_payload_with_filestream_response(params):
f = open(output_directory + "/filestream_responses.txt", "r")
packets = f.readlines()[1:]
selection_index = random.randint(0, len(packets) - 1)
selection = packets[selection_index].split(",")[1]
payload = bytearray.fromhex(selection)
f.close()
return fuzz_target(payload, params), selection_index
def source_payload_with_network_response(params):
f = open(output_directory + "/network_responses.txt", "r")
packets = f.read().splitlines()[1:]
selection_index = random.randint(0, len(packets) - 1)
selection = packets[selection_index].split(",")[1]
payload = bytearray.fromhex(selection)
f.close()
return fuzz_target(payload, params), selection_index
def source_payload_with_crash(params):
f = open(output_directory + "/crashes.txt", "r")
packets = f.read().splitlines()[1:]
selection_index = random.randint(0, len(packets) - 1)
selection = packets[selection_index].split(",")[11]
payload = bytearray.fromhex(selection)
f.close()
return fuzz_target(payload, params), selection_index
# Return a tuple (a, b) where a and b are between abs_min and abs_max and a <= b
def get_min_max(abs_min, abs_max):
a = random.randint(abs_min, abs_max)
b = random.randint(abs_min, abs_max)
if a < b:
return (a, b)
return (b, a)
def get_params():
min_mutate, max_mutate = get_min_max(0, 10 * fuzz_intensity)
min_add, max_add = get_min_max(0, 10 * fuzz_intensity)
super_add_min, super_add_max = get_min_max(0, 1000 * fuzz_intensity)
super_add_enable = random.randint(0, 50)
min_remove, max_remove = get_min_max(0, 10 * fuzz_intensity)
min_fuzz_rounds, max_fuzz_rounds = get_min_max(0, fuzz_intensity)
    # Counter-intuitively, a sourcing value of 0 means that the fuzzer WILL source from that target. For example, if sourcing_from_crash = 0 (i.e., source_frequency = 4), then we will source from the crashes log.
if source_frequency == 0:
sourcing_from_crash = 1
elif source_frequency == 1:
sourcing_from_crash = random.randint(0, 100)
elif source_frequency == 2:
sourcing_from_crash = random.randint(0, 10)
elif source_frequency == 3:
sourcing_from_crash = random.randint(0, 1)
else:
sourcing_from_crash = 0
if network_response_frequency == 0:
sourcing_from_network = 1
elif network_response_frequency == 1:
sourcing_from_network = random.randint(0, 100)
elif network_response_frequency == 2:
sourcing_from_network = random.randint(0, 10)
elif network_response_frequency == 3:
sourcing_from_network = random.randint(0, 1)
else:
sourcing_from_network = 0
if filestream_response_frequency == 0:
sourcing_from_filestream = 1
elif filestream_response_frequency == 1:
sourcing_from_filestream = random.randint(0, 100)
elif filestream_response_frequency == 2:
sourcing_from_filestream = random.randint(0, 10)
elif filestream_response_frequency == 3:
sourcing_from_filestream = random.randint(0, 1)
else:
sourcing_from_filestream = 0
params = {
"min_mutate": min_mutate,
"max_mutate": max_mutate,
"min_add": min_add,
"max_add": max_add,
"super_add_enable": super_add_enable,
"super_add_min": super_add_min,
"super_add_max": super_add_max,
"min_remove": min_remove,
"max_remove": max_remove,
"min_fuzz_rounds": min_fuzz_rounds,
"max_fuzz_rounds": max_fuzz_rounds,
"sourcing_from_crash": sourcing_from_crash,
"sourcing_from_network": sourcing_from_network,
"sourcing_from_filestream": sourcing_from_filestream
}
return params
def check_duplicate_source(payload):
f = open(output_directory + "/crashes.txt", "r")
packets = f.read().splitlines()[1:]
f.close()
for p in packets:
curr = p.split(",")[11].strip(" ")
if payload.hex() == curr:
return True
return False
# Check for duplicate responses in the broker response log.
# This includes responses that are too similar, but not exactly
# duplicates.
def check_duplicate_network_response(response):
if not path.exists(output_directory + "/network_responses_raw.txt"):
return False
f = open(output_directory + "/network_responses_raw.txt", "r")
packets = f.read().splitlines()
f.close()
for p in packets:
similarity = SequenceMatcher(None, p, response.hex()).ratio()
if similarity >= max_network_response_threshold:
return True
return False
# Check for duplicate responses in the stream response log.
# This includes responses that are too similar, but not exactly
# duplicates.
def check_duplicate_filestream_response(response):
if not path.exists(output_directory + "/filestream_responses_raw.txt"):
return False
f = open(output_directory + "/filestream_responses_raw.txt", "r")
packets = f.read().splitlines()
f.close()
for p in packets:
similarity = SequenceMatcher(None, p, response).ratio()
if similarity >= max_filestream_response_threshold:
return True
return False
def get_last_index():
try:
f = open(output_directory + "/crashes.txt", "r")
last_entry = f.read().splitlines()[-1]
last_index = last_entry.split(",")[0]
f.close()
return int(last_index)
except (FileNotFoundError, ValueError):
return -1
def handle_network_response(payload, response):
if not path.exists(output_directory + "/network_responses.txt"):
f = open(output_directory + "/network_responses.txt", "w")
f.write("Timestamp, Payload, Response\n")
f.close()
duplicate_response = check_duplicate_network_response(response)
f = open(output_directory + "/network_responses.txt", "r")
f_len = len(f.read().splitlines())
f.close()
if not duplicate_response and f_len < max_network_response_entries:
f = open(output_directory + "/network_responses.txt", "a")
f.write("%s, %s, %s\n" % (datetime.now(), payload.hex(), response.hex()))
f.close()
f = open(output_directory + "/network_responses_raw.txt", "a")
f.write("%s\n" % response.hex())
f.close()
def stream_response_has_keyword(resp, payload):
f = open("keywords.txt", "r")
keywords = f.read().splitlines()
for k in keywords:
if k.upper() in resp.upper():
return True
return False
def handle_filestream_response(proc):
if not path.exists(output_directory + "/filestream_responses.txt"):
f = open(output_directory + "/filestream_responses.txt", "w")
f.write("Timestamp, Payload, Response\n")
f.close()
for line in iter(proc.stdout.readline, b''):
# Remove in-line EOL characters
line = line.decode("latin").replace(r"\n", "").replace(r"\r", "")
if "current_payload" in globals():
has_keyword = stream_response_has_keyword(line, current_payload)
duplicate_response = check_duplicate_filestream_response(line)
logging_check = True
if filestream_logging_preference < 2:
logging_check = filestream_logging_preference & has_keyword
if logging_check and not duplicate_response:
f = open(output_directory + "/filestream_responses.txt", "a")
f.write("%s, %s, %s" % (datetime.now(), current_payload.hex(), line))
f.close()
f = open(output_directory + "/filestream_responses_raw.txt", "a")
f.write(line)
f.close()
def start_broker():
try:
proc = subprocess.Popen(broker_exe.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if not no_filestream_response_log:
broker_thread = threading.Thread(target=handle_filestream_response, args=(proc,))
broker_thread.start()
if verbosity >= 1:
print("Waiting for broker to start")
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
s.close()
break
except ConnectionRefusedError:
time.sleep(0.1)
except FileNotFoundError:
print("The broker command/location you provided does not exist.")
exit()
def handle_crash():
if "last_fuzz" not in globals():
if verbosity >= 5:
print("There was an error connecting to the broker.")
try:
start_broker()
except NameError:
print("No MQTT process appears to be running at %s:%s, and you have not defined a broker exe. You must do one or the other." % (host, port))
exit()
else:
if not path.exists(output_directory + "/crashes.txt"):
f = open(output_directory + "/crashes.txt", "w")
f.write("Index, Timestamp, Seed, Fuzz intensity, Construct intensity, Crash index, Network response index, Filestream response index, Crash source frequency, Network source frequency, Filestream source frequency, Payload\n")
f.close()
seed = last_fuzz["seed"]
fi = last_fuzz["fuzz_intensity"]
ci = last_fuzz["construct_intensity"]
si = last_fuzz["crash_index"]
nri = last_fuzz["network_response_index"]
fri = last_fuzz["filestream_response_index"]
sf = last_fuzz["source_frequency"]
nrf = last_fuzz["network_response_frequency"]
frf = last_fuzz["filestream_response_frequency"]
payload = last_fuzz["payload"]
if verbosity >= 1:
print("The following payload crashed the program")
print(payload.hex())
index = get_last_index() + 1
duplicate_source = check_duplicate_source(payload)
if not duplicate_source:
f = open(output_directory + "/crashes.txt", "a")
f.write("%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n" % (index, datetime.now(), seed, fi, ci, si, nri, fri, sf, nrf, frf, payload.hex()))
f.close()
f = open(output_directory + "/crashes_raw.txt", "a")
f.write("%s\n" % payload.hex())
f.close()
if not restart_on_crash:
exit()
else:
start_broker()
# Construct the payload according the construct intensity
def construct_payload(all_payloads):
selected_payloads = []
if construct_intensity == 0:
allowed_payloads = ["auth", "pingreq", "pubcomp", "publish", "pubrec", "pubrel", "subscribe", "unsubscribe"]
payloads_subset = {e: all_payloads[e] for e in allowed_payloads}
selected_payloads.append("connect")
key, val = random.choice(list(payloads_subset.items()))
selected_payloads.append(key)
selected_payloads.append("disconnect")
elif construct_intensity == 1:
allowed_payloads = ["auth", "pingreq", "pubcomp", "publish", "pubrec", "pubrel", "subscribe", "unsubscribe"]
payloads_subset = {e: all_payloads[e] for e in allowed_payloads}
num_packets = random.randint(1, 5)
selected_payloads = dict(random.sample(list(payloads_subset.items()), num_packets)).keys()
elif construct_intensity == 2:
num_packets = random.randint(1, 10)
selected_payloads = dict(random.sample(list(all_payloads.items()), num_packets)).keys()
else:
num_packets = random.randint(1, 20)
for n in range(num_packets):
key, val = random.choice(list(all_payloads.items()))
selected_payloads.append(key)
enumerated_payloads = {}
payload = b""
for s in selected_payloads:
payload = payload + all_payloads[s]
enumerated_payloads[s] = all_payloads[s]
return (payload, enumerated_payloads)
def fuzz_payloads(all_payloads, params):
for a in all_payloads:
all_payloads[a] = fuzz_target(all_payloads[a], params)
return all_payloads
# Fuzz MQTT
c_len = -1
nr_len = -1
fr_len = -1
def fuzz(seed):
global last_fuzz, current_payload, c_len, nr_len, fr_len
random.seed(seed)
params = get_params()
if c_len < 2:
# Get number of entries in crash file so far
try:
f = open(output_directory + "/crashes.txt", "r")
c_len = len(f.read().splitlines())
f.close()
except FileNotFoundError:
c_len = -1
if nr_len < 2:
# Get number of entries in network response file so far
try:
f = open(output_directory + "/network_responses.txt", "r")
nr_len = len(f.read().splitlines())
f.close()
except FileNotFoundError:
nr_len = -1
if fr_len < 2:
# Get number of entries in filestream response file so far
try:
f = open(output_directory + "/filestream_responses.txt", "r")
fr_len = len(f.read().splitlines())
f.close()
except FileNotFoundError:
fr_len = -1
crash_index = None
network_response_index = None
filestream_response_index = None
# Order of preference for sourcing: crash log > filestream log > network log
# Don't source the fuzzer with anything
if (c_len < 2 or not params["sourcing_from_crash"] == 0) and (nr_len < 2 or not params["sourcing_from_network"] == 0) and (fr_len < 2 or not params["sourcing_from_filestream"] == 0):
all_payloads = fuzz_payloads(get_all_payloads(), params)
payload, enumerated_payloads = construct_payload(all_payloads)
# Source with previous crash
elif c_len >= 2 and params["sourcing_from_crash"] == 0:
payload, crash_index = source_payload_with_crash(params)
# Source with filestream response
elif fr_len >= 2 and params["sourcing_from_filestream"] == 0:
payload, filestream_response_index = source_payload_with_filestream_response(params)
# Source with network response
else:
payload, network_response_index = source_payload_with_network_response(params)
if payload_only:
print("\nCrash index: " + str(crash_index))
print("Network response index: " + str(network_response_index))
        print("Filestream response index: " + str(filestream_response_index))
if not params["sourcing_from_crash"] == 0 and not params["sourcing_from_network"] == 0 and not params["sourcing_from_filestream"] == 0:
print("\nFuzzed payload:\t" + payload.hex())
for p in enumerated_payloads:
print("%s: %s" % (p, enumerated_payloads[p].hex()))
else:
print("\nFuzzed payload:\t" + payload.hex())
exit()
current_payload = payload
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
s.send(payload)
except ConnectionRefusedError:
handle_crash()
return
except ConnectionResetError:
return
if(verbosity >= 4):
print("Crash log index:\t", crash_index)
print("Network log index:\t", network_response_index)
print("Filestream log index:\t", filestream_response_index)
if(verbosity >= 1):
print("Fuzzed payload:\t\t", payload.hex())
ready = select.select([s], [], [], response_delay)
if ready[0]:
try:
response = s.recv(1024)
if not no_network_response_log:
handle_network_response(payload, response)
if verbosity >= 5:
print("Network response:\t", response.hex())
except ConnectionResetError:
if verbosity >= 4:
print("Error:\t\t\t Broker reset connection.")
else:
if verbosity >= 4:
print("Error:\t\t\tBroker was not ready for reading.")
s.close()
# Update the last fuzz params
last_fuzz = {
"seed": seed,
"fuzz_intensity": fuzz_intensity,
"construct_intensity": construct_intensity,
"crash_index": crash_index,
"network_response_index": network_response_index,
"filestream_response_index": filestream_response_index,
"source_frequency": source_frequency,
"network_response_frequency": network_response_frequency,
"filestream_response_frequency": filestream_response_frequency,
"payload": payload
}
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-H", "--host", help = "Fuzzing target host. Default is localhost.")
parser.add_argument("-P", "--port", help = "Fuzzing target port. Default is 1883.")
parser.add_argument("-B", "--broker_exe", help = "Set the broker exe command/location. If the broker crashes, this can be used to restart it.")
parser.add_argument("-R", "--restart_on_crash", help = "If set, the fuzzer will try to use the option provided by 'broker_exe' to restart the broker.", action = "store_true")
parser.add_argument("-s", "--seed", help = "Set the seed. If not set by the user, the system time is used as the seed.")
parser.add_argument("-fd", "--fuzz_delay", help = "Set the delay between each fuzzing attempt. Default is 0.1 seconds.")
parser.add_argument("-I", "--index", help = "Source the fuzzer using an index in the crashes.txt log file.")
parser.add_argument("-rd", "--response_delay", help="Set the delay between sending a packet and receiving the response from the broker. Default is whatever fuzz delay is set to.")
parser.add_argument("-m", "--max_runs", help = "Set the number of fuzz attempts made. If not set, the fuzzer will run until the broker crashes.")
parser.add_argument("-fi", "--fuzz_intensity", help = "Set the intensity of the fuzzer, from 0 to 10. 0 means packets are not fuzzed at all. Default is 3.")
parser.add_argument("-ci", "--construct_intensity", help = "Set the intensity of the payload constructer, from 0 to 3. The constructor decides what order to send packets. For example, 0 means all packets begin with CONNECT and end wth DISCONNECT. Default is 0.")
parser.add_argument("-sf", "--source_frequency", help = "Set the frequency of sourcing the fuzzer's input with a packet that previously triggered a crash, from 0 to 4. 0 means never source and 4 means always source. Default is 2.")
parser.add_argument("-nrf", "--network_response_frequency", help = "Set the frequency of sourcing the fuzzer's input with a packet that previously triggered a unique network response from the broker, from 0 to 4. 0 means never source and 4 means always source. Default is 2.")
parser.add_argument("-frf", "--filestream_response_frequency", help = "Set the frequency of sourcing the fuzzer's input with a packet that previously triggered an anamolous response to stdout or stderr, from 0 to 4. 0 means never source and 4 means always source. Default is 2.")
parser.add_argument("-mnt", "--max_network_response_threshold", help = "Set the maximum similarity threshold for entries in the broker response file, from 0 to 1. For example, a threshold of 0.3 means entries will be NOT logged if they are at least 30 percent similar to any other entry. Default is 0.5.")
parser.add_argument("-mft", "--max_filestream_response_threshold", help = "Set the maximum similarity threshold for entries in the filestream response file, from 0 to 1. Default is 0.5.")
parser.add_argument("-mne", "--max_network_response_entries", help = "Set the maximum number of entries allowed in the broker responses file. Fuzzer will not write to this file if the number of entries exceeds this value. Default is 150.")
parser.add_argument("-flp", "--filestream_logging_preference", help = "Set the preference of logging stdout/stderr responses from the broker. 0 means exclude responses that contain keywords in the keywords.txt file. 1 means exclude responses that do not contain keywords. 2 means do not exclude any responses. Default is 2.")
parser.add_argument("-nnl", "--no_network_response_log", help = "If set, do not log network responses from the broker.", action="store_true")
parser.add_argument("-nfl", "--no_filestream_response_log", help="If set, do not log filestream responses from the broker.", action="store_true")
parser.add_argument("-afi", "--auto_fuzz_intensity", help = "If set, the fuzz intensity changes randomly every run.", action="store_true")
parser.add_argument("-aci", "--auto_construct_intensity", help="If set, the construct intensity changes randomly every run.", action="store_true")
parser.add_argument("-v", "--verbosity", help = "Set verbosity, from 0 to 5. 0 means nothing is printed. Default is 1.")
parser.add_argument("-p", "--payload_only", help = "Do not fuzz. Simply return the payload before and after it is fuzzed. Also return the params.", action = "store_true")
parser.add_argument("-rp", "--repeat_payload", help = "Send the same payload over and over again. This essentially just keeps the seed at a fixed value.", action = "store_true")
parser.add_argument("-O", "--output_directory", help = "Set the output directory for files generated by the fuzzer. Default is 'outputs.")
args = parser.parse_args()
global host, port, broker_exe, fuzz_intensity, construct_intensity, source_frequency, network_response_frequency, filestream_response_frequency, construct_payload, payload_only, verbosity, response_delay, restart_on_crash, no_network_response_log, no_filestream_response_log, max_network_response_entries, max_network_response_threshold, max_filestream_response_threshold, output_directory, output_directory, filestream_logging_preference
if(args.host):
host = args.host
else:
host = "localhost"
if(args.port):
port = int(args.port)
else:
port = 1883
if args.output_directory:
output_directory = args.output_directory
else:
output_directory = "outputs"
if not path.exists(output_directory):
os.mkdir(output_directory)
# This arg means we just source from an index in crashes.txt. Handy for verifying a crash quickly.
if args.index:
crash_index = int(args.index)
f = open(output_directory + "/crashes.txt", "r")
selected_line = f.read().splitlines()[crash_index + 1].split(",")
f.close()
seed = int(selected_line[2])
fuzz_intensity = int(selected_line[3])
construct_intensity = int(selected_line[4])
source_frequency = int(selected_line[8])
network_response_frequency = int(selected_line[9])
filestream_response_frequency = int(selected_line[10])
else:
if(args.seed):
seed = int(args.seed)
else:
seed = math.floor(time.time())
if(args.fuzz_intensity):
fuzz_intensity = int(args.fuzz_intensity)
if fuzz_intensity > 10:
fuzz_intensity = 10
if fuzz_intensity < 0:
fuzz_intensity = 0
else:
fuzz_intensity = 3
if(args.construct_intensity):
construct_intensity = int(args.construct_intensity)
if construct_intensity > 3:
construct_intensity = 3
if construct_intensity < 0:
construct_intensity = 0
else:
construct_intensity = 0
if(args.source_frequency):
source_frequency = int(args.source_frequency)
if source_frequency < 0:
source_frequency = 0
if source_frequency > 4:
source_frequency = 4
else:
source_frequency = 2
if(args.network_response_frequency):
network_response_frequency = int(args.network_response_frequency)
if network_response_frequency < 0:
network_response_frequency = 0
if network_response_frequency > 4:
network_response_frequency = 4
else:
network_response_frequency = 2
if(args.filestream_response_frequency):
filestream_response_frequency = int(args.filestream_response_frequency)
if filestream_response_frequency < 0:
filestream_response_frequency = 0
if filestream_response_frequency > 4:
filestream_response_frequency = 4
else:
filestream_response_frequency = 2
if(args.fuzz_delay):
fuzz_delay = float(args.fuzz_delay)
else:
fuzz_delay = 0.1
if(args.response_delay):
response_delay = float(args.response_delay)
else:
response_delay = fuzz_delay
if(args.max_runs):
max_runs = int(args.max_runs)
if(args.auto_fuzz_intensity):
auto_fuzz_intensity = True
else:
auto_fuzz_intensity = False
if(args.auto_construct_intensity):
auto_construct_intensity = True
else:
auto_construct_intensity = False
if(args.verbosity):
verbosity = int(args.verbosity)
if verbosity > 5:
verbosity = 5
if verbosity < 0:
verbosity = 0
else:
verbosity = 1
if(args.no_network_response_log):
no_network_response_log = True
else:
no_network_response_log = False
if(args.no_filestream_response_log):
no_filestream_response_log = True
else:
no_filestream_response_log = False
if(args.max_network_response_entries):
max_network_response_entries = int(args.max_network_response_entries)
else:
max_network_response_entries = 150
if(args.max_network_response_threshold):
max_network_response_threshold = float(args.max_network_response_threshold)
if max_network_response_threshold < 0:
max_network_response_threshold = 0
if max_network_response_threshold > 1:
max_network_response_threshold = 1
else:
max_network_response_threshold = 0.5
if(args.max_filestream_response_threshold):
max_filestream_response_threshold = float(args.max_filestream_response_threshold)
if max_filestream_response_threshold < 0:
max_filestream_response_threshold = 0
if max_filestream_response_threshold > 1:
max_filestream_response_threshold = 1
else:
max_filestream_response_threshold = 0.5
if(args.filestream_logging_preference):
filestream_logging_preference = int(args.filestream_logging_preference)
if filestream_logging_preference < 0:
filestream_logging_preference = 0
if filestream_logging_preference > 2:
filestream_logging_preference = 2
else:
filestream_logging_preference = 2
if(args.payload_only):
payload_only = True
random.seed(seed)
params = get_params()
print("\nYour params: ", params)
else:
payload_only = False
if args.broker_exe and not payload_only:
broker_exe = args.broker_exe
start_broker()
time.sleep(0.1)
if(args.restart_on_crash):
restart_on_crash = True
if "broker_exe" not in globals():
print("You cannot restart on crash if the broker exe is not defined.")
exit()
else:
restart_on_crash = False
print("Hello fellow fuzzer :)")
print("Host: %s, Port: %d" % (host, port))
print("Base seed: ", seed)
print("Fuzz Intensity: ", fuzz_intensity)
print("Construct intensity: ", construct_intensity)
print("Source frequency: ", source_frequency)
print("Network response frequency: ", network_response_frequency)
print("Filestream response frequency: ", filestream_response_frequency)
print("\n")
total_runs = 1
while True:
if verbosity >= 1 and not payload_only:
print("\nRun:\t\t\t", total_runs)
if verbosity >= 3:
print("Seed:\t\t\t", seed)
if verbosity >= 4:
print("Fuzz intensity:\t\t", fuzz_intensity)
print("Construct intensity:\t", construct_intensity)
fuzz(seed)
time.sleep(fuzz_delay)
total_runs += 1
if not args.repeat_payload:
seed += 1
if 'max_runs' in locals():
max_runs -= 1
if max_runs <= 0:
exit()
if auto_fuzz_intensity and not args.repeat_payload:
fuzz_intensity = random.randint(0, 10)
if auto_construct_intensity and not args.repeat_payload:
construct_intensity = random.randint(0, 3)
if __name__ == "__main__":
main(sys.argv[1:]) | [
"[email protected]"
] | |
9f0c67b333e7ed2b34add130f79369733d59c295 | 7dea1f39db25ba8f9899bd9bf23899442cdfd2fd | /course10/newDaysBetweenDates.py | 7b61793ffab6ae9339040ba533249f38c6dfbffe | [] | no_license | skyhack1212/Udacity_cs101 | 5b3b1e899d7b7caf6792fc3a7f7c676003465cfa | a3fa82c11a3a53ccf13fa61da83099a3eab50a5f | refs/heads/master | 2021-01-02T22:56:34.014802 | 2019-01-08T12:10:08 | 2019-01-08T12:10:08 | 99,428,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | # Credit goes to Websten from forums
#
# Use Dave's suggestions to finish your daysBetweenDates
# procedure. It will need to take into account leap years
# in addition to the correct number of days in each month.
def isLeapYear(year):
if(year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
return True
return False
def daysInMonth(year, month):
if month == 1 or month == 3 or month == 5 or month==7 or month==8 or month==10 or month==12:
return 31
elif month == 2:
if isLeapYear(year):
return 29
else:
return 28
else:
return 30
def nextDay(year, month, day):
"""Simple version: assume every month has 30 days"""
if day < daysInMonth(year, month):
return year, month, day + 1
else:
if month == 12:
return year + 1, 1, 1
else:
return year, month + 1, 1
def dateIsBefore(year1, month1, day1, year2, month2, day2):
"""Returns True if year1-month1-day1 is before year2-month2-day2. Otherwise, returns False."""
if year1 < year2:
return True
if year1 == year2:
if month1 < month2:
return True
if month1 == month2:
return day1 < day2
return False
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
"""Returns the number of days between year1/month1/day1
and year2/month2/day2. Assumes inputs are valid dates
in Gregorian calendar."""
# program defensively! Add an assertion if the input is not valid!
assert not dateIsBefore(year2, month2, day2, year1, month1, day1)
days = 0
while dateIsBefore(year1, month1, day1, year2, month2, day2):
year1, month1, day1 = nextDay(year1, month1, day1)
days += 1
return days
def test():
test_cases = [((2012,1,1,2012,2,28), 58),
((2012,1,1,2012,3,1), 60),
((2011,6,30,2012,6,30), 366),
((2011,1,1,2012,8,8), 585 ),
((1900,1,1,1999,12,31), 36523)]
for (args, answer) in test_cases:
result = daysBetweenDates(*args)
if result != answer:
print "Test with data:", args, "failed"
else:
print "Test case passed!"
test()
| [
"[email protected]"
] | |
25c9c41636dbbcb4310599cacbafa9f03bc91f59 | 75cdf50bd85457d6a87dd4f82f59713627e6868c | /{{cookiecutter.project_slug}}/tasks/common.py | 449babb447d97ec7eff3e2b721ec4c19df8eb0e9 | [
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"ISC",
"BSD-3-Clause",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"LGPL-2.0-or-later",
"GPL-3.0-only",
"MIT"
] | permissive | RainrainWu/cookiecutter-python-template | 1d17c553cecac1514aee3858db8ac6d11c0a2769 | 5e4d4e8f8310a48117aab624f7cd3448d468c16d | refs/heads/master | 2023-02-26T06:37:23.843159 | 2021-02-10T13:49:35 | 2021-02-10T13:49:35 | 332,129,150 | 0 | 0 | MIT | 2021-01-23T04:51:54 | 2021-01-23T04:44:09 | null | UTF-8 | Python | false | false | 179 | py | VENV_PREFIX = "{{ cookiecutter.dependency_management_tool }} run"
_COMMON_TARGETS = ["{{ cookiecutter.project_slug }}", "tests"]
COMMON_TARGETS_AS_STR = " ".join(_COMMON_TARGETS)
| [
"[email protected]"
] | |
e2fb15687a2fb4606ef1097d0bad75b43d927800 | c21bc0402cd6c5e337fcb72b3de65ff868c89127 | /BagOfWords/util.py | eae9474dd3dc34c5710446689ae2efbdcda80660 | [] | no_license | snayak04/SentimentAnalysis | cb4f1a45fc3f80fbe1e00807cad991281bb59988 | d9653ad67cdf502dff28f04e70436937371d9177 | refs/heads/master | 2020-04-03T21:55:08.609373 | 2018-11-22T02:22:52 | 2018-11-22T02:22:52 | 155,587,013 | 0 | 0 | null | 2018-10-31T16:11:24 | 2018-10-31T16:11:23 | null | UTF-8 | Python | false | false | 2,055 | py | import string
from nltk.corpus import stopwords
from string import punctuation
from os import listdir
from collections import Counter
### Preprocessing Methods ###
#Open file in read-only and extract content into variable 'content'
def loadFile(filename):
openFile = open(filename, 'r')
content = openFile.read()
openFile.close()
return content
#Tokenize file
def tokenizeFile(filename):
tokens = filename.split() #remove whitespace
tokens = [x.strip(string.punctuation) for x in tokens] #remove punctuation
    tokens = [word for word in tokens if word.isalpha()] #remove non-alphabetic words
stopWords = set(stopwords.words('english')) #remove stop words
tokens = [word for word in tokens if not word in stopWords]
tokens = [word for word in tokens if len(word) > 1] #remove 1-letter tokens
return tokens
#Convert tokens to single strings for easier encoding
def fileToLine(filename, vocab):
content = loadFile(filename)
tokens = tokenizeFile(content)
tokens = [word for word in tokens if word in vocab]
return ' '.join(tokens)
#Load all reviews and start mapping words to counter
def loadReviews(directory, vocab, is_train):
lines = list()
for filename in listdir(directory):
if filename.startswith('cv9') and is_train:
continue
if not filename.startswith('cv9') and not is_train:
continue
path = directory + '/' + filename
line = fileToLine(path, vocab)
lines.append(line)
return lines
#Predict reviews based on MLP network
def predictReview(review, vocab, tokenizer, model):
#Split review into words and filter based on current vocab
tokens = tokenizeFile(review)
tokens = [word for word in tokens if word in vocab]
lines = ' '.join(tokens)
encode = tokenizer.texts_to_matrix([lines], mode='freq')
#Predict review: 0 if positive, 1 if negative
y = model.predict(encode, verbose=0)
return round(y[0,0])
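# Hedged usage sketch for predictReview: it assumes a Keras Tokenizer fitted on
# the training lines and an already-trained model (neither is built in this
# module, so the names below are illustrative):
#
#   from keras.preprocessing.text import Tokenizer
#   tokenizer = Tokenizer()
#   tokenizer.fit_on_texts(train_lines)
#   label = predictReview("A moving, masterful film.", vocab, tokenizer, model)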
| [
"snayak04"
] | snayak04 |
c00582835f9a6cc3ab53d75912e263e4440fa610 | 68b562a4450190133575ef6f6f1dbea799c8446f | /python/CHP_model_Casadi_v49.py | 99e0a8cfe019ecb73c84b9c7581f5a2c0cf6be9d | [] | no_license | doanminhdang/chp_scheduling | ca356f8618ecde55b2b3ae34d4be305bb2244791 | 7dd0bd20f17d33b5059c819b8a9c5f56d3184912 | refs/heads/master | 2022-01-05T23:58:41.721515 | 2019-07-15T15:00:31 | 2019-07-15T15:00:31 | 110,808,675 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,707 | py |
# coding: utf-8
# In[1]:
import pylab as pl
import casadi as ca
import numpy as np
print(ca.__version__)
# In[2]:
pl.close("all")
# Parameters for experiments
## Duration and time points
hours = 24
controls_actions_per_hour = 4
t0 = 0.0
tf = hours * 3600.0
N = hours * controls_actions_per_hour
time_points = pl.linspace(t0, tf, N + 1)
dt = (tf - t0) / N # Duration of a time interval
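# With the values above: N = 96 control intervals, each of length dt = 900 s (15 min).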
## Number of storage layers in the HOT tank
nlayer_htes = 90
## Number of storage layers in the COLD tank
nlayer_ctes = 40
# In[42]:
# Physical parameters of the system
# NOTE: values set to 1 below are placeholders, to be replaced with plant data.
T_amb = 1
Pth_LOAD_H = 1
Pth_LOAD_C = 1
v_dot_CC_H = 1
T_CC_H_FL = 1
v_dot_CC_C = 1
T_CC_C_FL = 1
Pth_CHP_Nominal = 9.6e3  # in W
Pel_CHP_Nominal = 1
CHP_eta_Thermal = 1
CHP_eta_Electrical = 1
CHP_Fuel_LHV = 1
CHP_Fuel_HHV = 12
## AdCM
v_dot_AdCM_LT_set = 1
v_dot_AdCM_MT_set = 1
v_dot_AdCM_HT_set = 1
SF = 0
v_dot_HP_HT = 1
v_dot_HP_MT = 1
m_dot_HP_HT_FL_Set = 1
m_dot_HP_MT_FL_Set = 1
v_dot_OC1 = 1
v_dot_OC2 = 1
v_dot_CCM_MT = 1
v_dot_CCM_LT = 1
T_CCM_MT_RL = 1
m_dot_CCM_MT_FL_Set = 1
m_dot_CCM_LT_FL_Set = 1
# Constants
## Heat exchangers
A_HX12 = 1
A_HX1 = A_HX12
A_HX2 = A_HX12
A_HX3 = 1
U_HX12 = 1
U_HX1 = U_HX12
U_HX2 = U_HX12
U_HX3 =1
## Outdoor coils
A_OC =1
U_OC =1
RPM_max=1
rho_water=1
## Tanks
h=1
n=1
pi=3.14
rho=1
D=1.0
t=0.1
## Hot load
T_LOAD_H_CC_FL = 1
## Cold load
T_LOAD_C_CC_FL = 1
# In[37]:
# Secondary parameters to be used in equations (with conversion)
m_dot_CC_H = v_dot_CC_H*rho_water/3600
m_dot_CC_C = v_dot_CC_C*rho_water/3600
m_dot_AdCM_LT_Set = v_dot_AdCM_LT_set*rho_water/3600
m_dot_AdCM_MT_Set = v_dot_AdCM_MT_set*rho_water/3600
m_dot_AdCM_HT_Set = v_dot_AdCM_HT_set*rho_water/3600
m_dot_HP_HT = v_dot_HP_HT*rho_water/3600
m_dot_HP_MT = v_dot_HP_MT*rho_water/3600
m_dot_OC1 = v_dot_OC1*rho_water/3600
m_dot_OC2 = v_dot_OC2*rho_water/3600
m_dot_CCM_MT = v_dot_CCM_MT*rho_water/3600
m_dot_CCM_LT = v_dot_CCM_LT*rho_water/3600
## Parameters for a layer in the HTES
zi = h / n;
mi = pi * (D / 2) ** 2 * zi * rho;
di = D - 2 * t;
Aamb = pi * D * zi;
Alayer = pi * di ** 2 / 4;
# In[ ]:
# Some equations for intermediate quantities that are not real variables
# Can be eliminated by hand
# Pth_AdCM_LT = f(T_HTES_t, T_AdCM_MT_RL, T_CTES_t)
# COP_AdCM = f(T_HTES_t, T_AdCM_MT_RL, T_CTES_t)
# Pth_CCM_LT =
# Pth_AdCM_HT =
# Pth_AdCM_MT =
# Pth_CCM_MT
# In[31]:
# Declaration of variables
## Number of control variables
nu = 4
## Number of state variables
nx = 1 + nlayer_htes + nlayer_ctes
## Number of algebraic states (depending on the way we define variables)
ny = 48
## States and controls
u = ca.SX.sym("u", nu) # Control
x = ca.SX.sym("x", nx ) # Differential states: temperatures in the tank
y = ca.SX.sym("x", ny) # Output variable
# Input variables
CHP_Switch = u[0]
HP_Switch = u[1]
AdCM_Switch = u[2]
CCM_Switch = u[3]
CHP_ON_int = u[0]
RevHP_HP_ON_int = u[1]
AdCM_ON_int = u[2]
RevHP_CC_ON_int = u[3]
# State variables
Pth_CHP_x = x[0]
T_HTES = ca.SX.sym("T_HTES", nlayer_htes)
T_CTES = ca.SX.sym("T_CTES", nlayer_ctes)
for k in range(nlayer_htes):
T_HTES[k] = x[1+k]
for k in range(nlayer_ctes):
T_CTES[k] = x[1+nlayer_htes+k]
# Output variables. We will re-order the component y[i] from 0, 1, ...
mdot_FUEL = y[0]
Pth_CHP = y[1]
Pel_CHP = y[2]
mdot_CHP_nonzero = y[3]
mdot_CHP = y[4]
T_CHP_FL = y[5]
T_HTES_t = y[6]
T_HTES_l = y[7]
T_HTES_b = y[8]
Pth_LOAD_H = y[42]
mdot_LOAD_H = y[9]
T_LOAD_H_FL = y[10]
T_HX3_FL = y[11]
Pel_HP = y[30]
Pth_HP_HT = y[31]
Pth_HP_MT = y[32]
T_HP_HT_FL = y[12]
T_HP_HT_RL = y[13]
T_HP_MT_FL = y[14]
T_HP_MT_RL = y[15]
Pel_OC2 = y[33]
v_dot_OC2 = y[41]
T_OC2_FL = y[16]
T_OC2_RL = y[17]
T_AdCM_HT_FL = y[18]
T_AdCM_MT_FL = y[19]
T_AdCM_MT_RL = y[20]
T_AdCM_LT_FL = y[21]
Pel_AdCM = y[34]
Pel_OC1 = y[35]
v_dot_OC1 = y[40]
T_OC1_FL = y[22]
T_OC1_RL = y[23]
T_CTES_t = y[24]
T_CTES_b = y[25]
Pel_CCM = y[36]
Pth_CCM_LT = y[37]
Pth_CCM_MT = y[38]
T_CCM_LT_FL = y[26]
T_CCM_MT_FL = y[27]
Pth_LOAD_C = y[43]
mdot_LOAD_C = y[28]
T_LOAD_C_FL = y[29]
Pel_OC3 = y[39]
m_dot_CHP_set = y[44]
T_LOAD_H_CC_RL = y[45]
T_LOAD_C_CC_RL = y[46]
#to_be_checked_and_added
# In[6]:
# Secondary variables
v_dot_LOAD_H = mdot_LOAD_H / rho_water
v_dot_LOAD_C = mdot_LOAD_C / rho_water
# In[7]:
# Initial values of the variables at time t=0
## Initial states
Pth_CHP_x_t0 = 1
T_HTES_layer_top_t0 = 35.0
T_HTES_layer_bottom_t0 = 25.0
T_HTES_t0 = pl.linspace(T_HTES_layer_top_t0, T_HTES_layer_bottom_t0, nlayer_htes)
T_CTES_layer_top_t0 = 25.0
T_CTES_layer_bottom_t0 = 10.0
T_CTES_t0 = pl.linspace(T_CTES_layer_top_t0, T_CTES_layer_bottom_t0, nlayer_ctes)
x_0 = [Pth_CHP_x_t0] + list(T_HTES_t0) + list(T_CTES_t0)
## Initial controls
chp_status_init = 0.0 * pl.ones(time_points.size - 1)
u_init = chp_status_init # to check
u_0 = [1, 1, 0, 0]
# In[10]:
# Equations
# TODO: split the equations from the OpenModelica-generated model into separate lists:
# one for the differential equations and one for the algebraic equations
## Differential equations
dxdt = []
## Use equations like:
# dxdt.append((1.0 / m_s_i) * status_CHP * (mdot_CHP_to_9 * T_out_CHP - mdot_9_to_8 * T_s[8])) # T_9_dot
# ...
# dxdt = ca.vertcat(*dxdt)
## Algebraic equation, left hand side = 0
f_z = []
## Use equations like:
# f_z.append(P_th_CC/c_p - mdot_6_to_LOAD * (T_s[5] - T_LOAD_RL))
# f_z = ca.vertcat(*f_z)
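## Hedged illustration (added, commented out): one possible pair of entries,
## read off the first OpenModelica equation pasted below
## (560.794 * der(Pth_CHP) + Pth_CHP = Pth_CHP * CHP_ON_int), taking the
## right-hand side as the nominal CHP power -- the time constant and the exact
## residual form are assumptions to verify against the Modelica model:
# dxdt.append((Pth_CHP_Nominal * CHP_ON_int - Pth_CHP_x) / 560.794)
# f_z.append(Pth_CHP - Pth_CHP_x)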
# In[46]:
## Equations copied from the OpenModelica-generated model, with variable names mapped to the CasADi name set.
## NOTE: they are pasted as plain assignments for now; repeated left-hand sides (e.g. Pth_CC below)
## overwrite each other and still have to be rewritten as residuals in f_z.
#560.794 * der(Pth_CHP) + Pth_CHP = Pth_CHP * CHP_ON_int
v_dot_CHP = 0.433352645 + -0.01514531 * T_HTES_b + 0.00024329 * T_HTES_b ** 2.0
CHP_H_W_T_M_FL_K = (T_HTES_b+273.15) + 0.2392344497607656 * Pth_CHP / m_dot_CHP_set
v_dot_FUEL = 1000.0 * (Pel_CHP + Pth_CHP) / (853.5 * (CHP_eta_Thermal + CHP_eta_Electrical) * CHP_Fuel_HHV)
Pth_CC = 4.18 * m_dot_CC_H * (T_LOAD_H_CC_FL - T_LOAD_H_CC_RL)
Pth_CC = 4.18 * (v_dot_LOAD_H*rho_water/3600) * ((T_LOAD_H_FL+273.15) - (T_LOAD_H_CC_RL+273.15))
COP = -0.049623287373 + 0.01893348591 * T_CTES_t + 0.013340776694 * T_AdCM_HT_FL + 0.017822939671 * T_AdCM_MT_RL + -0.001280352166 * T_CTES_t ** 2.0 + -0.000190832894 * T_AdCM_HT_FL ** 2.0 + -0.001993352016 * T_AdCM_MT_RL ** 2.0 + T_CTES_t * (-0.000334095159 * T_AdCM_HT_FL + 0.001455689548 * T_AdCM_MT_RL) + 0.000569253554 * T_AdCM_HT_FL * T_AdCM_MT_RL + 1.3421174e-05 * T_CTES_t * T_AdCM_HT_FL * T_AdCM_MT_RL
Pth_AdCM_LT = AdCM_ON_int * (4.07950934099 + 0.04152472361 * T_CTES_t + 0.160630808297 * T_AdCM_HT_FL + -0.859860168466 * T_AdCM_MT_RL + 0.003462744142 * T_CTES_t ** 2.0 + -0.001049096999 * T_AdCM_HT_FL ** 2.0 + 0.015142231276 * T_AdCM_MT_RL ** 2.0 + T_CTES_t * (0.016955368833 * T_AdCM_HT_FL + -0.016151596215 * T_AdCM_MT_RL) + -0.001917799045 * T_AdCM_HT_FL * T_AdCM_MT_RL + -0.000200778961 * T_CTES_t * T_AdCM_HT_FL * T_AdCM_MT_RL)
Pth_AdCM_HT = Pth_AdCM_LT / COP - SF
Pth_AdCM_MT = Pth_AdCM_HT + Pth_AdCM_LT
T_AdCM_LT_FL_K = (T_CTES_t+273.15) + -0.2392344497607656 * Pth_AdCM_LT / m_dot_AdCM_LT_Set
T_AdCM_MT_FL_K = (T_AdCM_MT_RL+273.15) + 0.2392344497607656 * Pth_AdCM_MT / m_dot_AdCM_MT_Set
ADCM_C_W_T_M_HT_FL_K = (T_AdCM_HT_FL+273.15) + -0.2392344497607656 * Pth_AdCM_HT / m_dot_AdCM_HT_Set
Pth_HP_HT = HP_Switch * (9.0 + 0.294510922 * T_HP_MT_FL + T_HP_HT_RL * (0.064700246 + 0.002953381 * T_HP_MT_FL) + -0.001625553 * T_HP_MT_FL ** 2.0 + -0.001627312 * T_HP_HT_RL ** 2.0)
Pth_HP_MT = Pth_HP_HT - Pel_HP
T_HP_HT_FL__K = (T_HP_HT_RL+273.15) + 0.2392344497607656 * Pth_HP_HT / m_dot_HP_HT_FL_Set
RevHP_HC_W_T_M_LT_FL__K = (T_HP_MT_FL+273.15) + -0.2392344497607656 * Pth_HP_MT / m_dot_HP_MT_FL_Set
Pel_HP = HP_Switch * (1.733202228 + -0.007333788 * T_HP_MT_FL + T_HP_HT_RL * (0.019283658 + 0.000450498 * T_HP_MT_FL) + -8.304799999999999e-05 * T_HP_MT_FL ** 2.0 + 0.000671146 * T_HP_HT_RL ** 2.0)
Pth_CCM_LT = CCM_Switch * (9.0 + 0.308329176 * T_CCM_LT_FL + 0.045285097 * T_CCM_MT_RL + 0.002252906 * T_CCM_LT_FL * T_CCM_MT_RL + -0.001213212 * T_CCM_LT_FL ** 2.0 + -0.002264659 * T_CCM_MT_RL ** 2.0)
Pel_CCM = CCM_Switch * (1.833202228 + -0.007333788 * T_CCM_LT_FL + 0.019283658 * T_CCM_MT_RL + 0.000450498 * T_CCM_LT_FL * T_CCM_MT_RL + -8.304799999999999e-05 * T_CCM_LT_FL ** 2.0 + 0.000671146 * T_CCM_MT_RL ** 2.0)
Pth_CCM_MT = Pel_CCM + Pth_CCM_LT
RevHP_HC_W_T_M_LT_FL_K = (T_CCM_LT_FL+273.15) + -0.2392344497607656 * Pth_CCM_LT / m_dot_CCM_LT_FL_Set
T_CCM_MT_FL_K = (T_CCM_MT_RL+273.15) + 0.2392344497607656 * Pth_CCM_MT / m_dot_CCM_MT_FL_Set
Pth_CC = 4.18 * m_dot_CC_C * (T_LOAD_C_CC_RL - T_LOAD_C_CC_FL)
Pth_CC = 4.18 * (v_dot_LOAD_C*rho_water/3600) * ((T_LOAD_C_CC_RL+273.15) - (T_LOAD_C_FL+273.15))
#4.18 * mi * der(HTES_H_W_T_M_IT_K[1]) = Alayer * lambda_eff * (HTES_H_W_T_M_IT_K[2] - HTES_H_W_T_M_IT_K[1]) / zi + 4.18 * (m_dot_LOAD * (T_HTES_LOAD_RL_K - HTES_H_W_T_M_IT_K[1]) + m_dot_AdCM_HT * (T_HTES_AdCM_RL_K - HTES_H_W_T_M_IT_K[1]) + d_pos * m_dot[2] * (HTES_H_W_T_M_IT_K[2] - HTES_H_W_T_M_IT_K[1])) + -20.0 * Aamb * kappa * (HTES_H_W_T_M_IT_K[1] - T_amb_K)
#4.18 * mi * der(HTES_H_W_T_M_IT_K[2]) = Alayer * lambda_eff * (HTES_H_W_T_M_IT_K[1] + -2.0 * HTES_H_W_T_M_IT_K[2] + HTES_H_W_T_M_IT_K[3]) / zi + 4.18 * m_dot[2] * (d_pos * (HTES_H_W_T_M_IT_K[3] - HTES_H_W_T_M_IT_K[2]) + d_neg * (HTES_H_W_T_M_IT_K[2] - HTES_H_W_T_M_IT_K[1])) + COIL_H_E_PT_M / /*Real*/(n) - Aamb * kappa * (HTES_H_W_T_M_IT_K[2] - T_amb_K)
#Chot = 3.66736 * (v_dot_OC1*rho_water/3600)
#Ccold = 1.005 * (v_dot_air_OC1*rho_water/3600)
#Cmin = min(Chot, Ccold)
#Cmax = max(Chot, Ccold)
#qmax = Cmin * (T_OC1_RL - T)
#q = eff * qmax
#eff = (1.0 - exp(NTU * (Cr - 1.0))) / (1.0 - Cr * exp(NTU * (Cr - 1.0)))
#RPM_max / RPM_real = Volt_max / Volt_real
#Pel_max / Pel_OC1 = (RPM_max / RPM_real) ** 3.0
#v_dot_air_max / v_dot_air_real = RPM_max / RPM_real
#Chot = 3.66736 * (v_dot_OC2*rho_water/3600)
#Ccold = 1.005 * (v_dot_air_OC2*rho_water/3600)
#Cmin = min(Chot, Ccold)
#Cmax = max(Chot, Ccold)
#qmax = Cmin * (T_OC2_RL - T)
#q = eff * qmax
#eff = (1.0 - exp(NTU * (Cr - 1.0))) / (1.0 - Cr * exp(NTU * (Cr - 1.0)))
#RPM_max / RPM_real = Volt_max / Volt_real
#Pel_max / Pel_OC2 = (RPM_max / RPM_real) ** 3.0
#v_dot_air_max / v_dot_air_real = RPM_max / RPM_real
#Chot = 4.18 * m_dot_OC
#Ccold = 1.005 * m_dot_air
#Cmin = min(Chot, Ccold)
#Cmax = max(Chot, Ccold)
#qmax = Cmin * (T_CCM_MT_RL - T)
#q = eff * qmax
#eff = (1.0 - exp(NTU * (Cr - 1.0))) / (1.0 - Cr * exp(NTU * (Cr - 1.0)))
#q = Chot * (T_CCM_MT_RL - T_CCM_MT_FL)
#q = Ccold * (T_air_out - T)
#RPM_max / RPM_real = Volt_max / Volt_real
#Pel_max / Pel_OC3 = (RPM_max / RPM_real) ** 3.0
#v_dot_air_max / v_dot_air_real = RPM_max / RPM_real
#4.18 * mi * der(CTES_H_W_T_M_IT_K[1]) = 4.18 * (m_dot_AdCM * (T_CTES_AdCM_In_K - CTES_H_W_T_M_IT_K[1]) + m_dot_RevHP * (T_CTES_RevHP_In_K - CTES_H_W_T_M_IT_K[1])) + Alayer * lambda_eff * (CTES_H_W_T_M_IT_K[2] - CTES_H_W_T_M_IT_K[1]) / zi + 4.18 * d_neg * m_dot[2] * (CTES_H_W_T_M_IT_K[1] - CTES_H_W_T_M_IT_K[2]) - Aamb * kappa * (CTES_H_W_T_M_IT_K[1] - T_amb_K)
#4.18 * mi * der(CTES_H_W_T_M_IT_K[2]) = Alayer * lambda_eff * (CTES_H_W_T_M_IT_K[1] + -2.0 * CTES_H_W_T_M_IT_K[2] + CTES_H_W_T_M_IT_K[3]) / zi + 4.18 * m_dot[2] * (d_neg * (CTES_H_W_T_M_IT_K[2] - CTES_H_W_T_M_IT_K[3]) + d_pos * (CTES_H_W_T_M_IT_K[1] - CTES_H_W_T_M_IT_K[2])) - Aamb * kappa * (CTES_H_W_T_M_IT_K[2] - T_amb_K)
# In[44]:
# In[4]:
# TODO: bring new equations into the form of an optimal control problem
#### (below is untouched)
# In[70]:
# Constraints
T_min_all = 0.0
T_max_all = 100.0
x_lbw = list(T_min_all*pl.ones(nlayer_htes))  # NOTE: covers only the HTES layers; extend to all nx states
x_ubw = list(T_max_all*pl.ones(nlayer_htes))
#zeros_z = list(0.0*pl.ones(f_z.numel()))
#zeros_x = list(0.0*pl.ones(dxdt.numel()))
# In[ ]:
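## Hedged sketch (added, commented out) of how the pieces above could feed a
## CasADi DAE integrator once dxdt and f_z are filled in; the solver choice
## and options are assumptions:
# dae = {'x': x, 'z': y, 'p': u, 'ode': ca.vertcat(*dxdt), 'alg': ca.vertcat(*f_z)}
# F = ca.integrator('F', 'idas', dae, {'tf': dt})
# res = F(x0=x_0, z0=[0.0] * ny, p=u_0)  # one control interval of length dt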
| [
"[email protected]"
] | |
6e4673accf9050042c9ffb31cffdb043d7572107 | 4e3a2a1053866ff1b3bfd5dc1a36291672d218dc | /0x05-python-exceptions/0-safe_print_list.py | 0d196d4f1229af316330cc273c0c3c422d10427d | [] | no_license | RicardoBarretoR/holbertonschool-higher_level_programming | ec423d147e7e2632fefff7e433118cf588bd43dd | 61103213102132a9764b72b4160df3fd71cce95c | refs/heads/master | 2022-12-31T10:14:01.551769 | 2020-09-25T11:58:08 | 2020-09-25T11:58:08 | 259,455,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | #!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
"""prints x elements of a list"""
i = 0
try:
for i in range(x):
print(my_list[i], end="")
i += 1
print()
except IndexError:
print()
return i
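# Hedged demo (added; in Holberton projects this would live in a separate
# main file, and the list below is just an example):
if __name__ == "__main__":
    my_list = [1, 2, 3, 4, 5]
    nb_print = safe_print_list(my_list, 2)    # prints "12", returns 2
    nb_print = safe_print_list(my_list, 10)   # stops at the end, returns 5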
| [
"[email protected]"
] | |
f28cefecd13bee7dd27faed4152428c0820398b0 | 5575ac1139172a6176c7b15542bb4d916548a0f3 | /appFinal/migrations/0001_initial.py | 5470f7b2dc77fabd6c1332bdbd2d2c48ddd071e4 | [] | no_license | vordonez74/projectFinal | 7a6b5634641a7e41887c7156d8fdfd6dd949064a | 1447c1cfc6e26d93ab7bdfc398d740fe1c47754a | refs/heads/master | 2023-06-16T10:56:55.328709 | 2021-07-08T01:51:01 | 2021-07-08T01:51:01 | 383,969,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,578 | py | # Generated by Django 3.2.5 on 2021-07-07 19:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Carrito',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('usuario', models.CharField(max_length=64)),
('listaProductos', models.CharField(max_length=64)),
('totalCarrito', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Categorias',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descripcion', models.CharField(max_length=64)),
],
),
migrations.CreateModel(
name='Productos',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=64)),
('Imagen', models.CharField(max_length=64)),
('descripcion', models.CharField(max_length=64)),
('precio', models.CharField(max_length=64)),
('Categorias', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='catogory', to='appFinal.categorias')),
],
),
]
| [
"[email protected]"
] | |
adceee9749b6eb09e03688c50cda464468776591 | b65ce61b6c87ce877f24eef2e03883011a37d682 | /src/models.py | 678a367c74dfc27a7d15b92db55c7ca5baffcba1 | [
"MIT"
] | permissive | yiyg510/DSNR | 17d0ffd364a3abc40f03f4d8bb0be61dbdfa07c1 | c352e561b9caebe11a6faccb7877176bc43a7e3f | refs/heads/master | 2022-12-05T03:04:51.258751 | 2020-08-20T08:45:22 | 2020-08-20T08:45:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,008 | py | import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import LSTMCell, LSTMStateTuple
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn
import networkx as nx
class STNE(object):
############
def encoder(self,X,layers):
for i in range(layers - 1):
name_W = 'encoder_W_' + str(i)
name_b = 'encoder_b_' + str(i)
X = tf.nn.tanh(tf.matmul(X, self.W[name_W]) + self.b[name_b])
return X
def decoder(self,X,layers):
for i in range(layers - 1):
name_W = 'decoder_W_' + str(i)
name_b = 'decoder_b_' + str(i)
X = tf.nn.tanh(tf.matmul(X, self.W[name_W]) + self.b[name_b])
return X
def make_autoencoder_loss(self,X_new,X_re):
def get_autoencoder_loss(X, newX):
return tf.reduce_sum(tf.pow((newX - X), 2))
def get_reg_loss(weights, biases):
reg = tf.add_n([tf.nn.l2_loss(w) for w in weights.values()])
reg += tf.add_n([tf.nn.l2_loss(b) for b in biases.values()])
return reg
loss_autoencoder = get_autoencoder_loss(X_new, X_re)
loss_reg = get_reg_loss(self.W, self.b)
return self.config.alpha * loss_autoencoder + self.config.reg * loss_reg
################
    def construct_target_neighbors(self, nx_G, X, mode='EMN'):
# construct target neighbor feature matrix
X_target = np.zeros(X.shape)
nodes = nx_G.nodes()
if mode == 'OWN':
# autoencoder for reconstructing itself
return X
elif mode == 'EMN':
# autoencoder for reconstructing Elementwise Median Neighbor
for node in nodes:
neighbors = list(nx_G.neighbors(node))
if len(neighbors) == 0:
X_target[node] = X[node]
else:
temp = np.array(X[node])
                    for n in neighbors:
                        # (an edge-weighted variant would stack X[n] * edge_weight here)
                        temp = np.vstack((temp, X[n]))
temp = np.median(temp, axis=0)
X_target[node] = temp
return X_target
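    # Worked illustration (added, hedged): for own feature [2, 2] and neighbor
    # features [0, 2] and [4, 6], the 'EMN' target is the columnwise median of
    # the stacked rows, i.e. [2., 2.].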
#################
def __init__(self,config, hidden_dim, nx_G, X_1,node_num, fea_dim, seq_len, attention_size,
depth=1, node_fea=None, node_fea_trainable=False):
self.node_num, self.fea_dim, self.seq_len = node_num, fea_dim, seq_len
self.attention_size = attention_size
self.nx_G = nx_G
self.X_1 = X_1
self.config = config
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.input_seqs = tf.placeholder(tf.int32, shape=(None, self.seq_len), name='input_seq')
        self.dropout = tf.placeholder(tf.float32, name='dropout')
        X_target = self.construct_target_neighbors(self.nx_G, self.X_1, mode='EMN')
        X_target = tf.constant(X_target, dtype=tf.float32)
self.layers = len(config.struct)
struct = config.struct
self.W = {}
self.b = {}
# encode module
for i in range(self.layers - 1):
name_W = 'encoder_W_' + str(i)
name_b = 'encoder_b_' + str(i)
            self.W[name_W] = tf.get_variable(name_W, [struct[i], struct[i + 1]],
                                             initializer=tf.contrib.layers.xavier_initializer())
            self.b[name_b] = tf.get_variable(name_b, [struct[i + 1]], initializer=tf.zeros_initializer())
# decode module
struct.reverse()
for i in range(self.layers - 1):
name_W = 'decoder_W_' + str(i)
name_b = 'decoder_b_' + str(i)
self.W[name_W] = tf.get_variable(name_W, [struct[i], struct[i + 1]],
initializer=tf.contrib.layers.xavier_initializer())
self.b[name_b] = tf.get_variable(name_b, [struct[i + 1]], initializer=tf.zeros_initializer())
config.struct.reverse()
############## define input ###################
self.Y1 = self.encoder(self.X_1,self.layers)
self.X1_reconstruct = self.decoder(self.Y1, self.layers)
self.loss_autoencoder_1 = self.make_autoencoder_loss(X_target, self.X1_reconstruct)
        input_seq_embed = tf.nn.embedding_lookup(self.Y1, self.input_seqs, name='input_embed_lookup')
# encoder
encoder_cell_fw_0 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim), output_keep_prob=1 - self.dropout)
encoder_cell_bw_0 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim), output_keep_prob=1 - self.dropout)
if depth == 1:
encoder_cell_fw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_fw_0])
encoder_cell_bw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_bw_0])
else:
encoder_cell_fw_1 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim), output_keep_prob=1 - self.dropout)
encoder_cell_bw_1 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim), output_keep_prob=1 - self.dropout)
encoder_cell_fw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_fw_0] + [encoder_cell_fw_1] * (depth - 1))
encoder_cell_bw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_bw_0] + [encoder_cell_bw_1] * (depth - 1))
encoder_outputs, encoder_final = bi_rnn(encoder_cell_fw_all, encoder_cell_bw_all, inputs=input_seq_embed,
dtype=tf.float32)
######################Attention
self.encoder_output = tf.concat(encoder_outputs, 2)
input_shape = self.encoder_output.shape # (batch_size, sequence_length, hidden_size)
sequence_size = input_shape[1].value
hidden_size = input_shape[2].value
attention_w = tf.Variable(tf.truncated_normal([hidden_size, self.attention_size], stddev=0.1),
name='attention_w')
attention_b = tf.Variable(tf.constant(0.1, shape=[self.attention_size]), name='attention_b')
attention_u = tf.Variable(tf.truncated_normal([self.attention_size], stddev=0.1), name='attention_u')
z_list = []
for t in range(sequence_size):
u_t = tf.tanh(tf.matmul(self.encoder_output[:, t, :], attention_w) + tf.reshape(attention_b, [1, -1]))
z_t = tf.matmul(u_t, tf.reshape(attention_u, [-1, 1]))
z_list.append(z_t)
# Transform to batch_size * sequence_size
attention_z = tf.concat(z_list, axis=1)
alpha = tf.nn.softmax(attention_z)
attention_output = self.encoder_output * tf.reshape(alpha, [-1, sequence_size, 1])
final_output = tf.nn.dropout(attention_output, self.keep_prob)
######################
c_fw_list, h_fw_list, c_bw_list, h_bw_list = [], [], [], []
for d in range(depth):
(c_fw, h_fw) = encoder_final[0][d]
(c_bw, h_bw) = encoder_final[1][d]
c_fw_list.append(c_fw)
h_fw_list.append(h_fw)
c_bw_list.append(c_bw)
h_bw_list.append(h_bw)
decoder_init_state = tf.concat(c_fw_list + c_bw_list, axis=-1), tf.concat(h_fw_list + h_bw_list, axis=-1)
decoder_cell = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim * 2), output_keep_prob=1 - self.dropout)
decoder_init_state = LSTMStateTuple(
tf.layers.dense(decoder_init_state[0], units=hidden_dim * 2, activation=None),
tf.layers.dense(decoder_init_state[1], units=hidden_dim * 2, activation=None))
encoder_output_T = tf.transpose(final_output, [1, 0, 2])
new_state = decoder_init_state
outputs_list = []
for i in range(seq_len):
new_output, new_state = decoder_cell(tf.zeros(shape=tf.shape(encoder_output_T)[1:]), new_state) # None
outputs_list.append(new_output)
decoder_outputs = tf.stack(outputs_list, axis=0) # seq_len * batch_size * hidden_dim
decoder_outputs = tf.transpose(decoder_outputs, [1, 0, 2]) # batch_size * seq_len * hidden_dim
self.decoder_outputs = decoder_outputs
output_preds = tf.layers.dense(decoder_outputs, units=self.node_num, activation=None)
loss_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_seqs, logits=output_preds)
self.loss_ce = tf.reduce_mean(loss_ce, name='loss_ce')
        self.toll_loss = config.alpha * self.loss_ce + config.reg * self.loss_autoencoder_1  # joint (total) loss
self.train_op = tf.train.RMSPropOptimizer(config.sg_learning_rate).minimize(self.toll_loss)
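# Hedged usage sketch (added; the config object, graph G, feature matrix and
# walk batches are placeholders assumed for illustration):
# model = STNE(config, hidden_dim=500, nx_G=G, X_1=features, node_num=N,
#              fea_dim=features.shape[1], seq_len=10, attention_size=100)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     _, loss = sess.run([model.train_op, model.toll_loss],
#                        feed_dict={model.input_seqs: batch,
#                                   model.dropout: 0.2, model.keep_prob: 0.8})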
class STNEConv(object):
def conv_pool(self, in_tensor, filter_size, num_filters, s_length, embedding_size=256):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
# None, seq_len, word_dim, 1
conv = tf.nn.conv2d(in_tensor, W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, s_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
# print(pooled)
return pooled # None, 1, 1, num_filters
def __init__(self, hidden_dim, node_num, fea_dim, seq_len, contnt_len, num_filters, word_dim,
vocab_size, attention_size, depth=1, filter_sizes=[2, 4, 8]):
self.node_num, self.fea_dim = node_num, fea_dim
self.attention_size = attention_size
self.seq_len = seq_len
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.input_seqs = tf.placeholder(tf.int32, shape=(None, self.seq_len), name='input_seq')
self.input_seq_content = tf.placeholder(tf.int32, shape=(None, self.seq_len, contnt_len),
name='input_seq_content')
self.dropout_rnn = tf.placeholder(tf.float32, name='dropout_rnn')
self.dropout_word = tf.placeholder(tf.float32, name='dropout_word')
self.word_embeds_W = tf.Variable(initial_value=tf.random_uniform(shape=(vocab_size, word_dim)),
name='content_embed', trainable=True)
contnt_embeds = tf.nn.embedding_lookup(self.word_embeds_W, self.input_seq_content, name='input_content_embed')
contnt_embeds = tf.reshape(contnt_embeds, [-1, contnt_len, word_dim, 1])
pooled = []
for fsize in filter_sizes:
# batch*seq_len, 1, num_filters
tmp = self.conv_pool(contnt_embeds, fsize, num_filters, contnt_len, word_dim)
pooled.append(tf.reshape(tmp, [-1, self.seq_len, num_filters]))
input_seq_embed = tf.concat(pooled, axis=-1) # batch, seq_len, num_filters*len(filter_sizes)
input_seq_embed = tf.nn.dropout(input_seq_embed, keep_prob=1 - self.dropout_word)
encoder_cell_fw_0 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim), output_keep_prob=1 - self.dropout_rnn)
encoder_cell_bw_0 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim), output_keep_prob=1 - self.dropout_rnn)
if depth == 1:
encoder_cell_fw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_fw_0])
encoder_cell_bw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_bw_0])
else:
encoder_cell_fw_1 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim),
output_keep_prob=1 - self.dropout_rnn)
encoder_cell_bw_1 = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim),
output_keep_prob=1 - self.dropout_rnn)
encoder_cell_fw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_fw_0] + [encoder_cell_fw_1] * (depth - 1))
encoder_cell_bw_all = tf.contrib.rnn.MultiRNNCell([encoder_cell_bw_0] + [encoder_cell_bw_1] * (depth - 1))
encoder_outputs, encoder_final = bi_rnn(encoder_cell_fw_all, encoder_cell_bw_all, inputs=input_seq_embed,
dtype=tf.float32)
############################
self.encoder_output = tf.concat(encoder_outputs, 2)
input_shape = self.encoder_output.shape # (batch_size, sequence_length, hidden_size)
sequence_size = input_shape[1].value
hidden_size = input_shape[2].value
attention_w = tf.Variable(tf.truncated_normal([hidden_size, self.attention_size], stddev=0.1),
name='attention_w')
attention_b = tf.Variable(tf.constant(0.1, shape=[self.attention_size]), name='attention_b')
attention_u = tf.Variable(tf.truncated_normal([self.attention_size], stddev=0.1), name='attention_u')
z_list = []
for t in range(sequence_size):
u_t = tf.tanh(tf.matmul(self.encoder_output[:, t, :], attention_w) + tf.reshape(attention_b, [1, -1]))
z_t = tf.matmul(u_t, tf.reshape(attention_u, [-1, 1]))
z_list.append(z_t)
# Transform to batch_size * sequence_size
attention_z = tf.concat(z_list, axis=1)
self.alpha = tf.nn.softmax(attention_z)
# print((self.encoder_output * tf.reshape(self.alpha, [-1, sequence_size, 1])), '++++++++++++++++++++++')
attention_output = self.encoder_output * tf.reshape(self.alpha, [-1, sequence_size, 1])
self.final_output = tf.nn.dropout(attention_output, self.keep_prob)
####################################
c_fw_list, h_fw_list, c_bw_list, h_bw_list = [], [], [], []
for d in range(depth):
(c_fw, h_fw) = encoder_final[0][d]
(c_bw, h_bw) = encoder_final[1][d]
c_fw_list.append(c_fw)
h_fw_list.append(h_fw)
c_bw_list.append(c_bw)
h_bw_list.append(h_bw)
decoder_init_state = tf.concat(c_fw_list + c_bw_list, axis=-1), tf.concat(h_fw_list + h_bw_list, axis=-1)
decoder_cell = tf.contrib.rnn.DropoutWrapper(LSTMCell(hidden_dim * 2), output_keep_prob=1 - self.dropout_rnn)
decoder_init_state = LSTMStateTuple(
tf.layers.dense(decoder_init_state[0], units=hidden_dim * 2, activation=None),
tf.layers.dense(decoder_init_state[1], units=hidden_dim * 2, activation=None))
# self.encoder_output = tf.concat(encoder_outputs, axis=-1)
# encoder_output_T = tf.transpose(self.encoder_output, [1, 0, 2]) # h
encoder_output_T = tf.transpose(self.final_output, [1, 0, 2])
new_state = decoder_init_state
outputs_list = []
for i in range(seq_len):
new_output, new_state = decoder_cell(tf.zeros(shape=tf.shape(encoder_output_T)[1:]), new_state) # None
outputs_list.append(new_output)
decoder_outputs = tf.stack(outputs_list, axis=0) # seq_len * batch_size * hidden_dim
decoder_outputs = tf.transpose(decoder_outputs, [1, 0, 2]) # batch_size * seq_len * hidden_dim
###########################
input_shape = decoder_outputs.shape # (batch_size, sequence_length, hidden_size)
sequence_size = input_shape[1].value
hidden_size = input_shape[2].value
attention_w = tf.Variable(tf.truncated_normal([hidden_size, self.attention_size], stddev=0.1),
name='attention_w')
attention_b = tf.Variable(tf.constant(0.1, shape=[self.attention_size]), name='attention_b')
attention_u = tf.Variable(tf.truncated_normal([self.attention_size], stddev=0.1), name='attention_u')
z_list = []
for t in range(sequence_size):
u_t = tf.tanh(tf.matmul(decoder_outputs[:, t, :], attention_w) + tf.reshape(attention_b, [1, -1]))
z_t = tf.matmul(u_t, tf.reshape(attention_u, [-1, 1]))
z_list.append(z_t)
# Transform to batch_size * sequence_size
attention_z = tf.concat(z_list, axis=1)
self.alpha = tf.nn.softmax(attention_z)
attention_output = decoder_outputs * tf.reshape(self.alpha, [-1, sequence_size, 1])
final_output_1 = tf.nn.dropout(attention_output, self.keep_prob)
#####################
self.decoder_output = final_output_1
# decoder_outputs, _ = dynamic_rnn(decoder_cell, inputs=self.encoder_output, initial_state=decoder_init_state)
output_preds = tf.layers.dense(final_output_1, units=self.node_num, activation=None)
loss_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_seqs, logits=output_preds)
self.loss_ce = tf.reduce_mean(loss_ce, name='loss_ce')
self.global_step = tf.Variable(1, name="global_step", trainable=False)
| [
"[email protected]"
] | |
bd15d3f0a4cbeb2de64c1293f83467585556f5bd | 854082acbfb37f089894820ccdc722159db1503e | /pra/texwrap_fill.py | dc74e98ebd8709d92f4dd5f275a2eec1290acaa1 | [] | no_license | weekenlee/pythoncode | f9ddaada6c75e152a0be910600bb2dd27c539438 | de8f882c612947ca69f1c9ca8aa5159799ff3a56 | refs/heads/master | 2021-06-29T17:58:16.813382 | 2020-09-15T14:32:02 | 2020-09-15T14:32:02 | 139,693,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | #-*- coding:utf-8 -*-
import textwrap
sample_text = u'''helloworld hel 你好 lodjfjafhasdhfhsd;fja;ljsdflajdflajsdklfja;lsjflk;ajdfl;jkal;dfjl;jdadflk;kjakfdljajfk;ajflakjf;ajkfajfa;jfkdsjfskajf;ajdfj;akfj
'''
# fill() rewraps the text so no line exceeds 18 columns
print(textwrap.fill(sample_text, 18))
# dedent() strips whitespace common to every line (a no-op for this sample)
print(textwrap.dedent(sample_text))
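# Related call (hedged addition, not in the original script): wrap() returns
# the broken lines as a list instead of one joined string.
print(textwrap.wrap(sample_text, 18))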
| [
"[email protected]"
] | |
23f642792dce9f9470a6cbd1725e8f3806b79997 | 63a0da0d4b8a607fcf761566dc57c755b6757f6d | /counting_sort.py | 3784de74c6384131068576f87d08bf1cf9ba31be | [] | no_license | sharvani198/alg-lang | 3d28d1482c930ece1de993113b82881086a1e8e7 | 803fafe97b0ea780e14a749e1bdda496b051fd24 | refs/heads/master | 2021-09-01T10:42:59.132309 | 2017-12-26T15:11:54 | 2017-12-26T15:11:54 | 115,417,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | def counting_sort(a, k):
    # counting sort for integer keys in the range [0, k]
    c = [0]*(k+1)          # c[v] = number of occurrences of value v
    b = [0]*(len(a)+1)     # output array, 1-indexed so b[0] stays unused
    for i in range(0, len(a)):
        c[a[i]] = c[a[i]]+1
    for i in range(1, k+1):
        c[i] = c[i]+c[i-1]  # prefix sums: c[v] = number of elements <= v
    for i in range(len(a)-1, -1, -1):  # walk backwards so the sort stays stable
        b[c[a[i]]] = a[i]
        c[a[i]] -= 1
    return b[1:]
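# Hedged self-check (added; not part of the original snippet):
if __name__ == '__main__':
    assert counting_sort([2, 5, 3, 0, 2, 3, 0, 3], 5) == [0, 0, 2, 2, 3, 3, 3, 5]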
# demo (updated to Python 3 print syntax):
# arr = [2, 5, 3, 0, 2, 3, 0, 3]
# brr = counting_sort(arr, 5)
# print(brr)  # -> [0, 0, 2, 2, 3, 3, 3, 5]
 | [
"[email protected]"
] | |
addaa3a239c37f452e9fe35c962b89e5bc2b4099 | bc0d2a64097bffbd1b718e0b6620206068620efe | /Design_Patterns/Creational/Singleton/Singleton.py | f1350ba8456b6a648f65ad09d5cf8ce2489dff8f | [] | no_license | lonesloane/Python-Snippets | 8ab7e502d8df33f07d4a46a9e3f57a1053325690 | bc596d7be03c97782036ce8199aaf97dfc8bf0e2 | refs/heads/master | 2021-01-10T08:13:43.723208 | 2019-09-14T09:56:45 | 2019-09-14T09:56:45 | 49,678,474 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | class Singleton:
__instance = None
    def __new__(cls, val=None):
        # __new__ intercepts instantiation: the single instance is built once,
        # and every later call hands back that same object
        if Singleton.__instance is None:
            Singleton.__instance = object.__new__(cls)
            Singleton.__instance.val = val
        return Singleton.__instance
# === Usage ===
x = Singleton()
x.val = 'burger'
print('x val: %s ' % x.val)   # x val: burger
y = Singleton()               # returns the same instance as x
y.val = 'chips'
print('y val: %s ' % y.val)   # y val: chips
print('x val: %s ' % x.val)   # x val: chips -- x and y share state
print('x == y ?')
print(x == y)                 # True
| [
"Nyarlatothep72"
] | Nyarlatothep72 |
840b8e9a17f3feac6bbf5fba76255e63dbd08862 | 0a14f1ede647d819b23fd7aecde054d5724342e6 | /Whiteboard/longest_increasing.py | a64d153963c0c690981fd6475189dbc08a9bdc21 | [] | no_license | jenihuang/Coding_problems | f1982dc3461cefe4ae2d1720cc84fcbb4c191815 | 136f685466911971c42604e11146ec55a8313371 | refs/heads/master | 2020-04-28T18:51:10.814217 | 2019-03-23T04:13:44 | 2019-03-23T04:13:44 | 175,491,775 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | import unittest
def longest_inc(nums):
    """Return the length of the longest strictly increasing contiguous run."""
    if not nums:
        return 0
    biggest = 0
    count = 1
for i in range(len(nums) - 1):
if nums[i + 1] > nums[i]:
count += 1
else:
if count > biggest:
biggest = count
count = 1
if count > biggest:
biggest = count
return biggest
class Test(unittest.TestCase):
data = [
([0, 1, 2], 3),
([0, 2, 1, 3, 5, 10], 4),
([-6, 2, 8, -2, 0, -1], 3)
]
    def test_longest_inc(self):
for [test_case, expected] in self.data:
result = longest_inc(test_case)
self.assertEqual(result, expected)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
483255496643c01322415690e91adaeb614d1348 | 81e0867d2bdf9b1ea4c59d420a7ac8b563161a9e | /POO/exercises/tv_mini_project/main.py | 8491cbee14c7bf910b578ed9cbb5e215e4b62f44 | [] | no_license | Jeremias333/python_startlatam | 0e7313578d5370a24ecf2ff71b742161f3488cb3 | 922a775d0bff5cb3b24a7e9a3ae0c8d8af5f0e90 | refs/heads/master | 2022-11-27T19:31:43.278467 | 2020-08-04T14:01:54 | 2020-08-04T14:01:54 | 271,123,557 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | from tv_model import TV
import os
def main():
    os.system('cls' if os.name == 'nt' else 'clear')
    # create the TV object with its channel range and volume range
    list_volume = list()
    list_channel = list()
    list_volume.append(1)
    list_volume.append(10)
    list_channel.append(2)
    list_channel.append(13)
    tv = TV(list_channel, list_volume)
    while True:
        menu(tv)
def menu(tv: TV):
    os.system('cls' if os.name == 'nt' else 'clear')
    print(f"Notice: {tv.get_warn()}")
    if tv.get_state():
        print(f"Current volume: {tv.get_volume()}")
        print(f"Current channel: {tv.get_channel()}")
        option = int(input("\n 1 - Volume + \n 2 - Volume - \n 3 - Channel + \n 4 - Channel - \n 0 - Turn off \n"))
        tv.set_warn()
        if option == 1:
            tv.volume_more()
        elif option == 2:
            tv.volume_minum()
        elif option == 3:
            tv.channel_more()
        elif option == 4:
            tv.channel_minum()  # original called tv.channel.minum(); channel_minum is assumed to mirror volume_minum in tv_model
        elif option == 0:
            tv.off_tv()
        else:
            os.system('cls' if os.name == 'nt' else 'clear')
            print("Invalid option.")
    else:
        os.system('cls' if os.name == 'nt' else 'clear')
        option = int(input("\n 1 - Turn the TV on \n"))
        if option == 1:
            tv.on_tv()
main() | [
"[email protected]"
] | |
fe1e009d1cdaca2b9e6bebb73913ca819e8eb8c3 | c1551488f58893d38221c692208ee07af7d30d09 | /blaze/evaluator/simulator/request_queue.py | d9689d414f32124170f3e7441d1821917ac27b91 | [
"MIT"
] | permissive | muralisr/blaze | 3df2a73fea7eb9834a9689e21352504c83a7de5b | e0d183af441cfe63ba1346cd0d6d8ac76ff494ca | refs/heads/master | 2020-09-12T18:58:16.498218 | 2020-04-10T06:16:50 | 2020-04-10T06:16:50 | 215,646,920 | 0 | 0 | MIT | 2019-10-16T21:22:07 | 2019-10-16T21:22:06 | null | UTF-8 | Python | false | false | 9,496 | py | """
This module defines some helper classes as data structures for the Simulator and RequestQueue.
It also defines the RequestQueue, which simulates the network link.
"""
import copy
from collections import defaultdict
from typing import DefaultDict, Dict, List, NamedTuple, Optional, Set, Tuple
from blaze.config.environment import Resource
from blaze.preprocess.url import Url
from .tcp_state import TCPState
class Node(NamedTuple):
""" A Node in the Simulator graph """
resource: Resource
priority: int
children: List["Node"] = []
parent: Optional["Node"] = None
def __hash__(self):
return hash(self.resource.url)
def __eq__(self, other: "Node"):
return self.resource == other.resource
class QueueItem:
""" An item in the RequestQueue """
def __init__(self, node: Node, size: int, origin: str, delay_ms: float = 0):
self.node = node
self.bytes_left = size
self.origin = origin
self.delay_ms_left = delay_ms
self.time_spent_downloading = 0
class RequestQueue:
"""
RequestQueue simulates ongoing network requests and the amount of time it would
take to complete them.
"""
def __init__(self, bandwidth_kbps: int, rtt_latency_ms: int, loss_prop: float):
self.queue: List[QueueItem] = []
self.delayed: List[QueueItem] = []
self.connected_origins: Set[str] = set()
self.node_to_queue_item_map: Dict[Node, QueueItem] = {}
# convert kilobits per second (kbps) to bytes per second (Bps)
self.link_bandwidth_bps = bandwidth_kbps * (1000 / 8)
self.bandwidth_kbps = bandwidth_kbps
self.rtt_latency_ms = rtt_latency_ms
self.loss_prop = loss_prop
# model TCP dynamics per domain
self.tcp_state: DefaultDict[TCPState] = defaultdict(lambda: TCPState(loss_prop=self.loss_prop))
def __contains__(self, node: Node):
"""
:return: True if the given node is already scheduled for download
"""
return any(qi.node == node for qi in self.queue) or any(qi.node == node for qi in self.delayed)
def __len__(self):
return len(self.queue) + len(self.delayed)
def copy(self) -> "RequestQueue":
"""
:return: a copy of the request queue
"""
rq = RequestQueue(self.bandwidth_kbps, self.rtt_latency_ms, self.loss_prop)
rq.queue = [copy.copy(qi) for qi in self.queue]
rq.delayed = [copy.copy(qi) for qi in self.delayed]
rq.node_to_queue_item_map = {
**{node: copy.copy(qi) for (node, qi) in self.node_to_queue_item_map.items()},
**{qi.node: qi for qi in rq.queue},
**{qi.node: qi for qi in rq.delayed},
}
rq.connected_origins = set(self.connected_origins)
rq.tcp_state = copy.deepcopy(self.tcp_state)
return rq
@property
def bandwidth(self):
"""
Calculates the bandwidth available to each currently-ongoing request. This is
calculated as the total link bandwidth split evenly amongst all of the currently-
downloading files, but could be made more sophisticated by taking into account
per-domain bandwidth limits.
"""
return self.link_bandwidth_bps / (len(self.queue) or 1)
def add(self, node: Node):
""" Adds an item to the queue for immediate download """
self.add_with_delay(node, 0)
def remove(self, node: Node):
"""
Removes the given node from the request queue
:param node: the node to remove
"""
self.queue = [qi for qi in self.queue if qi.node != node]
self.delayed = [qi for qi in self.delayed if qi.node != node]
def add_with_delay(self, node: Node, delay_ms: float, cached: bool = False):
"""
Adds an item to the queue but does not start it until the delay has occurred. Additionally,
this method checks to see if a connection has been opened for the resource's origin. If not,
it adds 2-RTT delay for the resource.
:param node: The node to add to the request queue
:param delay_ms: The milliseconds to delay the request before starting it (not including RTT)
:param cached: Specifies if the given resource is cached and does not need to be downloaded
"""
domain = Url.parse(node.resource.url).domain
if cached:
delay_ms = max(0.0, delay_ms)
queue_item = QueueItem(node, 0, domain, delay_ms)
else:
num_rtts = self.tcp_state[domain].round_trips_needed_for_bytes(node.resource.size)
if domain not in self.connected_origins:
num_rtts += 1
delay_ms = max(0.0, delay_ms + (num_rtts * self.rtt_latency_ms))
queue_item = QueueItem(node, node.resource.size, domain, delay_ms)
if delay_ms <= 0:
self.queue.append(queue_item)
else:
self.delayed.append(queue_item)
self.node_to_queue_item_map[node] = queue_item
def estimated_completion_time(self, node: Node) -> Tuple[float, float]:
"""
Runs through a copy of the request queue and returns the relative time offset
at which the given node would have completed.
:param node: The node to estimate the completion time of
        :return: (0, 0) if the node is not in the request queue; otherwise a tuple of
        the relative completion time and the time spent actually downloading
"""
if node not in self:
return 0, 0
rq = self.copy()
total_time = 0
completed_nodes, step_ms = [], 0
while rq and node not in completed_nodes:
completed_nodes, step_ms = rq.step()
total_time += step_ms
return total_time, rq.time_spent_downloading(node)
def time_spent_downloading(self, node: Node) -> float:
"""
Returns the ms spent downloading the given node. It returns 0 if the node is not in the queue,
has not been scheduled to download, or has not downloaded any bytes yet
:param node: The node to get the time spent downloading for
"""
if node not in self.node_to_queue_item_map:
return 0
return self.node_to_queue_item_map[node].time_spent_downloading
def remaining_delay(self, node: Node) -> float:
"""
Returns the delay ms left for a node before it starts downloading
"""
if node not in self.node_to_queue_item_map:
return 0
return self.node_to_queue_item_map[node].delay_ms_left
def step(self) -> Tuple[List[Node], float]:
"""
Performs one step through of the request queue, which simulates downloading until
one item finishes downloading (or more, if they finish at the same time). The method
then removes the finished downloads from the request queue and reduces the number
of bytes left to download for the remaining items correspondingly
:return: a tuple where the first value is a list of simulator Nodes that finished
downloading in this step; the second value is the time in milliseconds it took to
download those items in this step
"""
# check if the queue is empty
if not self.queue and not self.delayed:
return [], 0.0
# find the item with the least number of bytes left to download
if self.queue:
bytes_to_download = min(qi.bytes_left for qi in self.queue)
time_ms_to_download = 1000 * bytes_to_download / self.bandwidth
# OR, if the queue is empty, find the next delayed item to enqueue
else:
time_ms_to_download = min(qi.delay_ms_left for qi in self.delayed)
bytes_to_download = (time_ms_to_download * self.bandwidth) / 1000
# Reduce all delayed items by time_ms_to_download
for item in self.delayed:
item.delay_ms_left -= time_ms_to_download
# Reduce all queue elements by bytes_to_download
for item in self.queue:
item.bytes_left -= bytes_to_download
item.time_spent_downloading += time_ms_to_download
# Update the idle time for each TCP state
domains_downloaded_from = set(item.origin for item in self.queue)
for domain, tcp_state in self.tcp_state.items():
if domain in domains_downloaded_from:
tcp_state.add_bytes_sent(bytes_to_download)
else:
tcp_state.add_time_since_last_byte(time_ms_to_download)
# Find all delayed items that are ready to be queued
delayed_items_to_queue = [qi for qi in self.delayed if qi.delay_ms_left < 0.01]
# Find all queued items that have been completed and are ready for removal
completed_nodes = [qi.node for qi in self.queue if qi.bytes_left == 0]
# add origins for newly-queued items
for item in delayed_items_to_queue:
self.connected_origins.add(item.origin)
# update the delayed queue, removing items ready to be queued
self.delayed = [qi for qi in self.delayed if qi.delay_ms_left >= 0.01]
# update the queue, removing items that are done and adding delayed items ready to be queued
self.queue = [qi for qi in self.queue if qi.bytes_left > 0] + delayed_items_to_queue
        # return nodes that finished downloading and the total time taken in this step
return completed_nodes, time_ms_to_download
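# Hedged usage sketch (added; Resource construction and the numbers below are
# illustrative assumptions, not part of this module):
# rq = RequestQueue(bandwidth_kbps=12000, rtt_latency_ms=40, loss_prop=0.0)
# rq.add(root_node)
# while rq:
#     finished_nodes, elapsed_ms = rq.step()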
| [
"[email protected]"
] | |
66278b5a3570a1dcb7de95a66ba33ae891c66cd1 | 674fadca516b1711294030d002ffde00053a84cf | /Flash/losses/losses.py | ccd9f60c0e045b33acbb87f180c9a16de80d1fe2 | [] | no_license | parth1620/Flash-trainer | 9b9bb616d61ce5452e67ee7f0b0b2f0494bab1da | 5ea43128b71c56c8e9ea850d950bf76ecc743697 | refs/heads/master | 2023-08-05T23:48:38.347116 | 2021-10-11T14:33:05 | 2021-10-11T14:33:05 | 415,963,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | import torch
from torch import nn
import torch.nn.functional as F
'''
Loss functions for Flash; currently a label-smoothing wrapper around BCE.
'''
class LabelSmoothingForBCE(nn.Module):
def __init__(self, smoothing = 0.09):
super(LabelSmoothingForBCE, self).__init__()
self.smoothing = smoothing
    def forward(self, logits, labels):
        # work on a float copy so the caller's label tensor is not mutated in place
        labels = labels.float().clone()
        labels[labels == 1] = 1 - self.smoothing
        labels[labels == 0] = self.smoothing
        return F.binary_cross_entropy_with_logits(logits, labels)
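# Hedged usage sketch (added; model outputs and targets are placeholders):
# criterion = LabelSmoothingForBCE(smoothing=0.1)
# loss = criterion(logits, targets.float())  # targets are 0/1 labels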
| [
"[email protected]"
] | |
a7e5f8c54d4ceab61f36ff05c961d5ff8c49e137 | 94ca446c0f17d640f45941fa7c83530ef2fbc099 | /wrs-remote-clients-2.0.2/python-openstackclient-3.12.0/openstackclient/compute/v2/service.py | b5b9bd5e1e2ca06a11fa24ccba389fd67680455a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rmoorewrs/tic-windows-remote-clients | c1c2b8924e90ffd2951571bc098ec9873ffd3988 | ae16ee78a720852304d79f8b86dfe44e920cc72d | refs/heads/master | 2023-05-25T13:55:55.603100 | 2019-05-31T20:59:28 | 2019-05-31T20:59:28 | 189,649,925 | 0 | 0 | NOASSERTION | 2023-05-22T20:43:59 | 2019-05-31T19:46:28 | Python | UTF-8 | Python | false | false | 6,968 | py | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Service action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class DeleteService(command.Command):
_description = _("Delete compute service(s)")
def get_parser(self, prog_name):
parser = super(DeleteService, self).get_parser(prog_name)
parser.add_argument(
"service",
metavar="<service>",
nargs='+',
help=_("Compute service(s) to delete (ID only)")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
result = 0
for s in parsed_args.service:
try:
compute_client.services.delete(s)
except Exception as e:
result += 1
LOG.error(_("Failed to delete compute service with "
"ID '%(service)s': %(e)s"), {'service': s, 'e': e})
if result > 0:
total = len(parsed_args.service)
msg = (_("%(result)s of %(total)s compute services failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListService(command.Lister):
_description = _("List compute services")
def get_parser(self, prog_name):
parser = super(ListService, self).get_parser(prog_name)
parser.add_argument(
"--host",
metavar="<host>",
help=_("List services on specified host (name only)")
)
parser.add_argument(
"--service",
metavar="<service>",
help=_("List only specified service (name only)")
)
parser.add_argument(
"--long",
action="store_true",
default=False,
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
if parsed_args.long:
columns = (
"ID",
"Binary",
"Host",
"Zone",
"Status",
"State",
"Updated At",
"Disabled Reason"
)
else:
columns = (
"ID",
"Binary",
"Host",
"Zone",
"Status",
"State",
"Updated At"
)
data = compute_client.services.list(parsed_args.host,
parsed_args.service)
return (columns,
(utils.get_item_properties(
s, columns,
) for s in data))
class SetService(command.Command):
_description = _("Set compute service properties")
def get_parser(self, prog_name):
parser = super(SetService, self).get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
help=_("Name of host")
)
parser.add_argument(
"service",
metavar="<service>",
help=_("Name of service (Binary name)")
)
enabled_group = parser.add_mutually_exclusive_group()
enabled_group.add_argument(
"--enable",
action="store_true",
help=_("Enable service")
)
enabled_group.add_argument(
"--disable",
action="store_true",
help=_("Disable service")
)
parser.add_argument(
"--disable-reason",
default=None,
metavar="<reason>",
help=_("Reason for disabling the service (in quotas). "
"Should be used with --disable option.")
)
up_down_group = parser.add_mutually_exclusive_group()
up_down_group.add_argument(
'--up',
action='store_true',
help=_('Force up service'),
)
up_down_group.add_argument(
'--down',
action='store_true',
help=_('Force down service'),
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
cs = compute_client.services
if (parsed_args.enable or not parsed_args.disable) and \
parsed_args.disable_reason:
msg = _("Cannot specify option --disable-reason without "
"--disable specified.")
raise exceptions.CommandError(msg)
result = 0
enabled = None
try:
if parsed_args.enable:
enabled = True
if parsed_args.disable:
enabled = False
if enabled is not None:
if enabled:
cs.enable(parsed_args.host, parsed_args.service)
else:
if parsed_args.disable_reason:
cs.disable_log_reason(parsed_args.host,
parsed_args.service,
parsed_args.disable_reason)
else:
cs.disable(parsed_args.host, parsed_args.service)
except Exception:
status = "enabled" if enabled else "disabled"
LOG.error("Failed to set service status to %s", status)
result += 1
force_down = None
try:
if parsed_args.down:
force_down = True
if parsed_args.up:
force_down = False
if force_down is not None:
cs.force_down(parsed_args.host, parsed_args.service,
force_down=force_down)
except Exception:
state = "down" if force_down else "up"
LOG.error("Failed to set service state to %s", state)
result += 1
if result > 0:
msg = _("Compute service %(service)s of host %(host)s failed to "
"set.") % {"service": parsed_args.service,
"host": parsed_args.host}
raise exceptions.CommandError(msg)
| [
"[email protected]"
] | |
228acccc4ecfe1b296b5b1504076f808619c76aa | ffc2aec4424ecba3963d7f8b56333796b2a7c07e | /plugins/youtube.py | 1e12164a8b9b5fbe3474b1cc64e807892e4dde26 | [] | no_license | adrian17/inzbot | c73d4635cdf0231d5bcabfeff7fe38fec33c479d | 47981450bbc4af83bcc2f9178fdbcf8e1df09a17 | refs/heads/master | 2021-01-18T16:35:55.582410 | 2018-02-15T18:47:09 | 2018-03-09T22:33:49 | 29,621,285 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | from plugin_base import *
import logging
import isodate
import requests
def color(message):
    # \x03 is the mIRC color control code; the "3" selects green and the
    # trailing \x03 resets to the default color
    return "\x033" + message + "\x03 "
class YoutubePlugin(Plugin):
"""Prints data about a YouTube video from its link."""
@on_pubmsg
@priority(80)
@pattern(R"youtube\.com\/watch\?v=(?P<id>[-\w]+)")
@pattern(R"youtube\.com\/v\/(?P<id>[-\w]+)")
@pattern(R"youtu\.be\/(?P<id>[-\w]+)")
def handle_line(self, bot, event):
video_id = event.match.group("id")
params = {
"id": video_id,
"key": bot.google_api_key,
"part": "snippet,contentDetails",
"fields": "items(snippet(title,channelTitle),contentDetails(duration))"
}
try:
            data = requests.get("https://www.googleapis.com/youtube/v3/videos", params=params).json()
            video_title = data["items"][0]["snippet"]["title"].strip()
            video_author = data["items"][0]["snippet"]["channelTitle"].strip()
            video_len = data["items"][0]["contentDetails"]["duration"]  # ISO 8601 duration
            duration = isodate.parse_duration(video_len)
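            # e.g. (hedged illustration) isodate.parse_duration("PT4M13S")
            # returns datetime.timedelta(seconds=253)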
except Exception:
logging.exception("")
bot.message("possible error, check it", target="adrian17")
return False
bot.message(color("↳ ") + "|{}| by {}, len: {}".format(video_title, video_author, str(duration)))
return True | [
"[email protected]"
] | |
1dfa5e81c5286303ec01df6f942e3e040df4ca14 | 64d16fbfaa6061add61ba82ec7428fde79ce07cb | /devel/lib/python2.7/dist-packages/uuv_control_msgs/srv/_AddWaypoint.py | ad3a65813b860a4bba63d8ceafee242a1e77b06d | [] | no_license | bnb15/clearpath_ros | 519da6013eff041896695416a5411b1ba07b1532 | 2ec03826bf44e17def364784636838a1b3fc64fe | refs/heads/master | 2020-05-31T21:35:26.359260 | 2019-06-06T02:15:56 | 2019-06-06T02:15:56 | 190,498,514 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,688 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from uuv_control_msgs/AddWaypointRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import uuv_control_msgs.msg
import std_msgs.msg
class AddWaypointRequest(genpy.Message):
_md5sum = "3a004c7bf8d1b045f54b4f0d0d7256f0"
_type = "uuv_control_msgs/AddWaypointRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
uuv_control_msgs/Waypoint waypoint
================================================================================
MSG: uuv_control_msgs/Waypoint
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
std_msgs/Header header
geometry_msgs/Point point
float64 max_forward_speed
float64 heading_offset
bool use_fixed_heading
float64 radius_of_acceptance
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
"""
__slots__ = ['waypoint']
_slot_types = ['uuv_control_msgs/Waypoint']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
waypoint
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddWaypointRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.waypoint is None:
self.waypoint = uuv_control_msgs.msg.Waypoint()
else:
self.waypoint = uuv_control_msgs.msg.Waypoint()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.waypoint.header.seq, _x.waypoint.header.stamp.secs, _x.waypoint.header.stamp.nsecs))
_x = self.waypoint.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_5dBd().pack(_x.waypoint.point.x, _x.waypoint.point.y, _x.waypoint.point.z, _x.waypoint.max_forward_speed, _x.waypoint.heading_offset, _x.waypoint.use_fixed_heading, _x.waypoint.radius_of_acceptance))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.waypoint is None:
self.waypoint = uuv_control_msgs.msg.Waypoint()
end = 0
_x = self
start = end
end += 12
(_x.waypoint.header.seq, _x.waypoint.header.stamp.secs, _x.waypoint.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.waypoint.header.frame_id = str[start:end].decode('utf-8')
else:
self.waypoint.header.frame_id = str[start:end]
_x = self
start = end
end += 49
(_x.waypoint.point.x, _x.waypoint.point.y, _x.waypoint.point.z, _x.waypoint.max_forward_speed, _x.waypoint.heading_offset, _x.waypoint.use_fixed_heading, _x.waypoint.radius_of_acceptance,) = _get_struct_5dBd().unpack(str[start:end])
self.waypoint.use_fixed_heading = bool(self.waypoint.use_fixed_heading)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.waypoint.header.seq, _x.waypoint.header.stamp.secs, _x.waypoint.header.stamp.nsecs))
_x = self.waypoint.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_5dBd().pack(_x.waypoint.point.x, _x.waypoint.point.y, _x.waypoint.point.z, _x.waypoint.max_forward_speed, _x.waypoint.heading_offset, _x.waypoint.use_fixed_heading, _x.waypoint.radius_of_acceptance))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.waypoint is None:
self.waypoint = uuv_control_msgs.msg.Waypoint()
end = 0
_x = self
start = end
end += 12
(_x.waypoint.header.seq, _x.waypoint.header.stamp.secs, _x.waypoint.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.waypoint.header.frame_id = str[start:end].decode('utf-8')
else:
self.waypoint.header.frame_id = str[start:end]
_x = self
start = end
end += 49
(_x.waypoint.point.x, _x.waypoint.point.y, _x.waypoint.point.z, _x.waypoint.max_forward_speed, _x.waypoint.heading_offset, _x.waypoint.use_fixed_heading, _x.waypoint.radius_of_acceptance,) = _get_struct_5dBd().unpack(str[start:end])
self.waypoint.use_fixed_heading = bool(self.waypoint.use_fixed_heading)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_5dBd = None
def _get_struct_5dBd():
global _struct_5dBd
if _struct_5dBd is None:
_struct_5dBd = struct.Struct("<5dBd")
return _struct_5dBd
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from uuv_control_msgs/AddWaypointResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import uuv_control_msgs.msg
import std_msgs.msg
class AddWaypointResponse(genpy.Message):
_md5sum = "48bd8f09705ced6872f0bda693e6f08c"
_type = "uuv_control_msgs/AddWaypointResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool success
uuv_control_msgs/Waypoint[] waypoints
================================================================================
MSG: uuv_control_msgs/Waypoint
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
std_msgs/Header header
geometry_msgs/Point point
float64 max_forward_speed
float64 heading_offset
bool use_fixed_heading
float64 radius_of_acceptance
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
"""
__slots__ = ['success','waypoints']
_slot_types = ['bool','uuv_control_msgs/Waypoint[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,waypoints
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddWaypointResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.waypoints is None:
self.waypoints = []
else:
self.success = False
self.waypoints = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.success))
length = len(self.waypoints)
buff.write(_struct_I.pack(length))
for val1 in self.waypoints:
_v1 = val1.header
buff.write(_get_struct_I().pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v3 = val1.point
_x = _v3
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_x = val1
buff.write(_get_struct_2dBd().pack(_x.max_forward_speed, _x.heading_offset, _x.use_fixed_heading, _x.radius_of_acceptance))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.waypoints is None:
self.waypoints = None
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.waypoints = []
for i in range(0, length):
val1 = uuv_control_msgs.msg.Waypoint()
_v4 = val1.header
start = end
end += 4
(_v4.seq,) = _get_struct_I().unpack(str[start:end])
_v5 = _v4.stamp
_x = _v5
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v4.frame_id = str[start:end].decode('utf-8')
else:
_v4.frame_id = str[start:end]
_v6 = val1.point
_x = _v6
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_x = val1
start = end
end += 25
(_x.max_forward_speed, _x.heading_offset, _x.use_fixed_heading, _x.radius_of_acceptance,) = _get_struct_2dBd().unpack(str[start:end])
val1.use_fixed_heading = bool(val1.use_fixed_heading)
self.waypoints.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.success))
length = len(self.waypoints)
buff.write(_struct_I.pack(length))
for val1 in self.waypoints:
_v7 = val1.header
buff.write(_get_struct_I().pack(_v7.seq))
_v8 = _v7.stamp
_x = _v8
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v7.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v9 = val1.point
_x = _v9
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_x = val1
buff.write(_get_struct_2dBd().pack(_x.max_forward_speed, _x.heading_offset, _x.use_fixed_heading, _x.radius_of_acceptance))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.waypoints is None:
self.waypoints = None
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.waypoints = []
for i in range(0, length):
val1 = uuv_control_msgs.msg.Waypoint()
_v10 = val1.header
start = end
end += 4
(_v10.seq,) = _get_struct_I().unpack(str[start:end])
_v11 = _v10.stamp
_x = _v11
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v10.frame_id = str[start:end].decode('utf-8')
else:
_v10.frame_id = str[start:end]
_v12 = val1.point
_x = _v12
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_x = val1
start = end
end += 25
(_x.max_forward_speed, _x.heading_offset, _x.use_fixed_heading, _x.radius_of_acceptance,) = _get_struct_2dBd().unpack(str[start:end])
val1.use_fixed_heading = bool(val1.use_fixed_heading)
self.waypoints.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_2dBd = None
def _get_struct_2dBd():
global _struct_2dBd
if _struct_2dBd is None:
_struct_2dBd = struct.Struct("<2dBd")
return _struct_2dBd
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
class AddWaypoint(object):
_type = 'uuv_control_msgs/AddWaypoint'
_md5sum = 'e853788769392728a6445812f447d75e'
_request_class = AddWaypointRequest
_response_class = AddWaypointResponse
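# --- Usage sketch (not part of the generated module) ---
# A minimal rospy client for this service could look like the lines below.
# The service name '/add_waypoint' is an assumption; the real name depends on
# how the controller node advertises the AddWaypoint service.
#
#   import rospy
#   from uuv_control_msgs.srv import AddWaypoint
#   from uuv_control_msgs.msg import Waypoint
#
#   rospy.wait_for_service('/add_waypoint')
#   add_waypoint = rospy.ServiceProxy('/add_waypoint', AddWaypoint)
#   wp = Waypoint()
#   wp.point.x, wp.point.y, wp.point.z = 10.0, 5.0, -20.0
#   wp.max_forward_speed = 0.5
#   resp = add_waypoint(wp)                 # -> AddWaypointResponse
#   print(resp.success, len(resp.waypoints))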
| [
"[email protected]"
] | |
16a7313728d0e9233eb8ec1d62cdc8ee92db5597 | 6df57f3e07b40d02cf908959582457525c49e26d | /05 Variables/variables-numbers.py | ec71636db49962963ef3861673b366ec2de65466 | [] | no_license | Megamala/Learning-Python | 9763caaf35a04ea0bd7643fe876e262294ae765a | c144682e3b4ce96fcd252b6cce4b3e73b480e04b | refs/heads/master | 2021-01-17T10:33:12.933470 | 2016-06-13T17:49:59 | 2016-06-13T17:49:59 | 59,484,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #! C:\Python31\python
def main():
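    # The trailing commas below build a 3-tuple; under Python 3 it is (4.67, 6, 42.0).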
num = round(42 / 9, 2), 42 % 9, float(42)
print(type(num), num)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
15b3b8fe1ccf80c4f1bb2fce47f283106b2b63c5 | 665b63dee0016b3f219b26e0f6e3dd860c0977a6 | /humset/utils/shdi.py | a3f658fd93031b189123bbe2bf4bffa9085bab71 | [] | no_license | ChristopherAbram/ResearchProjectAutumn2020 | 517d3a15c160b366882ac7304c5e3ed279c9051d | 4a954f774c24819e8fac0f8e32db457b22b59767 | refs/heads/master | 2023-01-30T16:46:44.029289 | 2020-12-12T00:48:19 | 2020-12-12T00:48:19 | 299,929,694 | 0 | 0 | null | 2020-11-25T16:22:33 | 2020-09-30T13:33:45 | Jupyter Notebook | UTF-8 | Python | false | false | 572 | py | import os
import numpy as np
import pandas as pd
from humset.utils.definitions import get_project_path
def get_shdi(country_iso_code='NGA'):
hdi_path = os.path.join(get_project_path(), 'data', 'shdi', 'SHDI Complete 4.0 (1).csv')
data = pd.read_csv(hdi_path, usecols=['iso_code', 'year', 'level', 'GDLCODE', 'shdi'])
data = data.set_index('iso_code', drop=False)
data = data.loc[
(data['iso_code'] == country_iso_code) &
(data['year'] == 2018) &
(data['level'] == 'Subnat')]
return np.array(data.loc[:,['GDLCODE', 'shdi']]) | [
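# Usage sketch (assumes the CSV exists at data/shdi/ as configured above):
#   arr = get_shdi('NGA')   # -> Nx2 array of [GDLCODE, shdi] rows for 2018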
"[email protected]"
] | |
8ebe30c87198cca38f7fa30971095fcd5e45d5d4 | fed9ad8ad28bab27a3d4fa579c5a6bf0c2b8171e | /ml_hw2/perceptron_a.py | 2ec33519b7a38f3efc12557e2dbab3d06d2e3da1 | [] | no_license | egall/Machine_Learning | 650bccd24b8ac32e07adfef5ae612d0112572603 | 616c1b150e63092aa5cba5eb931bb797096cae54 | refs/heads/master | 2021-01-19T10:26:11.157550 | 2014-11-27T02:46:59 | 2014-11-27T02:46:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | #!/usr/bin/python
import random
import numpy as np
def perceptron_algo():
    my_inst_array = generate_instances_list()
    w_array = [0,0,0,0,0,0,0,0,0,0,0]
    mistakes = 0
    for i in range(0,500):
        my_inst = my_inst_array[i]
        y = my_inst[0]                      # label: a copy of feature 0
        a = np.dot(w_array, my_inst[1])     # activation
        if (y*a <= 0):                      # mistake (or tie) -> update
            print "updating"
            w_array = w_array + np.dot(y, my_inst[1])
            print "w_array = ", w_array
            mistakes += 1
    print "total mistakes =", mistakes
def generate_instances_list():
    # create a list to hold each of the 500 instances
instance_array = []
for i in range(0,500):
        # list that will hold 11 random +/-1 feature values
bin_list = []
# populate bin_list
for j in range(0,11):
binnum = random.randint(0,1)
if binnum == 0:
feature = -1
else:
feature = 1
bin_list.append(feature)
new_inst = (bin_list[0], bin_list)
instance_array.append(new_inst)
return instance_array
if __name__ == "__main__":
    perceptron_algo()
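# Expected behaviour (informal): since each label is a copy of feature 0,
# the updates drive w_array toward a vector dominated by w[0]; once
# y * dot(w, x) > 0 holds for every instance, no further updates occur.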
| [
"[email protected]"
] | |
356b4e875d79d72486affcd76b9f87afeceeec89 | 2bc23edccbc37963830b1520c7124249d14d468b | /PrimaryPositionCorrection.py | a0ee6a247ba93f6c0264bd02ddcebd03b68fe221 | [
"MIT"
] | permissive | andregtorres/ISTTOK | a856351736d6b174c2eda4f5d9f7486c5bbf8c72 | 56c8c7d46ed3a473d0e5e26963197fa19e661f18 | refs/heads/master | 2021-08-02T06:07:54.821919 | 2021-07-21T20:12:45 | 2021-07-21T20:12:45 | 142,753,763 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,673 | py | #Andre Torres 5-11-2018
#run in terminal as root
#computes the 'real' position of the primary field coils
from field import *
from getMirnov import *
from scipy.constants import mu_0
import keyboard
#SDAS
shotP=44501
#Coil signals
prim, times, tbs = getSignal(ch_prim, shotP)
#mirnov signals
times, dataP = getMirnovs(shotP,mirnv,True)
iV=np.mean(prim[2100:5900])
ftP=-flatTops(dataP,5000,6000) #MINUS SIGN!
P0=getMirnovFlux([62.,62.],[-13.,13.],[+1.,1.],14,biotSavart=False)*iV
print("START")
plt.ion()
x=np.arange(12)+1
fig=plt.figure()
ax=fig.add_subplot(211)
plt.tight_layout()
plt.ylabel("Mirnov Flux [uV s]")
line0,=ax.plot(x,ftP*1e6, label="Measurement")
line1,=ax.plot(x,P0*1e6, label="Original positions")
line2,=ax.plot(x,P0*1e6, label="Optimized positions")
plt.legend()
ax2=fig.add_subplot(212, sharex=ax)
plt.xlabel("Mirnov probe")
plt.ylabel("Flux difference [uV s]")
line20,=ax2.plot(x,(ftP-ftP)*1e6)
line21,=ax2.plot(x,(ftP-P0)*1e6)
line22,=ax2.plot(x,(ftP-P0)*1e6)
d=[[0,-1],[0,+1],[0,0],[1.,1.]]
initial=[[62.,62.],[-13.,13.]]
coil=1
while( not keyboard.is_pressed("q")):
if keyboard.is_pressed("1"):
coil=1
if keyboard.is_pressed("2"):
coil=2
if keyboard.is_pressed("3"):
coil=3
if keyboard.is_pressed("4"):
coil=4
if keyboard.is_pressed("5"):
coil=5
if keyboard.is_pressed("a"):
d[coil-1][0]+=0.1
if keyboard.is_pressed("z"):
d[coil-1][0]-=0.1
if keyboard.is_pressed("s"):
d[coil-1][0]+=0.5
if keyboard.is_pressed("x"):
d[coil-1][0]-=0.5
if keyboard.is_pressed("d"):
if coil in [1,2]:
d[0][1]+=0.1
d[1][1]+=0.1
else:
d[coil-1][1]+=0.1
if keyboard.is_pressed("c"):
if coil in [1,2]:
d[0][1]-=0.1
d[1][1]-=0.1
else:
d[coil-1][1]-=0.1
if keyboard.is_pressed("f"):
if coil in [1,2]:
d[0][1]+=0.5
d[1][1]+=0.5
else:
d[coil-1][1]+=0.5
if keyboard.is_pressed("v"):
if coil in [1,2]:
d[0][1]-=0.5
d[1][1]-=0.5
else:
d[coil-1][1]-=0.5
if keyboard.is_pressed(" "):
        print(d)
P=getMirnovFluxCorrected([62.+d[0][0],62.+d[1][0]],[-13.+d[0][1],13.+d[1][1]],[+1.,1.],14,d[3],biotSavart=False)*(iV+d[2][0])
line22.set_ydata((ftP-P)*1e6)
line2.set_ydata(P*1e6)
fig.canvas.draw()
fig.canvas.flush_events()
print(d)
#[-0.1, -3.9], [2.7, 5.9], [3.5, -9.1], [2.4, 7.1]  relative to the horizontal optimization
#[-2.9, -6.2], [-2.6, 9.7], [3.5, -8.2], [2.2, 5.7]
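# Hedged alternative (not used above): the four offsets could also be fitted
# automatically rather than by keyboard, e.g. with scipy.optimize.least_squares
# on the flat-top residual. Sketch, assuming getMirnovFlux keeps the call
# signature used above:
#
#   from scipy.optimize import least_squares
#   def residual(p):
#       d0r, d0z, d1r, d1z = p
#       model = getMirnovFlux([62. + d0r, 62. + d1r], [-13. + d0z, 13. + d1z],
#                             [+1., 1.], 14, biotSavart=False) * iV
#       return ftP - model
#   fit = least_squares(residual, x0=[0., 0., 0., 0.])   # fit.x ~ best offsets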
'''
#plots
d=[[-1.3, 1.7], [-2.6, 0.7], [-38., 0]] #6/11
Pold=getMirnovFlux([62.+d[0][0],62.+d[1][0]],[-13.+d[0][1],13.+d[1][1]],[+1.,1.],14,biotSavart=False)*(iV+d[2][0])
d=[[-1.4, -1.1], [-1.4, 0.9], [-24.7, 0]]
Pnew=getMirnovFlux([62.+d[0][0],62.+d[1][0]],[-13.+d[0][1],13.+d[1][1]],[+1.,1.],14,biotSavart=False)*(iV+d[2][0])
Pnew2=getMirnovFlux([62.+d[0][0],62.+d[1][0]],[-13.+d[0][1],13.+d[1][1]],[+1.,1.],14,biotSavart=True)*(iV+d[2][0])
d=[[-0.5, -1.0], [-0.5, 1.0], [-15, 0]]
Pprof=getMirnovFlux([62.+d[0][0],62.+d[1][0]],[-13.+d[0][1],13.+d[1][1]],[+1.,1.],14,biotSavart=False)*(iV+d[2][0])
d=[[-0, -0], [-0, 0], [0, 0]]
P0=getMirnovFlux([62.+d[0][0],62.+d[1][0]],[-13.+d[0][1],13.+d[1][1]],[+1.,1.],14,biotSavart=False)*(iV+d[2][0])
P0BS=getMirnovFlux([62.+d[0][0],62.+d[1][0]],[-13.+d[0][1],13.+d[1][1]],[+1.,1.],14,biotSavart=True)*(iV+d[2][0])
plt.figure()
plt.plot(np.arange(12)+1,Pold*1e6 -ftP*1e6, label="Old optimization -Measured flux")
plt.plot(np.arange(12)+1,Pnew*1e6 -ftP*1e6, label="New optimization -Measured flux")
plt.plot(np.arange(12)+1,Pprof*1e6 -ftP*1e6, label="Professors optimization -Measured flux")
plt.xlabel("Mirnov probe")
plt.ylabel("Mirnov Flux [uV s]")
plt.title ("Pulse #44501 - Mirnov flux with optimized coil position")
plt.legend()
plt.tight_layout()
plt.savefig("plots/PrimOptim_44501_comparison_rel2.png")
plt.figure()
plt.plot(np.arange(12)+1,ftP*1e6, "-", label="Measured flux")
plt.plot(np.arange(12)+1,Pnew*1e6,"-", label="New optimization")
plt.plot(np.arange(12)+1,Pnew2*1e6,"-", label="New optimization BS")
plt.plot(np.arange(12)+1,P0BS*1e6,"-", label="NO optimization BS")
plt.plot(np.arange(12)+1,P0*1e6,"-", label="NO optimization")
plt.xlabel("Mirnov probe")
plt.ylabel("Mirnov Flux [uV s]")
plt.title ("Pulse #44501 - Mirnov flux with optimized coil position")
plt.legend()
plt.tight_layout()
plt.figure()
plt.plot(np.arange(11)+1,np.diff(ftP)*1e6, "x", label="Measured flux")
plt.plot(np.arange(11)+1,np.diff(Pold)*1e6,"x", label="Old optimization")
plt.plot(np.arange(11)+1,np.diff(Pnew)*1e6,"x", label="New optimization")
plt.plot(np.arange(11)+1,np.diff(Pprof)*1e6, "x",label="Professors optimization")
plt.xlabel("Mirnov probe")
plt.ylabel("Mirnov Flux [uV s]")
plt.title ("Pulse #44501 - Mirnov flux with optimized coil position")
plt.legend()
plt.tight_layout()
plt.savefig("plots/PrimOptim_44501_comparison_diff2.png")
shotV=42952 #shot antigo
vert, times, tbs = getSignal(ch_vert, shotV )
times, dataV = getMirnovs(shotV,mirnv,True)
iV=np.mean(vert[1100:5900])
ftV=flatTops(dataV,4000,6000)
V0=getMirnovFlux([58.,58.,35.,35.],[-7.,7.,-7.,7.],[-1.,-1.,1.,1.],5,biotSavart=False)*iV
V2=getMirnovFlux([58.+d[0][0],58.+d[1][0],35.+d[2][0],35.+d[3][0]],[-7.+d[0][1],7.+d[1][1],-7+d[2][1],7.+d[3][1]],[-1.,-1.,1.,1.],5,biotSavart=False)*iV
plt.figure()
plt.plot(np.arange(12)+1,-ftV*1e6, label="Measured flux")
plt.plot(np.arange(12)+1,V0*1e6, label="Original PF positions")
plt.plot(np.arange(12)+1,V2*1e6, label="Optimized PF positions")
plt.xlabel("Mirnov probe")
plt.ylabel("Mirnov Flux [uV s]")
plt.title ("Pulse #42952 - Mirnov flux with optimized coil position")
plt.legend()
plt.tight_layout()
plt.savefig("plots/VerticalOptim_42952.png")
shotV=44473
vert, times, tbs = getSignal(ch_vert, shotV )
times, dataV = getMirnovs(shotV,mirnv,True)
plt.plot(vert)
iV=np.mean(vert[1100:5900])
ftV=flatTops(dataV,4000,6000)
V0=getMirnovFlux([58.,58.,35.,35.],[-7.,7.,-7.,7.],[-1.,-1.,1.,1.],5,biotSavart=False)*iV
V2=getMirnovFlux([58.+d[0][0],58.+d[1][0],35.+d[2][0],35.+d[3][0]],[-7.+d[0][1],7.+d[1][1],-7+d[2][1],7.+d[3][1]],[-1.,-1.,1.,1.],5,biotSavart=False)*iV
plt.figure()
plt.plot(np.arange(12)+1,ftV*1e6, label="Measured flux")
plt.plot(np.arange(12)+1,V0*1e6, label="Original PF positions")
plt.plot(np.arange(12)+1,V2*1e6, label="Optimized PF positions")
plt.xlabel("Mirnov probe")
plt.ylabel("Mirnov Flux [uV s]")
plt.title ("Pulse #44473 - Mirnov flux with optimized coil position")
plt.legend()
plt.tight_layout()
plt.savefig("plots/VerticalOptim_44473.png")
'''
| [
"[email protected]"
] | |
6c46a926d9225e406176b89f40a567506cb3720f | a6764755670483fd938c290cd4178669ee703585 | /toolkit/vector2d.py | c7523a76a8e557c0558b07f877ffc2639ac4fce3 | [] | no_license | PumpkinJimmy/pygamepp | 27b1a54100ac0703532cfa0ef80a2207ae6d3f1f | d30c729c797c4d18ee50b340363772493d4cf272 | refs/heads/master | 2021-01-20T10:24:27.425173 | 2017-09-09T00:16:28 | 2017-09-09T00:16:28 | 101,634,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,379 | py | ################## http://www.pygame.org/wiki/2DVectorClass ##################
import operator
import math
class Vec2d(object):
"""2d vector class, supports vector and scalar operators,
and also provides a bunch of high level functions
"""
__slots__ = ['x', 'y']
def __init__(self, x_or_pair, y=None):
if y == None:
self.x = x_or_pair[0]
self.y = x_or_pair[1]
else:
self.x = x_or_pair
self.y = y
def __len__(self):
return 2
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise IndexError("Invalid subscript " + str(key) + " to Vec2d")
def __setitem__(self, key, value):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise IndexError("Invalid subscript " + str(key) + " to Vec2d")
# String representaion (for debugging)
def __repr__(self):
return 'Vec2d(%s, %s)' % (self.x, self.y)
# Comparison
def __eq__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x == other[0] and self.y == other[1]
else:
return False
def __ne__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x != other[0] or self.y != other[1]
else:
return True
def __nonzero__(self):
return bool(self.x or self.y)
# Generic operator handlers
def _o2(self, other, f):
"Any two-operator operation where the left operand is a Vec2d"
if isinstance(other, Vec2d):
return Vec2d(f(self.x, other.x),
f(self.y, other.y))
elif hasattr(other, "__getitem__"):
return Vec2d(f(self.x, other[0]),
f(self.y, other[1]))
else:
return Vec2d(f(self.x, other),
f(self.y, other))
def _r_o2(self, other, f):
"Any two-operator operation where the right operand is a Vec2d"
if hasattr(other, "__getitem__"):
return Vec2d(f(other[0], self.x),
f(other[1], self.y))
else:
return Vec2d(f(other, self.x),
f(other, self.y))
def _io(self, other, f):
"inplace operator"
if hasattr(other, "__getitem__"):
self.x = f(self.x, other[0])
self.y = f(self.y, other[1])
else:
self.x = f(self.x, other)
self.y = f(self.y, other)
return self
# Addition
def __add__(self, other):
if isinstance(other, Vec2d):
return Vec2d(self.x + other.x, self.y + other.y)
elif hasattr(other, "__getitem__"):
return Vec2d(self.x + other[0], self.y + other[1])
else:
return Vec2d(self.x + other, self.y + other)
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vec2d):
self.x += other.x
self.y += other.y
elif hasattr(other, "__getitem__"):
self.x += other[0]
self.y += other[1]
else:
self.x += other
self.y += other
return self
# Subtraction
def __sub__(self, other):
if isinstance(other, Vec2d):
return Vec2d(self.x - other.x, self.y - other.y)
elif hasattr(other, "__getitem__"):
return Vec2d(self.x - other[0], self.y - other[1])
else:
return Vec2d(self.x - other, self.y - other)
def __rsub__(self, other):
if isinstance(other, Vec2d):
return Vec2d(other.x - self.x, other.y - self.y)
if hasattr(other, "__getitem__"):
return Vec2d(other[0] - self.x, other[1] - self.y)
else:
return Vec2d(other - self.x, other - self.y)
def __isub__(self, other):
if isinstance(other, Vec2d):
self.x -= other.x
self.y -= other.y
elif hasattr(other, "__getitem__"):
self.x -= other[0]
self.y -= other[1]
else:
self.x -= other
self.y -= other
return self
# Multiplication
def __mul__(self, other):
if isinstance(other, Vec2d):
return Vec2d(self.x * other.x, self.y * other.y)
if hasattr(other, "__getitem__"):
return Vec2d(self.x * other[0], self.y * other[1])
else:
return Vec2d(self.x * other, self.y * other)
__rmul__ = __mul__
def __imul__(self, other):
if isinstance(other, Vec2d):
self.x *= other.x
self.y *= other.y
elif hasattr(other, "__getitem__"):
self.x *= other[0]
self.y *= other[1]
else:
self.x *= other
self.y *= other
return self
# Division
def __div__(self, other):
return self._o2(other, operator.div)
def __rdiv__(self, other):
return self._r_o2(other, operator.div)
def __idiv__(self, other):
return self._io(other, operator.div)
def __floordiv__(self, other):
return self._o2(other, operator.floordiv)
def __rfloordiv__(self, other):
return self._r_o2(other, operator.floordiv)
def __ifloordiv__(self, other):
return self._io(other, operator.floordiv)
def __truediv__(self, other):
return self._o2(other, operator.truediv)
def __rtruediv__(self, other):
return self._r_o2(other, operator.truediv)
def __itruediv__(self, other):
        return self._io(other, operator.truediv)
# Modulo
def __mod__(self, other):
return self._o2(other, operator.mod)
def __rmod__(self, other):
return self._r_o2(other, operator.mod)
def __divmod__(self, other):
return self._o2(other, operator.divmod)
def __rdivmod__(self, other):
return self._r_o2(other, operator.divmod)
# Exponentation
def __pow__(self, other):
return self._o2(other, operator.pow)
def __rpow__(self, other):
return self._r_o2(other, operator.pow)
# Bitwise operators
def __lshift__(self, other):
return self._o2(other, operator.lshift)
def __rlshift__(self, other):
return self._r_o2(other, operator.lshift)
def __rshift__(self, other):
return self._o2(other, operator.rshift)
def __rrshift__(self, other):
return self._r_o2(other, operator.rshift)
def __and__(self, other):
return self._o2(other, operator.and_)
__rand__ = __and__
def __or__(self, other):
return self._o2(other, operator.or_)
__ror__ = __or__
def __xor__(self, other):
return self._o2(other, operator.xor)
__rxor__ = __xor__
# Unary operations
def __neg__(self):
return Vec2d(operator.neg(self.x), operator.neg(self.y))
def __pos__(self):
return Vec2d(operator.pos(self.x), operator.pos(self.y))
def __abs__(self):
return Vec2d(abs(self.x), abs(self.y))
def __invert__(self):
return Vec2d(-self.x, -self.y)
# vectory functions
def get_length_sqrd(self):
return self.x ** 2 + self.y ** 2
def get_length(self):
return math.sqrt(self.x ** 2 + self.y ** 2)
def __setlength(self, value):
length = self.get_length()
self.x *= value / length
self.y *= value / length
length = property(get_length, __setlength, None, "gets or sets the magnitude of the vector")
def rotate(self, angle_degrees):
radians = math.radians(angle_degrees)
cos = math.cos(radians)
sin = math.sin(radians)
x = self.x * cos - self.y * sin
y = self.x * sin + self.y * cos
self.x = x
self.y = y
def rotated(self, angle_degrees):
radians = math.radians(angle_degrees)
cos = math.cos(radians)
sin = math.sin(radians)
x = self.x * cos - self.y * sin
y = self.x * sin + self.y * cos
return Vec2d(x, y)
def get_angle(self):
if (self.get_length_sqrd() == 0):
return 0
return math.degrees(math.atan2(self.y, self.x))
def __setangle(self, angle_degrees):
self.x = self.length
self.y = 0
self.rotate(angle_degrees)
angle = property(get_angle, __setangle, None, "gets or sets the angle of a vector")
def get_angle_between(self, other):
cross = self.x * other[1] - self.y * other[0]
dot = self.x * other[0] + self.y * other[1]
return math.degrees(math.atan2(cross, dot))
def normalized(self):
length = self.length
if length != 0:
return self / length
return Vec2d(self)
def normalize_return_length(self):
length = self.length
if length != 0:
self.x /= length
self.y /= length
return length
def perpendicular(self):
return Vec2d(-self.y, self.x)
def perpendicular_normal(self):
length = self.length
if length != 0:
return Vec2d(-self.y / length, self.x / length)
return Vec2d(self)
def dot(self, other):
return float(self.x * other[0] + self.y * other[1])
def get_distance(self, other):
return math.sqrt((self.x - other[0]) ** 2 + (self.y - other[1]) ** 2)
def get_dist_sqrd(self, other):
return (self.x - other[0]) ** 2 + (self.y - other[1]) ** 2
def projection(self, other):
other_length_sqrd = other[0] * other[0] + other[1] * other[1]
projected_length_times_other_length = self.dot(other)
return other * (projected_length_times_other_length / other_length_sqrd)
def cross(self, other):
return self.x * other[1] - self.y * other[0]
def interpolate_to(self, other, range):
return Vec2d(self.x + (other[0] - self.x) * range, self.y + (other[1] - self.y) * range)
def convert_to_basis(self, x_vector, y_vector):
return Vec2d(self.dot(x_vector) / x_vector.get_length_sqrd(), self.dot(y_vector) / y_vector.get_length_sqrd())
def __getstate__(self):
return [self.x, self.y]
def __setstate__(self, dict):
self.x, self.y = dict
| [
"[email protected]"
] | |
dac0fb04c810b23f6daa4d74eb866acd12e58597 | 6fec54ad58eb2ca49c0efb5af17c39729de1077e | /i2c_motorcontrol.py | 2432e777072ad606d2d1c40367958732e415a447 | [] | no_license | yongkingdom/sanitizationondemandrobot_with_deliverysecured | 8c543c3a3dbf51d5c7f6671792c5f842452a6c2e | 95bcc9cd4367add8bd892aeb1d5e21c22fe42414 | refs/heads/master | 2023-04-26T05:00:16.833427 | 2021-06-05T13:15:12 | 2021-06-05T13:15:12 | 277,834,628 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | from smbus2 import SMBus
import time
address = 0x0A
read_on = 1
read_off = 0
write_reg = 0 # 0 = i2c writing is disabled for motor control
              # 1 = i2c writing is enabled for motor control
              # 2 = rf is disabled for motor control
              # 3 = rf is enabled for motor control
              # 4 = readRFChannel5 is enabled for motor control read
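# Wire-format sketch (assumes the slave firmware decodes [command, value]
# pairs): motor_forward(30) performs, on the Pi's I2C bus 1, a block write to
# slave 0x0A, register 1, with payload [8, 30] -- command byte 8 ("forward")
# followed by the speed.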
def motor_read_block(regread = read_on,num=10):
with SMBus(1) as bus:
# Read a block of 16 bytes from address 80, offset 0
block = bus.read_i2c_block_data(address, regread, num)
# Returned value is a list of 16 bytes
#print(block)
return(block,num)
def motor_write_block(regwrite = 0,data = [0,1,2,3,4]):
with SMBus(1) as bus:
# Write a byte to address 80, offset 0
bus.write_i2c_block_data(address, regwrite, data)
def rf_disable(write_reg = 2):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [44])
time.sleep(0.01)
def rf_enable(write_reg = 3):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [88])
time.sleep(0.01)
def motor_forward(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [8,speed])
time.sleep(waitime)
def motor_reverse(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [2,speed])
time.sleep(waitime)
def motor_stop(deceleration = 0,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [5,deceleration])
if(deceleration>100):
time.sleep(2)
elif ((deceleration>10) & (deceleration<100)):
time.sleep(1)
else:
time.sleep(0)
def motor_left(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [4,speed])
time.sleep(waitime)
def motor_right(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [6,speed])
time.sleep(waitime)
def motor_cw(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [10,speed])
time.sleep(waitime)
def motor_ccw(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [11,speed])
time.sleep(waitime)
def motor_fwdspinleft(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [15,speed])
time.sleep(waitime)
def motor_fwdspinright(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [16,speed])
time.sleep(waitime)
def motor_rwdspinleft(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [17,speed])
time.sleep(waitime)
def motor_rwdspinright(speed = 10,waitime=0.1,write_reg = 1):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [18,speed])
time.sleep(waitime)
def readRF_enable(write_reg = 4):
with SMBus(1) as bus:
bus.write_i2c_block_data(address, write_reg, [1])
time.sleep(0.01)
def readRFChannel5(regread = read_on,num=1):
with SMBus(1) as bus:
block = bus.read_i2c_block_data(address, regread, num)
        # Returned value is a list of num bytes
        #print(block)
        return(block,num)
rf_disable() #disable radio before running control from the RPi.
if(__name__=='__main__'):
motor_fwdspinleft(45,1)
motor_fwdspinright(45,1)
motor_rwdspinleft(45,1)
motor_rwdspinright(45,1)
motor_stop(15)
# rf_enable()
# readRF_enable()
# time.sleep(0.1)
# for i in range(20):
# print(readRFChannel5())
# time.sleep(0.1)
| [
"[email protected]"
] | |
415ddf32d382ad1784dd763a346699f3e9498114 | fa63ea75ac7e25a424aee501c955ffce18b030b5 | /tests/basics/class_instance_override.py | 1ebd61d062f0acca4a70464b1ce79ac18242394a | [
"MIT"
] | permissive | HangCoder/micropython-simulator | ee72145b55c21ebc20c5ca38a95e9df26871e29a | 7fb13eeef4a85f21cae36f1d502bcc53880e1815 | refs/heads/master | 2022-02-17T22:59:59.079722 | 2019-09-09T12:19:38 | 2019-09-09T12:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | # test that we can override a class method with an instance method
class A:
def foo(self):
return 1
a = A()
print(a.foo())
a.foo = lambda:2
print(a.foo())
print("PASS") | [
"[email protected]"
] | |
348c0d76e8311e454ffeb8f380f781cdd3e04f9e | ab60b97b72d596c0833d8b1c424a69737e768813 | /general/forms.py | 4c619967f6ee67ae00d697a7589c106eef09cdd8 | [] | no_license | Richa-09/Car-Pool-Management-System | c5ce55acafb0de01b89440aca955a15259ac6901 | bf9cccf5be8bf1e814046cc41264ae4cd9d91c1a | refs/heads/master | 2021-09-24T03:55:41.446611 | 2020-03-13T16:19:26 | 2020-03-13T16:19:26 | 245,968,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | from django import forms
class DateInput(forms.DateInput):
input_type = 'date'
class TimeInput(forms.TimeInput):
input_type = 'time'
class OfferForm(forms.Form):
destination1 = forms.CharField(label="From", widget = forms.TextInput(
attrs = {
'class' : 'form-control',
'placeholder' : 'Enter start of journey...',
'label' : 'From'
}
))
destination2 = forms.CharField(label="To", widget = forms.TextInput(
attrs = {
'class' : 'form-control',
'placeholder' : 'Enter end of journey...',
'label' : 'To'
}
))
date = forms.DateField(label="Date of Journey", widget = DateInput(
attrs = {
'class' : 'form-control',
'placeholder' : 'Enter date of journey',
'label' : 'Date of Journey'
}
))
time = forms.TimeField(label="Time of Journey",widget= TimeInput(
attrs = {
'class' : 'form-control',
'placeholder' : 'Enter start of journey...',
'label' : 'Time of Journey'
}
))
carModel = forms.CharField(label="Model of vehicle", widget=forms.TextInput(
attrs = {
'class' : 'form-control',
'placeholder' : 'Specify car model',
            'label' : 'Model of vehicle'
}
))
seatsAvailable = forms.IntegerField(label="Number of seats available", widget=forms.TextInput(
attrs = {
'class' : 'form-control',
'placeholder' : 'Specify total extra seats',
            'label' : 'Number of seats available'
}
))
cost = forms.IntegerField(label="Cost per Person", widget=forms.TextInput(
attrs = {
'class' : 'form-control',
'placeholder' : 'Total charge in Rupees',
            'label' : 'Cost per Person'
}
))
| [
"[email protected]"
] | |
6c50bbd84ea20f575427c4cfbb5e4458c4e27aa5 | 22a5d684341cee8f1095c3fe193f01f40f8121db | /2019/Qualification/B. You Can Go Your Own Way/2019-q-b.py | 7c1a25eb2456f2f3582410a44bbae9eefdcc4e0f | [] | no_license | andy1li/codejam | 161b1db6faab372a4c2c4ce5956942387c650bed | 3aa6ab1673064b8c80b5f56422bd496b372b30f3 | refs/heads/master | 2022-06-28T02:42:53.980149 | 2022-06-27T20:15:11 | 2022-06-27T20:15:11 | 53,395,936 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # 2019 Qualification Round - B. You Can Go Your Own Way
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000051705/00000000000881da
def solve(p) -> str:
return ''.join(
'E' if move=='S' else 'S'
for move in p
)
#------------------------------------------------------------------------------#
for case in range(1, int(input())+1):
_, p = input(), input()
result = solve(p)
output = 'Case #%d: %s' %(case, result)
print(output) | [
"[email protected]"
] | |
d1ce267d0a88bfa1444e2fff0194b5aa2cd274cd | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_vpn_connections_operations.py | a69c4f47e8deefb250db906418d47320e65733b4 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 36,272 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnConnectionsOperations:
"""VpnConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.VpnConnection":
"""Retrieves the details of a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.VpnConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
vpn_connection_parameters: "_models.VpnConnection",
**kwargs: Any
) -> "_models.VpnConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
vpn_connection_parameters: "_models.VpnConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.VpnConnection"]:
"""Creates a vpn connection to a scalable vpn gateway if it doesn't exist else updates the
existing connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param vpn_connection_parameters: Parameters supplied to create or Update a VPN Connection.
:type vpn_connection_parameters: ~azure.mgmt.network.v2020_11_01.models.VpnConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.VpnConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
vpn_connection_parameters=vpn_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
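    # Usage sketch (hedged; credential setup and the VpnConnection payload are
    # placeholders, not part of this generated module):
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.network.aio import NetworkManagementClient
    #
    #   async def create_connection(parameters):
    #       async with NetworkManagementClient(
    #               DefaultAzureCredential(), "<subscription-id>") as client:
    #           poller = await client.vpn_connections.begin_create_or_update(
    #               "my-rg", "my-gateway", "my-connection", parameters)
    #           return await poller.result()   # -> VpnConnection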
async def _delete_initial(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gateway_name: str,
connection_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
async def _start_packet_capture_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_connection_name: str,
parameters: Optional["_models.VpnConnectionPacketCaptureStartParameters"] = None,
**kwargs: Any
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._start_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'vpnConnectionName': self._serialize.url("vpn_connection_name", vpn_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'VpnConnectionPacketCaptureStartParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{vpnConnectionName}/startpacketcapture'} # type: ignore
async def begin_start_packet_capture(
self,
resource_group_name: str,
gateway_name: str,
vpn_connection_name: str,
parameters: Optional["_models.VpnConnectionPacketCaptureStartParameters"] = None,
**kwargs: Any
) -> AsyncLROPoller[str]:
"""Starts packet capture on Vpn connection in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_connection_name: The name of the vpn connection.
:type vpn_connection_name: str
:param parameters: Vpn Connection packet capture parameters supplied to start packet capture on
gateway connection.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.VpnConnectionPacketCaptureStartParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_packet_capture_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_connection_name=vpn_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'vpnConnectionName': self._serialize.url("vpn_connection_name", vpn_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{vpnConnectionName}/startpacketcapture'} # type: ignore
async def _stop_packet_capture_initial(
self,
resource_group_name: str,
gateway_name: str,
vpn_connection_name: str,
parameters: Optional["_models.VpnConnectionPacketCaptureStopParameters"] = None,
**kwargs: Any
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._stop_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'vpnConnectionName': self._serialize.url("vpn_connection_name", vpn_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'VpnConnectionPacketCaptureStopParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{vpnConnectionName}/stoppacketcapture'} # type: ignore
async def begin_stop_packet_capture(
self,
resource_group_name: str,
gateway_name: str,
vpn_connection_name: str,
parameters: Optional["_models.VpnConnectionPacketCaptureStopParameters"] = None,
**kwargs: Any
) -> AsyncLROPoller[str]:
"""Stops packet capture on Vpn connection in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param vpn_connection_name: The name of the vpn connection.
:type vpn_connection_name: str
:param parameters: Vpn Connection packet capture parameters supplied to stop packet capture on
gateway connection.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.VpnConnectionPacketCaptureStopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_packet_capture_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
vpn_connection_name=vpn_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'vpnConnectionName': self._serialize.url("vpn_connection_name", vpn_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{vpnConnectionName}/stoppacketcapture'} # type: ignore
def list_by_vpn_gateway(
self,
resource_group_name: str,
gateway_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListVpnConnectionsResult"]:
"""Retrieves all vpn connections for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnConnectionsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVpnConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'} # type: ignore
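    # Hedged usage sketch for the pollers defined above (the client wiring is
    # an assumption, not part of this generated module):
    #
    #   client = NetworkManagementClient(credential, subscription_id)
    #   poller = await client.vpn_connections.begin_start_packet_capture(
    #       resource_group_name="rg", gateway_name="gw",
    #       vpn_connection_name="conn")
    #   sas_url = await poller.result()  # the LRO deserializes to a plain 'str'
    #   stop_poller = await client.vpn_connections.begin_stop_packet_capture(
    #       "rg", "gw", "conn")
    #   await stop_poller.result()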
| [
"[email protected]"
] | |
e8036859d6eb86ecf2842b24e7d7974337d97760 | 583db8851c609f03f722884557cfc67de0ce564e | /pysmapi/interfaces/VMRELOCATE_Image_Attributes.py | 80284c22530f022bfd3a85ce3cb8de7f622ba371 | [
"Apache-2.0"
] | permissive | lllucius/pysmapi | ab0b4409bfda6a61dab7805e2033d71d09a96493 | c0d802edb58e835e4d48cb9c28ccfccfe5b5c686 | refs/heads/master | 2020-04-20T18:07:46.699611 | 2019-06-25T04:27:41 | 2019-06-25T04:27:41 | 169,009,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py |
# Copyright 2018-2019 Leland Lucius
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from pysmapi.smapi import *
class VMRELOCATE_Image_Attributes(Request):
def __init__(self,
relocation_setting = "",
domain_name = "",
archforce = "",
**kwargs):
super(VMRELOCATE_Image_Attributes, self).__init__(**kwargs)
# Request parameters
self._relocation_setting = relocation_setting
self._domain_name = domain_name
self._archforce = archforce
@property
def relocation_setting(self):
return self._relocation_setting
@relocation_setting.setter
def relocation_setting(self, value):
self._relocation_setting = value
@property
def domain_name(self):
return self._domain_name
@domain_name.setter
def domain_name(self, value):
self._domain_name = value
@property
def archforce(self):
return self._archforce
@archforce.setter
def archforce(self, value):
self._archforce = value
def pack(self):
buf = ""
# relocation_setting=value (string,2-3,char26)
buf += f"relocation_setting={self._relocation_setting}\x00"
# domain_name=value (string,0-8,char42)
if len(self._domain_name) > 0:
buf += f"domain_name={self._domain_name}\x00"
# archforce=value (string,0-3,char26)
if len(self._archforce) > 0:
buf += f"archforce={self._archforce}\x00"
return s2b(buf)
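    # Hedged example of the wire format pack() produces: with
    # relocation_setting="MOVE" and domain_name="ZVMA", the result is the
    # NUL-delimited byte string b"relocation_setting=MOVE\x00domain_name=ZVMA\x00";
    # empty optional fields are skipped by the length checks above.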
| [
"[email protected]"
] | |
72d7ed4c9a7d93991c9ae22fdc5bf27081241d55 | b8e594b62094039547eb2068d584e5f3df3bdbc6 | /filterandgroup.py | 37bd98345f80ee783a9c9f17a9f75c3152c7c2c7 | [] | no_license | hgelfond/assignments | b5ae27d70b69e157d63bad485b5038d36cc124b3 | 673b8748c98f1dd6e3cac73ce2a068e3a0342082 | refs/heads/master | 2021-10-10T20:35:06.598235 | 2019-01-16T19:49:10 | 2019-01-16T19:49:10 | 164,486,225 | 0 | 0 | null | 2019-01-08T23:57:45 | 2019-01-07T19:59:46 | Python | UTF-8 | Python | false | false | 737 | py | from pprint import pprint
cars = [
{"model": "Yaris", "make": "Toyota", "color": "red"},
{"model": "Auris", "make": "Toyota", "color": "red"},
{"model": "Camry", "make": "Toyota", "color": "green"},
{"model": "Prius", "make": "Toyota", "color": "yellow"},
{"model": "Civic", "make": "Honda", "color": "red"},
{"model": "Model 3", "make": "Tesla", "color": "red"}
]
# filter to red cars
red_cars = []
for car in cars:
if car['color'] == 'red':
red_cars.append(car)
# group by make
red_cars_by_make = {}
for car in red_cars:
make = car['make']
if make in red_cars_by_make:
red_cars_by_make[make].append(car)
else:
red_cars_by_make[make] = [car]
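
# A more idiomatic sketch of the same filter-and-group step using the standard
# library; defined as a helper so the script's output below is unchanged.
from collections import defaultdict

def group_red_cars_by_make(cars):
    grouped = defaultdict(list)  # make -> list of red cars
    for car in cars:
        if car['color'] == 'red':
            grouped[car['make']].append(car)
    return dict(grouped)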
pprint(red_cars_by_make) | [
"[email protected]"
] | |
04b2b23ecc92dc86537e3f3f2dd8b526dead7b52 | de156134a1b22eef9f766f06e9d0b51eefbb5de8 | /018 doubleChars.py | a7ea620ccc73ee8f1e0a8ee1a843e50c0fa4e58a | [] | no_license | 6chelo6/python-dev-fund | da00a8575e3453d860a75c21d4105956e9dda6c3 | dc6bef4921038aed5eeb10385d8ed921fc66036c | refs/heads/master | 2020-08-19T01:39:39.220670 | 2019-10-17T18:35:19 | 2019-10-17T18:35:19 | 215,860,370 | 0 | 0 | null | 2019-10-17T18:35:22 | 2019-10-17T18:31:42 | null | UTF-8 | Python | false | false | 119 | py | #!/usr/bin/env python
def double_char(s):
    a = ""
    for ch in s:
        a += ch * 2  # append each character twice
    return a

print(double_char('Marcelo'))
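# Equivalent one-liner sketch: "".join(ch * 2 for ch in s)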
| [
"Marcelo [email protected]"
] | Marcelo [email protected] |
fa3a5f89cbe61e9cc2841cf568ba9590707d9f1b | 9ac99a99dc8f79f52fbbe3e8a5b311b518fe45d9 | /apps/performance/api/HCSSYS_Departments.py | 2c65525475a54e449c8bcf2577f5509aeb1051f7 | [] | no_license | nttlong/quicky-01 | eb61620e01f04909d564244c46a03ca2b69dfecc | 0f5610aa7027429bdd9ca9b45899a472c372c6cc | refs/heads/master | 2020-03-25T17:45:31.633347 | 2018-11-27T15:02:30 | 2018-11-27T15:02:30 | 143,994,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,381 | py | # -*- coding: utf-8 -*-
from bson import ObjectId
import models
import datetime
import logging
import threading
import common
logger = logging.getLogger(__name__)
global lock
lock = threading.Lock()
from Query import DepartmentGroup
from hcs_authorization import action_type, authorization
import qmongo
@authorization.authorise(common = True)
def get_list(args):
items = qmongo.models.HCSSYS_Departments.aggregate.project(
department_code = 1,
department_name = 1,
parent_code = 1
)
return items.get_list()
@authorization.authorise(action = action_type.Action.READ)
def get_department_by_dept_code(args):
try:
if args['data'] != None and args['data'].has_key('department_code'):
items =qmongo.models.HCSSYS_Departments.aggregate.project(
department_code = 1,
department_name = 1,
department_name2 = 1,
department_alias = 1,
parent_code = 1,
level = 1,
level_code = 1,
department_tel = 1,
department_fax = 1,
department_email = 1,
department_address = 1,
nation_code = 1,
province_code = 1,
district_code = 1,
is_company = 1,
is_fund = 1,
is_fund_bonus = 1,
decision_no = 1,
decision_date = 1,
effect_date = 1,
license_no = 1,
tax_code = 1,
lock_date = 1,
logo_image = 1,
manager_code = 1,
secretary_code = 1,
ordinal = 1,
lock = 1,
note = 1,
region_code = 1,
domain_code = 1,
signed_by = 1
).match("department_code == {0}", args['data']['department_code'])
return items.get_item()
raise(Exception("not found department_code"))
except Exception as ex:
raise(ex)
def get_list_department_by_parent_code(args):
searchText = args['data'].get('search', '')
    pageSize = args['data'].get('pageSize', 20)
    pageIndex = args['data'].get('pageIndex', 0)
    sort = args['data'].get('sort', None)
pageIndex = (lambda pIndex: pIndex if pIndex != None else 0)(pageIndex)
pageSize = (lambda pSize: pSize if pSize != None else 20)(pageSize)
ret=qmongo.models.HCSSYS_Departments.aggregate.project(
department_code = 1,
department_name = 1,
department_alias = 1,
department_tel = 1,
level_code = 1,
lock = 1
)
ret.match("level_code == {0}", args['data']['where']['department_code'])
ret = common.filter_lock(ret, args)
if(searchText != None):
ret.match("contains(department_code, @name) or contains(department_name, @name)" + \
"or contains(department_alias, @name) or contains(department_tel, @name)",name=searchText.strip())
if(sort != None):
ret.sort(sort)
return ret.get_page(pageIndex, pageSize)
@authorization.authorise(action = action_type.Action.READ)
def get_tree(args):
ret=DepartmentGroup.get_department_group()
return ret.get_list()
@authorization.authorise(common = True)
def get_department_group():
ret=qmongo.models.HCSSYS_Departments.aggregate
ret.left_join(models.auth_user_info(), "created_by", "username", "uc")
ret.left_join(models.auth_user_info(), "modified_by", "username", "um")
ret.project(
_id = "_id",
department_code = "department_code",
factor_group_name = "factor_group_name",
factor_group_name2 = "factor_group_name2",
parent_code = "parent_code",
level = "level",
level_code = "level_code",
ordinal = "ordinal",
note = "note",
lock = "lock",
created_by="uc.login_account",
created_on= "created_on",
modified_on="switch(case(modified_on!='',modified_on),'')",
modified_by="switch(case(modified_by!='',um.login_account),'')",
)
ret.sort(dict(
ordinal = 1
))
return ret
@authorization.authorise(action = action_type.Action.CREATE)
def insert(args):
try:
lock.acquire()
ret = {}
if args['data'] != None:
if not args['data'].has_key('department_code') or not args['data'].has_key('department_name'):
field_list = []
if not args['data'].has_key('department_code'):
field_list.append("department_code")
if not args['data'].has_key('department_name'):
field_list.append("department_name")
lock.release()
return {
"error":{
"fields":field_list,
"code":"missing"
}
}
data = set_dict_data(args)
if args['data'].has_key('parent_code') and args['data']["parent_code"] != None :
parent_dept = qmongo.models.HCSSYS_Departments.aggregate.project(
department_code=1,
level=1,
level_code=1
).match("department_code == {0}", args['data']['parent_code']).get_item()
data['level'] = parent_dept['level'] + 1
data['level_code'] = parent_dept['level_code'] + [args['data']['department_code']]
ret = qmongo.models.HCSSYS_Departments.insert(data)
lock.release()
return ret
lock.release()
return dict(
error = "request parameter is not exist"
)
except Exception as ex:
lock.release()
raise(ex)
@authorization.authorise(action = action_type.Action.WRITE)
def update(args):
try:
lock.acquire()
ret = {}
if args['data'] != None:
data = set_dict_data(args)
ret = qmongo.models.HCSSYS_Departments.update(data, "department_code == @department_code", department_code = args['data']['department_code'])
lock.release()
return ret
lock.release()
return dict(
error = "request parameter is not exist"
)
except Exception as ex:
lock.release()
raise(ex)
@authorization.authorise(action = action_type.Action.DELETE)
def delete(args):
try:
lock.acquire()
ret = {}
if args['data'] != None:
ret = qmongo.models.HCSSYS_Departments.delete("department_code in {0}", [x["department_code"]for x in args['data']])
lock.release()
return ret
lock.release()
return dict(
error = "request parameter is not exist"
)
except Exception as ex:
lock.release()
raise(ex)
@authorization.authorise(common = True)
def set_dict_data(args):
    d = args['data']
    # dict.get returns None for missing keys, matching the original pattern
    data = dict(
        department_code = d['department_code'],
        department_name = d['department_name'],
        department_name2 = d.get('department_name2'),
        department_alias = d.get('department_alias'),
        parent_code = d.get('parent_code'),
        level = d.get('level'),
        level_code = d.get('level_code'),
        department_tel = d.get('department_tel'),
        department_fax = d.get('department_fax'),
        department_email = d.get('department_email'),
        department_address = d.get('department_address'),
        nation_code = d.get('nation_code'),
        province_code = d.get('province_code'),
        district_code = d.get('district_code'),
        is_company = d.get('is_company'),
        is_fund = d.get('is_fund'),
        is_fund_bonus = d.get('is_fund_bonus'),
        decision_no = d.get('decision_no'),
        decision_date = d.get('decision_date'),
        effect_date = d.get('effect_date'),
        license_no = d.get('license_no'),
        tax_code = d.get('tax_code'),
        lock_date = d.get('lock_date'),
        logo_image = d.get('logo_image'),
        manager_code = d.get('manager_code'),
        secretary_code = d.get('secretary_code'),
        ordinal = d.get('ordinal'),
        lock = d.get('lock'),
        note = d.get('note'),
        region_code = d.get('region_code'),
        domain_code = d.get('domain_code'),
        signed_by = d.get('signed_by')
    )
return data
def getRootDepartment(args):
ret = {}
collection = common.get_collection('HCSSYS_Departments').aggregate([
{"$match": {
"$and": [{'parent_code': None}, {'level':1}]
}},
{"$project": {
"parent_code":1,
"department_code":1,
"department_name":1,
"level_code":1,
"level":1,
"ordinal":1
}},
])
ret = list(collection)
return ret | [
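# A roughly equivalent sketch of the query above using a plain find() instead
# of the aggregation pipeline (assuming a pymongo-style collection object):
#
#   common.get_collection('HCSSYS_Departments').find(
#       {'parent_code': None, 'level': 1},
#       {'parent_code': 1, 'department_code': 1, 'department_name': 1,
#        'level_code': 1, 'level': 1, 'ordinal': 1})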
"[email protected]"
] | |
cd369342bfc0279dbec7e630389ff1ade7de3026 | 2c5e83885f6802305f7d6cb3b75462c4e52a41a1 | /blog/forms.py | 46b1206e87fa68dee59b3c3b553fc5b551c5fe61 | [] | no_license | carlmachaalany/MyBlog | 51db139391610f213ef265eb084f4607b44fbf09 | 9342a8fcad29699ecd283977f04829c48693097e | refs/heads/master | 2023-02-26T18:18:20.679981 | 2021-02-08T15:07:06 | 2021-02-08T15:07:06 | 337,112,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | from django import forms
from blog.models import Post, Comment
class PostForm(forms.ModelForm):
class Meta():
model = Post
fields = ('author', 'title', 'text')
# Here we are assigning classes to some of the fields in Post. Adding the
# attrs in widgets is equivalent to adding attributes inside of an html tag
# like class, style, href, etc.
widgets = {
'title':forms.TextInput(attrs={'class':'textinputclass'}),
'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea postcontent'})
}
class CommentForm(forms.ModelForm):
class Meta():
model = Comment
fields = ('author', 'text')
widgets = {
'author':forms.TextInput(attrs={'class':'textinputclass'}),
'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'})
}
| [
"[email protected]"
] | |
3c33c27bdb417291386c9e4e246ff8868bfaa73f | 4ef95e264c6ebf59051b209ed4e586b49f2d958a | /process_batch.py | e6948a793566063626355eeb8354328abf7c722f | [] | no_license | quanchaozhao/digitalTibetanSymbolDemo | 99e1ef0c9b437064dd83c70b5ae8d04b55503cf9 | c5a1bde31607f62c63a93915bea196538d14cb1f | refs/heads/master | 2020-03-21T17:26:41.843938 | 2018-06-28T05:44:32 | 2018-06-28T05:44:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,357 | py | # coding:utf-8
from __future__ import print_function,division
from pic_process.img_process import line_extract, char_extract
IN_PATH = "/home/lyx2/img_in"
CHAR_OUT_PATH = r"D:\Users\Riolu\Desktop\aabbcc"
CLASSIFY_OUT_PATH = R"D:\npq"
MERGED_PATH = r"D:\merged"
SORTED_PATH = r"D:\sorted"
WRAP_SIZE = 60
# The data used here has already been deskewed, denoised, and run through
# text-region detection.
# Generate the character crops and do a first-pass grouping by cosine similarity.
import os
from shutil import copyfile,rmtree,copytree
import numpy as np
from skimage import io
from skimage import transform
from skimage import util
from sklearn.metrics.pairwise import cosine_similarity
TH = 0.9
# Compute the cosine similarity between two images
def cosine_similarity_2(img1,img2):
img1 = np.asarray(img1)
img2 = np.asarray(img2)
return cosine_similarity(img1.reshape((1,-1)), img2.reshape((1,-1))).ravel()[0]
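# Quick sanity-check example: two identical non-zero images score ~1.0, e.g.
#   cosine_similarity_2(np.ones((10, 10)), np.ones((10, 10)))  # ~= 1.0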
# Merge files in a folder whose similarity exceeds TH into one class; files of
# the same class share a directory name. Used to group the segmented characters.
# The grouping method can be swapped for any other classifier.
def classify_image_by_cosine_similarity(path_in=CHAR_OUT_PATH,path_out=CLASSIFY_OUT_PATH):
counter = 0
sample_img_list = []
sample_file_name_list = []
total = len(os.listdir(path_in))
for i,file_name in enumerate(os.listdir(path_in)):
img_path = path_in + os.path.sep + file_name
if not os.path.isdir(img_path):
img1 = io.imread(img_path, as_grey=True)
img1 = util.invert(img1)
img1 = transform.resize(img1, (100, 100),mode="edge")
for index, img in enumerate(sample_img_list):
if cosine_similarity_2(img, img1) > TH:
sample_file_name_list[index].append(file_name)
break
else:
img1 = transform.resize(img1, (100, 100),mode="edge")
sample_img_list.append(img1)
sample_file_name_list.append([file_name])
counter += 1
if i % 10 == 0:
print("%d of %d" % (i,total))
for i, file_names in enumerate(sample_file_name_list):
out_dir = path_out + os.path.sep + str(i)
os.mkdir(out_dir)
for file_name in file_names:
src = path_in + os.path.sep + file_name
dst = out_dir + os.path.sep + file_name
copyfile(src, dst)
return sample_img_list
# Build a dataset from the files grouped by directory name: 100 samples for
# training, 20 for testing.
def extract_exp_data_by_path_name(path_in,wrap_size=100,to_npz=True):
data = None
for file_dir in os.listdir(path_in):
img_dir_path = os.path.join(path_in,file_dir)
if os.path.isdir(img_dir_path):
files = os.listdir(img_dir_path)
file_numbers = len(files)
print(file_numbers)
if file_numbers < 15:
if data is None:
data = []
for file in files:
img_file_path = os.path.join(img_dir_path,file)
img = io.imread(img_file_path,as_grey=True)
img = util.invert(img)
data.append(img)
print("need copy to 15")
while file_numbers < 15:
pass
if file_numbers < 30:
print("need to blur")
if file_numbers < 60:
print("need to rotation")
if file_numbers < 120:
print("need to add noise")
pass
# Label the files in each subdirectory by the directory name and build a dataset.
def label_image_by_path_name(path_in,index_list=None,shuffle=True,wrap=True,wrap_size=60,to_npz=True):
    '''
    Build a dataset from the image files under path_in, grouped by directory.
    :param path_in: images already sorted into per-class directories
    :param shuffle: whether to shuffle the samples
    :param index_list: if given, each directory's label is its position in index_list
    :param to_npz: whether to export the result to an npz file
    :return:
    Example: for directories 'a', 'b', 'c' and index_list=['b', 'c', 'a'], all
    samples under 'a' are labeled 2, 'b' is labeled 0 and 'c' is labeled 1.
    '''
data = []
for file_dir in os.listdir(path_in):
img_dir_path = path_in + os.path.sep + file_dir
if os.path.isdir(img_dir_path):
label = file_dir
if index_list is not None:
try:
label = index_list.index(file_dir)
except Exception as e:
print(e)
for i, file_name in enumerate(os.listdir(img_dir_path)):
img_file_path = img_dir_path + os.path.sep + file_name
if not os.path.isdir(img_file_path):
img = io.imread(img_file_path,as_grey=True)
img = util.invert(img)
if wrap:
img = transform.resize(img,(wrap_size,wrap_size),mode='reflect')
data.append([img,label])
if i > 100:
break
    # Shuffle the samples
if shuffle:
np.random.shuffle(data)
x_data = []
y_data = []
for img,label in data:
x_data.append(img)
y_data.append(label)
if to_npz:
x_train = np.asarray(x_data)
y_train = np.asarray(y_data)
print(x_train.shape)
print(y_train.shape)
np.savez("D:\\data.npz",x_train=x_train,y_train=y_train)
return (x_data,y_data)
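# Hedged usage example mirroring the docstring above (the path is an assumption):
#   codes = ['b', 'c', 'a']
#   x, y = label_image_by_path_name(r"D:\labeled_data", index_list=codes)
#   # samples under directory 'a' get label 2, 'b' -> 0, 'c' -> 1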
# Read page images from a directory
def batch_extract_char_2_file(image_file_path=IN_PATH,out_to_folder=True,char_out_path = CHAR_OUT_PATH):
    '''
    Read page images from a directory and write the segmented characters,
    named after the source image, to the given output path.
    :param image_file_path: directory of the input images
    :param char_out_path: output directory
    :return:
    '''
img_file_names = os.listdir(image_file_path)
page_list = []
for file_name in img_file_names:
img_path = image_file_path + os.path.sep + file_name
if not os.path.isdir(img_path):
img_org = io.imread(img_path, as_grey=True)
img = np.where(img_org > 128, 1, 0)
line_imgs = line_extract(img)
path_name = os.path.splitext(file_name)[0]
if out_to_folder:
os.mkdir(char_out_path + os.path.sep + path_name)
line_list = []
for i, line_img in enumerate(line_imgs):
char_position_arr = char_extract(line_img, [[0, line_img.shape[0], 0, line_img.shape[1]]])
char_list = []
for j, char_position in enumerate(char_position_arr[0]):
start_row, end_row, start_col, end_col = char_position
sub_img = line_img[start_row:end_row, start_col:end_col]
sub_img = np.where(sub_img == 1, 255, 0)
if (end_row-start_row) * (end_col - start_col) > 50:
if out_to_folder:
save_str = char_out_path + \
os.path.sep + \
path_name + \
os.path.sep + \
os.path.splitext(file_name)[0] + \
"_" + str(i) + "_" + str(j) + ".png"
io.imsave(save_str, sub_img)
char_list.append(sub_img)
line_list.append(char_list)
page_list.append(line_list)
return page_list,img_file_names
def classify_folder(path_in=CHAR_OUT_PATH,out_path=CLASSIFY_OUT_PATH):
    '''
    Classify the images inside each per-page folder, one folder at a time.
    :param path_in:
    :param out_path:
    :return:
    '''
file_names = os.listdir(path_in)
for file_name in file_names:
char_file_path = path_in + os.path.sep + file_name
char_file_out_path = out_path + os.path.sep + file_name
if os.path.exists(char_file_out_path):
rmtree(char_file_out_path)
os.mkdir(char_file_out_path)
sample_img_list = classify_image_by_cosine_similarity(path_in=char_file_path,path_out=char_file_out_path)
np.save(char_file_out_path+os.path.sep+file_name,sample_img_list)
def merge_classified_chars(path_in=CLASSIFY_OUT_PATH, path_out=MERGED_PATH):
    '''
    Merge the per-page classified folders into a single set of classes.
    :param path_in:
    :param path_out:
    :return:
    '''
sample_img_list_merged = None
class_num = 0
if os.path.exists(path_out):
rmtree(path_out)
total= len(os.listdir(path_in))
for i,file_name in enumerate(os.listdir(path_in)):
print("%d of %d" % (i, total))
file_char_path = path_in + os.path.sep + file_name
if i == 0:
sample_img_list_merged = np.load(file_char_path+os.path.sep+file_name+".npy")
class_num = sample_img_list_merged.shape[0]
for char_class_path_name in os.listdir(file_char_path):
input_folder_full_path = file_char_path + os.path.sep + char_class_path_name
out_folder_full_path = path_out + os.path.sep + char_class_path_name
if os.path.isdir(input_folder_full_path):
copytree(input_folder_full_path, out_folder_full_path)
else:
sample_img_list_new = np.load(file_char_path + os.path.sep + file_name + ".npy")
for j, img_new in enumerate(sample_img_list_new):
input_folder_full_path = file_char_path + os.path.sep + str(j)
for k, img_merged in enumerate(sample_img_list_merged):
out_folder_full_path = path_out + os.path.sep + str(k)
if cosine_similarity_2(img_new,img_merged) > TH:
for char_file_name in os.listdir(input_folder_full_path):
copyfile(input_folder_full_path + os.path.sep + char_file_name,out_folder_full_path+ os.path.sep + char_file_name)
break
else:
                    sample_img_list_merged = np.concatenate((sample_img_list_merged, [img_new]))
class_num += 1
out_folder_full_path = path_out + os.path.sep + str(class_num)
copytree(input_folder_full_path, out_folder_full_path)
def extract_top_n_sample(path_in=MERGED_PATH,path_out=SORTED_PATH,N = 500):
dir_list = []
for i, folder_name in enumerate(os.listdir(path_in)):
folder_path = path_in + os.path.sep + folder_name
num = len(os.listdir(folder_path))
dir_list.append((int(folder_name),num))
dir_list_new = sorted(dir_list,key=lambda x:x[1],reverse=True)
arr = np.asarray(dir_list_new)
arr_extract = arr[arr[:,1] > 2]
if os.path.exists(path_out):
rmtree(path_out)
os.mkdir(path_out)
for i, v in arr_extract:
src = path_in + os.path.sep + str(i)
dst = path_out + os.path.sep + str(i)
copytree(src,dst)
# Encode the samples with the pre-trained encoder
def encode_sample(encoder_file_path="encoder.h5",sample_file_path=r"D:\data.npz"):
import keras
x_train = np.load(sample_file_path)['x_train']
x_train = np.reshape(x_train,(-1,60,60,1))
encoder = keras.models.load_model(encoder_file_path)
x_train_coded = encoder.predict(x_train)
np.savez("D:/encoded_data.npz",x_train_coded)
def predict_image_with_consine_similarity(encoder_data_file_path=r"D:\encoded_data.npz",
encoder_file_path="encoder.h5",
rec_input_path=r"D:\Users\Riolu\Desktop\新建文件夹 (2)"):
    '''
    Label the data under rec_input_path by cosine similarity.
    :param encoder_data_file_path: data already encoded with the encoder
    :param encoder_file_path: encoder model file
    :param rec_input_path: directory of files to recognize
    :return:
    # TODO group the recognized files into class folders
    '''
import keras
unrecognize_picture_dir = "D:\\img_out"
x_train,y_train = np.load("D:/data.npz")['x_train'],np.load("D:/data.npz")['y_train']
encoder = keras.models.load_model(encoder_file_path)
t = len(os.listdir(rec_input_path))
utfcode_str_list = []
x_train_coded = np.load(encoder_data_file_path)['arr_0']
for i,file_name in enumerate(os.listdir(rec_input_path)):
full_path = rec_input_path + os.path.sep + file_name
img = io.imread(full_path,as_grey=True)
img_resize = transform.resize(util.invert(img),(60,60),mode='reflect')
img2 = np.reshape(img_resize,(1,60,60,1))
encoded_img = encoder.predict(img2)
lbs = []
for img_t in x_train_coded:
lbs.append(cosine_similarity_2(encoded_img,img_t))
mx = np.argmax(lbs)
tibetan_word_code = []
with open("words_Titan.txt") as f:
for line in f.readlines():
tibetan_word_code.append(line.strip())
if lbs[mx] > 0.9:
rec_str = tibetan_word_code[y_train[mx]]
print(rec_str,file_name)
utfcode_str_list.append(rec_str)
else:
utfcode_str_list.append('*')
io.imsave(unrecognize_picture_dir+os.path.sep+file_name,img)
# print("%d / %d" % (i,t))
print(utfcode_str_list)
def predict_classified_image(encoder_data_file_path=r"D:\encoded_data.npz",
encoder_file_path="encoder.h5",
rec_input_path=r"D:\img_test",
out_put_path = r"D:\img_out"
):
    '''
    Classify data that has been roughly pre-grouped but not yet hand-labeled.
    Since the model is already trained, this helper is optional.
    :param encoder_data_file_path:
    :param encoder_file_path:
    :param rec_input_path:
    :param out_put_path:
    :return:
    '''
labeled_data = np.load("D:/data.npz")
x_train,y_train = labeled_data['x_train'],labeled_data['y_train']
import keras
encoder = keras.models.load_model(encoder_file_path)
input_folders = os.listdir(rec_input_path)
x_train_coded = np.load(encoder_data_file_path)['arr_0']
tibetan_word_code = []
with open("words_Titan.txt") as f:
for line in f.readlines():
tibetan_word_code.append(line.strip())
for i,folder in enumerate(input_folders):
img_folder_full_path = rec_input_path + os.sep + folder
img_name = os.listdir(img_folder_full_path)[0]
rec_img_path = rec_input_path + os.sep + folder + os.sep + img_name
img = io.imread(rec_img_path,as_grey=True)
img_resize = transform.resize(util.invert(img),(60,60),mode='reflect')
img2 = np.reshape(img_resize,(1,60,60,1))
encoded_img = encoder.predict(img2)
lbs = []
for img_t in x_train_coded:
lbs.append(cosine_similarity_2(encoded_img,img_t))
mx = np.argmax(lbs)
if lbs[mx] > 0.9:
out_classified_path = tibetan_word_code[y_train[mx]]
else:
out_classified_path = "unrecognized"
for img_file in os.listdir(img_folder_full_path):
img_full_path = os.path.join(img_folder_full_path, img_file)
img_out_put_path = os.path.join(out_put_path, out_classified_path)
if not os.path.exists(img_out_put_path):
os.mkdir(img_out_put_path)
copyfile(img_full_path, os.path.join(img_out_put_path, img_file))
print("%d of %d" % (i,len(input_folders)))
def classifiy_chars_by_pages(path_in='/home/lyx2/img_in',path_out=r"/home/lyx2/img_out"):
encoder_file_path = "./encoder.h5"
encoder_data_file_path = r"./encoded_data.npz"
labeled_data = np.load(r"./data.npz")
temp_folder = r'./temp__'
x_train, y_train = labeled_data['x_train'], labeled_data['y_train']
if os.path.exists(temp_folder):
rmtree(temp_folder)
os.mkdir(temp_folder)
batch_extract_char_2_file(char_out_path=temp_folder)
import keras
encoder = keras.models.load_model(encoder_file_path)
x_train_coded = np.load(encoder_data_file_path)['arr_0']
tibetan_word_code = []
with open("words_Titan.txt") as f:
for line in f.readlines():
tibetan_word_code.append(line.strip())
image_to_recog_list = os.listdir(temp_folder)
for i,image_file_name in enumerate(image_to_recog_list):
page_path = os.path.join(temp_folder,image_file_name)
t = len(os.listdir(page_path))
for j,img_name in enumerate(os.listdir(page_path)):
img_path = os.path.join(page_path,img_name)
charactor_img = io.imread(img_path,as_grey=True)
img_resize = transform.resize(util.invert(charactor_img), (60, 60), mode='reflect')
img2 = np.reshape(img_resize, (1, 60, 60, 1))
encoded_img = encoder.predict(img2)
lbs = []
for img_t in x_train_coded:
lbs.append(cosine_similarity_2(encoded_img, img_t))
mx = np.argmax(lbs)
if lbs[mx] > 0.9:
out_classified_path = tibetan_word_code[y_train[mx]]
else:
out_classified_path = "unrecognized"
img_out_put_path = os.path.join(path_out, out_classified_path)
if not os.path.exists(img_out_put_path):
os.mkdir(img_out_put_path)
img_out_put_path = os.path.join(img_out_put_path,img_name)
copyfile(img_path,img_out_put_path)
print("Page %d, %d of %d" % (i,j,t))
rmtree(temp_folder)
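# Pipeline summary of the function above: segment every page image into
# character crops in a temp folder, encode each crop with the saved encoder,
# label it by cosine similarity against the encoded training set, and file it
# under the matching Tibetan code folder (or "unrecognized").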
if __name__ == '__main__':
extract_exp_data_by_path_name(path_in=r'D:\Users\Riolu\Desktop\merged_ultimate2017-06-08',wrap_size=100)
# encode_sample()
# label_image_by_path_name()
# batch_extract_char_2_file()
# tibetan_word_code = []
# with open("words_Titan.txt") as f:
# for line in f.readlines():
# tibetan_word_code.append(line.strip())
#
# x_train,y_train = label_image_by_path_name("D:\labeled_data_4_exp",tibetan_word_code,True)
#
# img = x_train[0]
# label = y_train[0]
# plt.imshow(img,cmap='gray')
# plt.title(tibetan_word_code[label])
# plt.show()
| [
"[email protected]"
] | |
cf6e8391100bc3d21839923f4764aa442d4027d4 | 731f50dd747c488fab19d19f1502c5439c4ccfc1 | /feedback/reviews/migrations/0001_initial.py | 0ba05987a064a36d643b7bbd03491e038b7d684a | [] | no_license | deepakdeedar/Django | b710e0f34540fabd8e492fd854066f7b052920f5 | a8235217cfde06712a65717ad8849a5e5387370c | refs/heads/master | 2023-05-06T22:23:37.910663 | 2021-06-01T12:32:48 | 2021-06-01T12:32:48 | 361,150,332 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # Generated by Django 3.1.7 on 2021-05-30 12:40
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=100)),
('review_text', models.TextField()),
('rating', models.IntegerField()),
],
),
]
| [
"[email protected]"
] | |
5e7d8d53d398f90c3415f6261c9339941249e345 | 3c266f16d058e8d404727d6844d456b05e918423 | /While loop.py | 97ad28f32c80d0163b17a2faa55ae189fef309b2 | [] | no_license | Tracker71/Astr119-Week-1 | 056b12fec309a7bbc761f8a743e7f92edcc33992 | 85c78083914cc07a7d745002f601079bd8044725 | refs/heads/master | 2020-05-05T01:26:33.691541 | 2019-04-24T15:22:19 | 2019-04-24T15:22:19 | 179,603,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # -*- coding: utf-8 -*-
"""
while loop
consider object thrown up into the air
Find maximum height
"""
import numpy as np
import matplotlib.pyplot as plt
v0= 5 #m/s
g = 9.8
n = 2000 # time steps
a_t = np.linspace( 0, 1, n)
#computations
y = v0*a_t - .5*g*a_t**2
print( a_t)
print ( y)
# find max heght in while loop
i=1
# y[-1] last entry in array
while y[i] > y[i-1]:
largest_height = y[i]
i += 1
print( "max. height: %10.2f"%( largest_height))
plt.plot( a_t, y)
plt.show()
#To show graph better type in console "auto", to bring it back to console "inline" | [
"[email protected]"
] | |
6c67aca905fda078d0b41a88abc92769843fefb5 | 015efec4aff1d0daec5dca64b158014eca8899ac | /Amazon/critical_connections.py | 05db60184734036980bf32f667e442ec5d358818 | [] | no_license | sayali-nakashe/Practice_coding_questions | cccf0802d81e19d7575533eef912c6952df36c23 | d885366f2770bbc2abd9a1021769b4dd9bfefb54 | refs/heads/master | 2020-06-20T09:59:33.921810 | 2019-12-03T04:21:25 | 2019-12-03T04:21:25 | 197,086,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | from collections import defaultdict
class Solution(object):
def criticalConnections(self, n, connections):
"""
:type n: int
:type connections: List[List[int]]
:rtype: List[List[int]]
"""
disc = [None for _ in range(n+1)]
low = [None for _ in range(n+1)]
graph = defaultdict(list)
for c in connections:
graph[c[0]].append(c[1])
graph[c[1]].append(c[0])
self.time = 0
bridges = []
def dfs(node, parent):
if disc[node] is None:
disc[node] = self.time
low[node] = self.time
self.time += 1
for n in graph[node]:
if disc[n] is None:
dfs(n, node)
                # low-link: minimum over all neighbours' low values (skipping
                # the tree parent) and this node's own value.
                if parent is not None:
                    l = min([low[i] for i in graph[node] if i!=parent]+[low[node]])
                else:
                    l = min([low[i] for i in graph[node]] + [low[node]])
low[node] = l
dfs(1, None)
for v in connections:
if disc[v[0]]<low[v[1]] or disc[v[1]]<low[v[0]]:
bridges.append(v)
return bridges
if __name__ == '__main__':
n = 9
edges = [[1, 2], [1, 3], [2, 3], [3, 4], [3, 6], [4, 5], [6, 7], [6, 9], [7, 8], [8, 9]]
solution = Solution()
    print(solution.criticalConnections(n, edges))
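    # Expected bridges for the sample graph above: [3, 4], [4, 5] and [3, 6];
    # the triangle 1-2-3 and the cycle 6-7-8-9 contain no critical connections.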
| [
"[email protected]"
] | |
ebf9c99ce045eb4ca45a1040dcbb3b7260122a6f | e46a34140916bd7b943c70535e2e87f290dd5724 | /ForestTest/ForestTest.py | e82c138654d854007a5987ee004b8ffb70281c92 | [] | no_license | Madi89/Fractal-Tree | db6a25a755bdfec747e875eae9ea10fca2ee6964 | 2c219f14e1b0127c7ce3fdbd8d5d0354e1f2469d | refs/heads/master | 2023-07-28T11:35:11.189106 | 2021-09-10T16:11:17 | 2021-09-10T16:11:17 | 405,140,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | import turtle
turtle.title("Fractal tree") #changes the title of the TurtleScreen
turtle.bgcolor("black") #changes the background color of the TurtleScreen
a=turtle.Turtle() #initializes a variable 'a', which we use throughout the program to refer to turtle
a.lt(90) #the turtle rotates 90 degrees to the left (start position > after 90 degrees ^)
a.shape("turtle") #changes the shape of the turtle ("turtle","circle" etc.)
a.shapesize(0.8) #increase or decrease the size of the onscreen turtle
a.pencolor("blue") #changes the outline color of the turtle
a.fillcolor("black") #changes the fill color of the turtle
a.speed("fastest") #speed of our turtle cursor
# call the tree function recursively inside itself
def tree(num):
while num>10:
        a.fd(num) #the turtle moves forward by num pixels
a.lt(30) #turns 30 degrees to the left
tree(4*num/5)
a.rt(60) #turns 60 degrees to the right
tree(4*num/5)
a.lt(30) #turns 30 degrees to the left
a.bk(num) #the turtle moves backwards by distance
return
tree(60)
#stop execution
turtle.done() | [
"[email protected]"
] | |
8e3c8cb9ebe5542d32da283fe8b8d56411898e13 | 7d90d2ce27c6ee0af74391b09909edbd45fdc2f0 | /renix_py_api/api_gen/Dot3ahStartLoopBackCommand_Autogen.py | c2ad76c47f9cefd64511bdef10260bb7ad6ac909 | [] | no_license | gaoxingyu-hub/54testframework-master-e284 | d7ea0d4a715b65c8652430e963a86b9522a7237a | 57dd2197e7d91b8ad8fb2bd0e3503f10afa08544 | refs/heads/master | 2023-04-30T05:50:41.542402 | 2021-05-28T09:19:37 | 2021-05-28T09:19:37 | 309,922,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | """
Auto-generated File
Create Time: 2019-12-27 02:33:27
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .ROMCommand_Autogen import ROMCommand
@rom_manager.rom
class Dot3ahStartLoopBackCommand(ROMCommand):
def __init__(self, Dot3ahConfigs=None, **kwargs):
self._Dot3ahConfigs = Dot3ahConfigs # 802.3ah Protocol Configs
properties = kwargs.copy()
if Dot3ahConfigs is not None:
properties['Dot3ahConfigs'] = Dot3ahConfigs
# call base class function, and it will send message to renix server to create a class.
super(Dot3ahStartLoopBackCommand, self).__init__(**properties)
@property
def Dot3ahConfigs(self):
"""
get the value of property _Dot3ahConfigs
"""
return self._Dot3ahConfigs
@Dot3ahConfigs.setter
def Dot3ahConfigs(self, value):
self._Dot3ahConfigs = value
def _set_dot3ahconfigs_with_str(self, value):
tmp_value = value.strip()
if tmp_value.startswith('{'):
tmp_value = tmp_value[1:-1]
self._Dot3ahConfigs = tmp_value.split()
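    # Hedged usage sketch (the config handle values are assumptions):
    #   cmd = Dot3ahStartLoopBackCommand(Dot3ahConfigs=['//dot3ah/config1'])
    #   cmd._set_dot3ahconfigs_with_str('{//dot3ah/config1 //dot3ah/config2}')
    #   # the helper strips the braces and splits the ROM list string into a list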
| [
"[email protected]"
] | |
548fe0be0c448638017bc8c742a173431fadef31 | 44cedd413bc869f67c0336aa8b69b5a3f12f16da | /www/cgi-bin/user.py | fddbfc03f5cfb43499ed44afb92a3d498fc4a890 | [] | no_license | ShampaSinha/PythonCGI-Automation-with-Ansible | 7a333bb65d8591d85de28f4511196ed8de8497c6 | 4dc4585cd23363eff627deffa19c046cb0886778 | refs/heads/master | 2020-12-26T10:45:39.978764 | 2020-01-31T18:00:38 | 2020-01-31T18:00:38 | 237,485,734 | 0 | 0 | null | 2020-01-31T18:00:39 | 2020-01-31T17:54:16 | Python | UTF-8 | Python | false | false | 402 | py | #!/usr/bin/python36
import subprocess
import cgi
print("context-type: text/html")
print()
var=cgi.FieldStorage()
a=var.getvalue("n")
b=var.getvalue("p")
x=subprocess.getoutput("sudo useradd {}".format(a))
print(x)
y=subprocess.getoutput('id {}'.format(a))
print(y)
z1=subprocess.getstatusoutput("echo '{}' | sudo passwd {} --stdin".format(b,a))
if(z1[0]==0):
print("It worked:\n")
else:
print("GOT")
| [
"[email protected]"
] | |
c6ec0ca892dcb0cde388ea4178e0c97c3f70b87f | ea089c8f997382ea0b09f5f08eb79e9a5f76707b | /tests/test_save.py | d1fe0584c69d87b8dc3e346ecf73a4e0535f1444 | [] | permissive | QPC-github/wextracto | 09489a07dde44583bbc07d19a8a06c22f0698b99 | 9c789b1c98d95a1e87dbedfd1541a8688d128f5c | refs/heads/master | 2023-07-06T08:26:06.647509 | 2017-10-24T18:54:38 | 2017-10-24T18:54:38 | 662,192,705 | 1 | 1 | BSD-3-Clause | 2023-07-04T14:54:07 | 2023-07-04T14:54:01 | null | UTF-8 | Python | false | false | 1,954 | py | import os
import errno
import pytest
from six import next, BytesIO
from wex.readable import EXT_WEXIN
#from wex.save import save_url_from_path, save_output_from_readable
#
#
#def read_chunks(readable, size=2**16):
# chunks = []
# while True:
# chunk = readable.read(size)
# if not chunk:
# break
# chunks.append(chunk)
# return b''.join(chunks)
#
#
#def test_save_url_from_path(tmpdir):
# url_from_path = save_url_from_path(tmpdir.strpath)
# url = url_from_path('http://httpbin.org/get?this=that')
# for readable in url.get():
# read_chunks(readable)
# files = [os.path.join(dirpath, filename)
# for dirpath, dirnames, filenames in os.walk(tmpdir.strpath)
# for filename in filenames]
# assert len(files) == 1
# assert os.path.basename(files[0]) == '0' + EXT_WEXIN
#
#
#def test_save_url_from_path_oserror(tmpdir):
# ro = tmpdir.mkdir('ro')
# st = os.stat(ro.strpath)
# os.chmod(ro.strpath, 0444)
# try:
# url_from_path = save_url_from_path(ro.strpath)
# url = url_from_path('http://httpbin.org/get?this=that')
# with pytest.raises(OSError) as excinfo:
# next(url.get())
# finally:
# # Need to change it back or else it won't get cleaned up :(
# os.chmod(ro.strpath, st.st_mode)
# assert excinfo.value.errno == errno.EACCES
#
#
#def test_save_output_from_readable(tmpdir):
# stdout = BytesIO()
# wexin = tmpdir.join('0.wexin')
# wexout = tmpdir.join('0.wexout')
# with wexin.open('w') as fp:
# fp.write('foo')
# with save_output_from_readable(wexin.open('r'), stdout) as write:
# write(('this', 'that'))
# assert wexout.open('r').read() == 'this\t"that"\n'
#
#
#def test_save_output_from_readable_no_name(tmpdir):
# stdout = BytesIO()
# readable = BytesIO('foo')
# with save_output_from_readable(readable, stdout) as write:
# write(('this', 'that'))
| [
"[email protected]"
] | |
68f874ebfa0efe7aa5cac94356e1bf0cfdd3e695 | c2cc46057130a2520c4b74b04204af77f5bdeea6 | /Assign3-kpca.py | 06c4939da94094c6e4b85799105e0484673f7961 | [] | no_license | yangs21/Data-Mining | 401e8a70ac85f02d48e33fafa99b46a0cd5194ab | 674a9210b9e2837fef7662141edd978cd394de6f | refs/heads/main | 2023-08-14T11:06:45.113216 | 2021-10-06T14:09:53 | 2021-10-06T14:09:53 | 414,228,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,238 | py | import pandas as pd
import numpy as np
import math
import seaborn as sns
import matplotlib.pyplot as plt
alpha = 0.95
'''
print("###### Linear Kernel ######")
# Load in entire dataset first:
data = pd.read_csv("energydata_complete.csv", sep=',', header=None)
# Convert the matrix into a matrix and drop the first and last columns:
data = np.array(data)
attribute = data[0, 1:28]
data = data[1:data.shape[0], 1:28]
data = data.astype('float32')
n = data.shape[0]
d = data.shape[1]
# Step1. Compute the linear kernel matrix:
n = 5000
K_linear = np.zeros([n, n], dtype=np.float32)
for i in range(0, n):
for j in range(0, n):
K_linear[i, j] = np.dot(data[i, :], data[j, :].transpose())
print("The linear kernel is:")
print(K_linear, "\n")
# Step2. Center the linear kernel matrix:
# Create the n*n identity matrix:
I = np.identity(n)
# Create the n*n 1's matrix:
one_matrix = np.ones([n, n], dtype=np.float32)
# Compute the centered linear kernel:
K_linear_center = (I - one_matrix/n) @ K_linear @ (I - one_matrix/n)
#print(K_linear_center.shape)
# Step3-4. Get the eigenvalues and eigenvectors of centered linear kernel matrix:
# Calculate the eigenvectors and eigen values:
eigen_vals, eigen_vecs = np.linalg.eig(K_linear_center)
# Sort the eigen values from largest to smallest:
idx = eigen_vals.argsort()[::-1]
eigen_vals = eigen_vals[idx]
eigen_vecs = eigen_vecs[:, idx]
#print(eigen_vals)
#print(eigen_vecs)
# Step5. Compute variance for each component:
unit_eigen_vals = eigen_vals/n
# Step6-7. Compute fraction of total variance:
# Compute fraction and choose dimensionality:
sum_eig = 0
for i in range(0, len(unit_eigen_vals)):
sum_eig = sum_eig + unit_eigen_vals[i]
frac = sum_eig/unit_eigen_vals.sum()
if (frac >= alpha):
r = i
break
print("In linear kernel PCA, the number of dimensions required to capture alpha=0.95 is:")
print("r = %d.\n" %(r+1))
print("The three dominant eigenvalues are %f, %f and %f.\n" %(unit_eigen_vals[0], unit_eigen_vals[1], unit_eigen_vals[2]))
# Step8. Get the first r unit eigenvectors that capture alpha=0.95:
unit_eigen_vecs = np.zeros([n, r+1], dtype=np.float32)
for i in range(0, r+1):
unit_eigen_vecs[:, i] = math.sqrt(1/eigen_vals[i]) * eigen_vecs[:, i]
#print(unit_eigen_vecs)
# Step 9. Get reduced basis for the first 2 PCs:
Cr = unit_eigen_vecs[:, 0:2]
print("The two dominant PCs are:")
print("u1:")
print(Cr[:, 0])
print("u2:")
print(Cr[:, 1], "\n")
# Step10. Get the reduced dimensionality data:
A_linear = np.empty([n, 2], dtype=np.float32)
A_linear = K_linear_center @ Cr
print("In linear kernel, the projected data onto the two dominant PCs is:")
print(A_linear)
# Created scattered plot for A_linear:
sns.scatterplot(x=A_linear[:, 0], y=A_linear[:, 1])
plt.title("Linear Kernel: Projection onto Two Dominant PCs")
plt.show()
'''
# -------------------------------------------------------------------------------------
# Apply regular PCA with covariance matrix
print("###### Regular PCA ######")
# Load in data file:
data = pd.read_csv("energydata_complete.csv", sep=',', header=None)
# Convert the matrix into a matrix and drop the first and last columns:
data = np.array(data)
attribute = data[0, 1:28]
data = data[1:data.shape[0], 1:28]
data = data.astype('float32')
# Calculate mean vector and centered data matrix:
nrows = data.shape[0]
ncols = data.shape[1]
mean_vector = np.sum(data, axis=0)/nrows
centered_data = data - mean_vector
# Calculate total variance:
squared_norm = np.empty(data.shape[0], dtype=np.float32)
for i in range(0, data.shape[0]):
norm = np.linalg.norm(centered_data[i, :])
squared_norm[i] = norm * norm
total_var = squared_norm.sum()/data.shape[0]
# Calculate the Covariance Matrix:
sum_inner = np.empty([ncols, ncols], dtype=np.float32)
for i in range(0, ncols):
for j in range(0, ncols):
zt = (centered_data[:, j]).transpose()
z = (centered_data[:, i])
sum_inner[i, j] = np.dot(z,zt)
cov = sum_inner/nrows
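# Vectorized equivalent of the covariance double loop above (a sketch; agrees
# with `cov` up to float32 rounding; np.cov(data, rowvar=False, bias=True)
# yields the same matrix):
cov_vectorized = centered_data.T @ centered_data / nrows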
# Calculate the eigenvectors and eigen values:
eigen_vals, eigen_vecs = np.linalg.eig(cov)
# Sort the eigen values from largest to smallest:
idx = eigen_vals.argsort()[::-1]
eigen_vals = eigen_vals[idx]
eigen_vecs = eigen_vecs[:, idx]
# Compute fraction and choose dimensionality:
sum_eig = 0
for i in range(0, len(eigen_vals)):
sum_eig = sum_eig + eigen_vals[i]
frac = sum_eig/eigen_vals.sum()
if (frac >= alpha):
r = i
break
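# Equivalent dimensionality choice via cumulative sums (hedged sketch, assuming
# real eigenvalues):
#   r = int(np.searchsorted(np.cumsum(eigen_vals) / eigen_vals.sum(), alpha))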
print("In regular PCA, the number of dimensions required to capture alpha=0.95 is:")
print("r = %d.\n" %(r+1))
# Generate reduced basis for the first two PCs:
Ur = eigen_vecs[:, 0:2]
# Get reduced dimensionality data:
A_regular = np.empty([nrows, 2], dtype=np.float32)
for i in range(0, nrows):
A_regular[i, :] = Ur.transpose() @ centered_data[i, :]
print("In regular PCA, the three largest eigenvalues are: %f, %f and %f.\n" %(eigen_vals[0], eigen_vals[1], eigen_vals[2]))
print("The two dominant regular PCs are:")
print(Ur, "\n")
print("In regular PCA, the projected data onto the two dominant PCs is:")
print(A_regular)
# Created scattered plot for regular PCA:
sns.scatterplot(x=A_regular[:, 0], y=A_regular[:, 1])
plt.title("Regular PCA: Projection onto Two Dominant PCs")
plt.show()
| [
"[email protected]"
] | |
e32f13276fd6abb002fc129f39f6daa2d9db5997 | 64e457b38f2a3f0db80e55543571eb07954302af | /kafka_server.py | d2081d5665acb0203fdb21028deeab2e5bb6416b | [] | no_license | CharlesJonah/SF-crime-statistics-with-spark-streaming-1 | a120e5915fea6c05f8425493e39b2943cad3545e | af83fc8a427ce861abab0359b716714160ca4450 | refs/heads/master | 2022-12-26T16:27:49.715591 | 2020-10-11T00:42:00 | 2020-10-11T00:42:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import producer_server
def run_kafka_server():
# TODO get the json file path
input_file = "police-department-calls-for-service.json"
# TODO fill in blanks
producer = producer_server.ProducerServer(
input_file=input_file,
topic="sf.crime.statistics.topic",
bootstrap_servers="localhost:9092",
client_id=None
)
#if producer.bootstrap_connected():
# print("Connected to bootstrap")
return producer
def feed():
producer = run_kafka_server()
producer.generate_data()
if __name__ == "__main__":
feed()
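# ProducerServer (see producer_server.py) is assumed to wrap a KafkaProducer
# and stream the JSON records to the topic; the commented bootstrap check above
# could be re-enabled as:
#   producer = run_kafka_server()
#   if producer.bootstrap_connected():
#       producer.generate_data()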
| [
"[email protected]"
] | |
d7ed48e2f144dfdde2a16148fa870c38f09c3a64 | ba597f50f938dea39a0e980b8fbcca8749016996 | /app_main.py | 1f02414353bcd40e3f406b5354a1090254a74be5 | [] | no_license | zhu-dev/idcard_ocr | be00d03649f6b68b1ff8fdd5476f2090aa14d20a | 0314d28d593d62c1d02d2a1650115616eb8ae5f5 | refs/heads/master | 2022-12-06T23:04:16.393841 | 2020-03-22T06:34:22 | 2020-03-22T06:34:22 | 249,125,951 | 1 | 0 | null | 2022-11-22T04:59:20 | 2020-03-22T06:24:24 | Python | UTF-8 | Python | false | false | 3,581 | py | # -*- coding: utf-8 -*-
# import idcardocr
from PIL import Image, ImageTk
from PIL.ImageTk import PhotoImage
import findidcard
import recognizeidcard
import tkinter as tk
import cv2
def process(img_name):
try:
idfind = findidcard.findidcard()
idcard_img = idfind.find(img_name)
result_dict = recognizeidcard.idcardocr(idcard_img)
result_dict['error'] = 0
except Exception as e:
result_dict = {'error': 1}
print(e)
return result_dict
if __name__ == '__main__':
idcardimagepath = 'images/gfm.jpg'
    # Instantiate the Tk object and create the main window
window = tk.Tk()
    # Set the window title
    window.title('ID card character recognition tool: 高傅敏')
    # Set the window size (width x height)
    window.geometry('700x700')  # the separator here is a lowercase 'x'
    # Place a title label on the GUI
    title = tk.Label(window, text='ID card character recognition tool', bg='green', font=('Arial', 12), width=30, height=2).pack(side="top", fill='x')
    # Note: bg is the background colour, font the typeface, width/height the size; width and height are measured in characters, e.g. height=2 makes the label two characters tall.
img = Image.open(idcardimagepath)
photo = ImageTk.PhotoImage(img)
img_label = tk.Label(window, image=photo)
img_label.pack()
name_str = tk.StringVar()
nation_str = tk.StringVar()
sex_str = tk.StringVar()
birth_str = tk.StringVar()
address_str = tk.StringVar()
idnum_str = tk.StringVar()
name_label = tk.Label(window, textvariable=name_str, bg='green', font=('Arial', 12), width=30, height=2).pack(fill='x')
nation_label = tk.Label(window, textvariable=nation_str, bg='green', font=('Arial', 12), width=30, height=2).pack(fill='x')
sex_label = tk.Label(window, textvariable=sex_str, bg='green', font=('Arial', 12), width=30, height=2).pack(fill='x')
birth_label = tk.Label(window, textvariable=birth_str, bg='green', font=('Arial', 12), width=30, height=2).pack(fill='x')
address_label = tk.Label(window, textvariable=address_str, bg='green', font=('Arial', 12), width=30, height=2).pack(fill='x')
idnum_label = tk.Label(window, textvariable=idnum_str, bg='green', font=('Arial', 12), width=30, height=2).pack(fill='x')
info = process(idcardimagepath)
error = info['error']
if error == 0:
name = info['name']
nation = info['nation']
sex = info['sex']
birth = info['birth']
address = info['address']
idnum = info['idnum']
print('*' * 30)
        print('Name: ' + name)
        print('Ethnicity: ' + nation)
        print('Sex: ' + sex)
        print('Date of birth: ' + birth)
        print('Address: ' + address)
        print('Citizen ID number: ' + idnum)
print('*' * 30)
name_str.set(name)
nation_str.set(nation)
sex_str.set(sex)
birth_str.set(birth)
address_str.set(address)
idnum_str.set(idnum)
else:
print(info)
    # Run the main window event loop
window.mainloop()
    # Note: mainloop keeps the window refreshing continuously. Without it the window is
    # static and values passed in are never updated; mainloop is essentially one big while
    # loop that re-renders after every interaction, so the loop is required.
    # Every Tk window script needs a mainloop call like this - it is the key to the whole GUI.
| [
"[email protected]"
] | |
8c80086394b1c7e7b1dc6a5bd735aaea141e42e5 | 25a798b43dc891260f40011c3fd30a0a079ad76c | /detect.py | a806adeac6936145afd653ba671a72884211c499 | [] | no_license | fuyunguagua/watermark_sender | 473c8293d481a471c01f71c029ea5e48bfa75684 | 5329c9e5b6fa7b32a3a00e918f8fa9be16410deb | refs/heads/master | 2020-04-09T04:37:09.488937 | 2018-12-02T08:57:58 | 2018-12-02T08:57:58 | 160,029,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | try:
import scapy.all as scapy
except ImportError:
import scapy
try:
# This import works from the project directory
import scapy_http.http
except ImportError:
# If you installed this package via pip, you just need to execute this
from scapy.layers import http
from scapy.all import *
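# Scan the TCP payloads of the capture for the watermark marker string
# ('wangyang') and print each matching payload with its source/destination IPs.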
pcap = rdpcap('C:/Desktop/detect.pcap')[TCP]
for i in range(len(pcap)):
try:
raw = str(pcap[i][Raw])
# if 'a=' in raw and 'b=' in raw and 'c=' and 'd=' in raw:
if 'wangyang' in raw:
print pcap[i][Raw]
print pcap[i]['IP'].src,'----',pcap[i]['IP'].dst
# print(raw)
# print(raw[0:raw.find('a=')])
except:
continue
| [
"[email protected]"
] | |
29e4f545719e43f2a49b80ffdf16616925d10a45 | 2494b93425260afe2826b27cde13fe24fc8d85c7 | /main.py | d0ab5dfd951fff2bdd2394c9285337a4acc9350d | [] | no_license | ankituf/Speech-Recognition-System | 3dac44af5c2f96d3fb3a47704229a16901a57f1a | 011a3a7bde4285645a2d9162b18921e0534c3896 | refs/heads/master | 2021-01-23T08:34:12.681493 | 2017-09-05T22:38:05 | 2017-09-05T22:38:05 | 102,534,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from tester import *
import os
word=[]
for file in os.listdir("test_files"):
if file.endswith(".wav"):
base=os.path.basename(file)
word.append(os.path.splitext(base)[0])
# print word
for i in range(0,len(word)):
filename = "test_files/"+word[i]+".wav"
print "said word :", word[i]
output=feedToNetwork(extractFeature(filename), testInit())
print ""
| [
"[email protected]"
] | |
803fc33371c41205e3598194d1c1f91265ad7010 | e0ce113811e5fc5e49c49d620284a54fbd7ed17b | /firstPythonScript.py | cded3dabb6e1817075ca2d4965a3a3b56ca13436 | [] | no_license | ztanverakul/SWC_Workshop | 9d6a37947cb67a617aa4ed0a104a8b4fc31524f7 | b38314a50af5bcb395eff722fb31e674d2f0a83d | refs/heads/master | 2020-05-29T16:34:54.319734 | 2019-05-29T21:21:52 | 2019-05-29T21:21:52 | 189,252,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | # Author: Zach Tan
# Date: 05/29/2019
# Description: First Python Script Workshop
# Collaborators: tsk tsk
import numpy as np
from math import pi, sqrt, cos
PRIMES = [2, 3, 5, 7, 11, 13, 17, 19]
def sumOfAllPrimes():
return sum(PRIMES)
if __name__ == "__main__":
x = sumOfAllPrimes()
print(x)
| [
"[email protected]"
] | |
76a61090fc74d8ed5225d817554cbcbc9dcb39b6 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/impl/lobby/mode_selector/tooltips/simply_format_tooltip.py | 26dbba5d74d1f6d2d0d538cdcda996ec56235958 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 890 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/lobby/mode_selector/tooltips/simply_format_tooltip.py
from gui.impl.gen import R
from gui.impl.pub import ViewImpl
from frameworks.wulf import ViewSettings
from gui.impl.gen.view_models.windows.simple_tooltip_content_model import SimpleTooltipContentModel
class SimplyFormatTooltipView(ViewImpl):
def __init__(self, header, body):
settings = ViewSettings(R.views.lobby.mode_selector.tooltips.SimplyFormatTooltip(), model=SimpleTooltipContentModel())
settings.args = (header, body)
super(SimplyFormatTooltipView, self).__init__(settings)
@property
def viewModel(self):
return super(SimplyFormatTooltipView, self).getViewModel()
def _onLoading(self, header, body):
self.viewModel.setHeader(header)
self.viewModel.setBody(body)
| [
"[email protected]"
] | |
23dda399322e76d00571fdb6a25bdae6312b289c | dec1c2390df41f93e43cea6d78aee433facfef6a | /XChfut/spiders/xchfut.py | 0e6335e5698ef18307fefd976afaa96d1e8eb2fc | [] | no_license | w376692784/XChfut | 183d8a716ce255a302ccc9daf2d9c05e091e8e00 | ef4d5f64c4c794894060a79287be87108c09572e | refs/heads/master | 2020-03-07T18:38:33.537361 | 2018-04-01T16:08:24 | 2018-04-01T16:08:24 | 127,646,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | # -*- coding: utf-8 -*-
import scrapy
from XChfut.items import XchfutItem
class XchfutSpider(scrapy.Spider):
name = 'xchfut'
allowed_domains = ['xc.hfut.edu.cn']
# start_urls = ['http://xc.hfut.edu.cn/120/list']
baseURL = 'http://xc.hfut.edu.cn/120/list'
offset = 1
start_urls = [baseURL + str(offset) + '.htm']
print(start_urls)
def parse(self, response):
# print(response.body.decode('utf-8'))
url_list = response.xpath('//a[@class=" articlelist1_a_title "]')
# print(url_list)
for url in url_list:
# print(url.xpath('./@href').extract()[0])
if 'news.hfut.edu.cn' not in url.xpath('./@href').extract()[0]:
# print('http://xc.hfut.edu.cn/' + url.xpath('./@href').extract()[0])
yield scrapy.Request('http://xc.hfut.edu.cn/' + url.xpath('./@href').extract()[0]
,callback=self.parse_image)
# print(response.xpath('//a[@title="进入下一页"]/@disabled').extract())
        # the link's title attribute is Chinese for "go to next page"; the xpath
        # string must stay as-is to match the site's markup
        if response.xpath('//a[@title="进入下一页"]/@disabled').extract() != ['disabled']:
            self.offset += 1
            yield scrapy.Request(self.baseURL + str(self.offset) + '.htm', callback=self.parse)
        else:
            return
def parse_image(self,response):
item = XchfutItem()
# print(response.body.decode('utf-8'))
item['title_name'] = response.xpath('//h1[@class="atitle"]/text()').extract()[0].strip()
item['publish_data'] = response.xpath('//span[@class="posttime"]/text()').extract()[0][5:]
# print(item)
image_links = response.xpath('//p/img/@src | //span/img/@src').extract()
# print(image_links)
item['image_link'] = []
for image_link in image_links:
if '.jpg' in image_link or '.png' in image_link:
item['image_link'].append('http://xc.hfut.edu.cn'+image_link)
yield item
# print(item)
| [
"[email protected]"
] | |
952e9a66bd22c3f99c23a479012139aa46c0c990 | 4578b30c433510cf370d51475ec11cac9c3de1cb | /serpent/wamp_components/dashboard_api_component.py | d5d6ba83642cf2c066df0bb44b7f6cf853798e4b | [
"MIT"
] | permissive | SerpentAI/SerpentAI | 0a5b2d567b50388722c3a3c5152555ce94256c49 | 00a487dd088c6ca2528d025f3273c0a796efe210 | refs/heads/dev | 2023-03-08T14:14:07.171435 | 2020-05-22T22:34:09 | 2020-05-22T22:34:09 | 88,444,621 | 7,216 | 950 | MIT | 2020-07-15T00:41:35 | 2017-04-16T21:48:39 | Python | UTF-8 | Python | false | false | 5,508 | py | import asyncio
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
from autobahn.wamp.types import RegisterOptions, SubscribeOptions
from autobahn.wamp import auth
from serpent.config import config
from pony.orm import *
from serpent.dashboard.models import *
import json
class DashboardAPIComponent:
@classmethod
def run(cls):
print(f"Starting {cls.__name__}...")
url = "ws://%s:%s" % (config["crossbar"]["host"], config["crossbar"]["port"])
runner = ApplicationRunner(url=url, realm=config["crossbar"]["realm"])
runner.run(DashboardAPIWAMPComponent)
class DashboardAPIWAMPComponent(ApplicationSession):
def __init__(self, c=None):
super().__init__(c)
def onConnect(self):
self.join(config["crossbar"]["realm"], ["wampcra"], config["crossbar"]["auth"]["username"])
def onDisconnect(self):
print("Disconnected from Crossbar!")
def onChallenge(self, challenge):
secret = config["crossbar"]["auth"]["password"]
signature = auth.compute_wcs(secret.encode('utf8'), challenge.extra['challenge'].encode('utf8'))
return signature.decode('ascii')
async def onJoin(self, details):
@db_session
def list_dashboards():
dashboards = Dashboard.select(lambda d: True).order_by(lambda d: d.name)[:]
return {"dashboards": [dashboard.as_list_json() for dashboard in dashboards]}
@db_session
def fetch_dashboard(uuid):
dashboard = Dashboard.get(uuid=UUID(uuid))
if dashboard is None:
return {"error": f"No Dashboard found with uuid '{uuid}'..."}
return {"dashboard": dashboard.as_json()}
@db_session
def create_dashboard(dashboard_data):
dashboard = Dashboard.get(name=dashboard_data.get("name"))
if dashboard is not None:
return {"error": f"A Dashboard with name '{dashboard.name}' already exists..."}
dashboard = Dashboard.create(dashboard_data)
return {"dashboard": dashboard.as_list_json()}
@db_session
def delete_dashboard(uuid):
dashboard = Dashboard.get(uuid=UUID(uuid))
if dashboard is None:
return {"error": f"No Dashboard found with uuid '{uuid}'..."}
dashboard.delete()
commit()
return {"dashboard": None}
@db_session
def create_dashboard_metric(dashboard_uuid, metric_data):
dashboard = Dashboard.get(uuid=UUID(dashboard_uuid))
if dashboard is None:
return {"error": f"No Dashboard found with uuid '{dashboard_uuid}'..."}
metric = Metric(**{
**metric_data,
"dashboard": dashboard,
"x": 0,
"y": 0,
"w": 9,
"h": 5
})
commit()
return {"metric": metric.as_json()}
@db_session
def update_dashboard_metric(dashboard_uuid, metric_data):
dashboard = Dashboard.get(uuid=UUID(dashboard_uuid))
if dashboard is None:
return {"error": f"No Dashboard found with uuid '{dashboard_uuid}'..."}
metric_uuid = metric_data.pop("uuid")
metric = Metric.get(uuid=UUID(metric_uuid))
if metric is None:
return {"error": f"No Metric found with uuid '{metric_uuid}'..."}
metric.set(**metric_data)
commit()
return {"metric": metric.as_json()}
@db_session
def delete_dashboard_metric(uuid):
metric = Metric.get(uuid=UUID(uuid))
if metric is None:
return {"error": f"No Dashboard Metric found with uuid '{uuid}'..."}
metric.delete()
commit()
return {"metric": None}
@db_session
def save_dashboard_layout(uuid, layout):
dashboard = Dashboard.get(uuid=UUID(uuid))
if dashboard is None:
return {"error": f"No Dashboard found with uuid '{uuid}'..."}
dashboard.save_layout(layout)
return {"dashboard": dashboard.as_json()}
await self.register(list_dashboards, f"{config['crossbar']['realm']}.list_dashboards", options=RegisterOptions(invoke="roundrobin"))
await self.register(fetch_dashboard, f"{config['crossbar']['realm']}.fetch_dashboard", options=RegisterOptions(invoke="roundrobin"))
await self.register(create_dashboard, f"{config['crossbar']['realm']}.create_dashboard", options=RegisterOptions(invoke="roundrobin"))
await self.register(delete_dashboard, f"{config['crossbar']['realm']}.delete_dashboard", options=RegisterOptions(invoke="roundrobin"))
await self.register(create_dashboard_metric, f"{config['crossbar']['realm']}.create_dashboard_metric", options=RegisterOptions(invoke="roundrobin"))
await self.register(update_dashboard_metric, f"{config['crossbar']['realm']}.update_dashboard_metric", options=RegisterOptions(invoke="roundrobin"))
await self.register(delete_dashboard_metric, f"{config['crossbar']['realm']}.delete_dashboard_metric", options=RegisterOptions(invoke="roundrobin"))
await self.register(save_dashboard_layout, f"{config['crossbar']['realm']}.save_dashboard_layout", options=RegisterOptions(invoke="roundrobin"))
if __name__ == "__main__":
DashboardAPIComponent.run()
| [
"[email protected]"
] | |
455d0c34150b8e6b7ed81c6e60b50af4c973c050 | 2fb074eca893dd9ab67f430b8d8d142f7d565abd | /train.py | 7b1971074812f7540037070421fa8508d95e0a88 | [] | no_license | jireh-father/tensorflow-object-detection-framework | 63ec7225a68973ac024db90ed809dd91c6085151 | 21d2d849be31b28c7d2e34a6f589d501e1351699 | refs/heads/master | 2020-03-27T10:06:44.428006 | 2018-08-31T09:08:01 | 2018-08-31T09:08:01 | 146,396,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,470 | py | import tensorflow as tf
import datetime  # used by the logging loop at the bottom of this script
from data.pascal_voc import pascal_voc
from model import yolo
max_iter = 10000
pascal = pascal_voc('train')
inputs = tf.placeholder(tf.float32, [None, 448, 448, 3])
labels = tf.placeholder(tf.float32, [None, 7, 7, 5 + 20])
is_training = tf.placeholder(tf.bool, shape=(), name="is_training")
logits = yolo.build_model(inputs, is_training)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels))
predict = tf.argmax(logits, 1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predict, tf.argmax(labels, 1)), tf.float32))
learning_rate = 1e-4  # not defined anywhere in the original script; value assumed here
train = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1.0).minimize(loss)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for step in range(1, max_iter):
tr_x, tr_y = pascal.get()
feed_dict = {inputs: tr_x, labels: tr_y, is_training: True}
_, accuracy_result, loss_result = sess.run([train, accuracy, loss], feed_dict={inputs: tr_x, labels: tr_y,
is_training: True})
print('%d train accuracy: %f, loss: %f' % (step, accuracy_result, loss_result))
    if step % 100 == 0 and step > 0:
        # NOTE: data_sets and batch_size are never defined in this script; this
        # validation block looks like a leftover from an MNIST example and will
        # not run as-is.
        valid_x, valid_y = data_sets.validation.next_batch(batch_size)
        valid_x = valid_x.reshape((batch_size, 28, 28, 1))
        accuracy_result, loss_result = sess.run([accuracy, loss],
                                                feed_dict={inputs: valid_x, labels: valid_y,
                                                           is_training: True})
        print('%d validation accuracy: %f, loss: %f' % (step, accuracy_result, loss_result))
# NOTE: the loop below was pasted from the original class-based YOLO trainer;
# self.*, train_timer and load_timer are undefined here, so it will not run as-is.
for step in range(1, max_iter + 1):
tr_x, tr_y = pascal.get()
feed_dict = {inputs: tr_x, labels: tr_y}
summary_str, loss, _ = self.sess.run(
[self.summary_op, self.net.total_loss, self.train_op],
feed_dict=feed_dict)
train_timer.toc()
    log_str = ('''{} Epoch: {}, Step: {}, Learning rate: {},'''
               ''' Loss: {:5.3f}\nSpeed: {:.3f}s/iter,'''
               ''' Load: {:.3f}s/iter, Remain: {}''').format(
datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
self.data.epoch,
int(step),
round(self.learning_rate.eval(session=self.sess), 6),
loss,
train_timer.average_time,
load_timer.average_time,
train_timer.remain(step, self.max_iter))
print(log_str)
self.writer.add_summary(summary_str, step)
| [
"[email protected]"
] | |
97afb1da589398dcd9a82433e3d4866fe5bf319c | cb9f7b356bd0c06a88bf0ef3a3cabf6d7a8ecc03 | /tests/testSemiEmpiricalPrior.py | 57a03d12630f467bcd37d69ab58cfb352f7b7f5e | [] | no_license | jonathansick-shadow/meas_modelfit | ceace53cd8acf5723b860f80a4e8745e63a5857d | def2b87bb69b5d9cdd9d734520246b92a8a0ff29 | refs/heads/master | 2021-01-12T12:02:59.873544 | 2016-03-10T06:25:46 | 2016-03-10T06:25:46 | 45,875,371 | 0 | 0 | null | 2015-11-10T00:00:55 | 2015-11-10T00:00:55 | null | UTF-8 | Python | false | false | 4,792 | py | #!/usr/bin/env python
#
# LSST Data Management System
# Copyright 2008-2013 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
import unittest
import os
import numpy
import lsst.utils.tests
import lsst.shapelet
import lsst.afw.geom.ellipses
import lsst.pex.logging
import lsst.meas.modelfit
try:
import scipy.integrate
except ImportError:
scipy = None
numpy.random.seed(500)
lsst.pex.logging.Debug("meas.modelfit.SemiEmpiricalPrior", 10)
class SemiEmpiricalPriorTestCase(lsst.utils.tests.TestCase):
NUM_DIFF_STEP = 1E-4
def setUp(self):
        # a prior with broad ramps and non-zero slope; broad ramps make evaluating numerical
        # derivatives easier, and we want to do that to check the analytic ones
self.ctrl = lsst.meas.modelfit.SemiEmpiricalPrior.Control()
self.ctrl.ellipticityCore = 4.0
self.ctrl.ellipticitySigma = 10.0
self.ctrl.logRadiusMinOuter = self.ctrl.logRadiusMinInner - 8.0
self.ctrl.logRadiusMu = 2.0
self.ctrl.logRadiusSigma = 5.0
self.ctrl.logRadiusNu = 2.0
self.prior = lsst.meas.modelfit.SemiEmpiricalPrior(self.ctrl)
self.amplitudes = numpy.array([1.0], dtype=lsst.meas.modelfit.Scalar)
dtype = numpy.dtype([("eta1", float), ("eta2", float), ("lnR", float), ("p", float),
("d_eta1", float), ("d_eta2", float), ("d_lnR", float),
("d2_eta1_eta1", float), ("d2_eta1_eta2", float),
("d2_eta1_lnR", float), ("d2_eta2_eta2", float),
("d2_eta2_lnR", float), ("d2_lnR_lnR", float)])
self.data = numpy.loadtxt("tests/data/SEP.txt", dtype=dtype)
def tearDown(self):
del self.prior
del self.amplitudes
def testEvaluate(self):
for row in self.data:
p = self.prior.evaluate(numpy.array([row["eta1"], row["eta2"], row["lnR"]]), self.amplitudes)
self.assertClose(row["p"], p)
def testGradient(self):
for row in self.data:
grad = numpy.zeros(4, dtype=float)
hess = numpy.zeros((4,4), dtype=float)
self.prior.evaluateDerivatives(
numpy.array([row["eta1"], row["eta2"], row["lnR"]]),
self.amplitudes,
grad[:3], grad[3:],
hess[:3,:3], hess[3:, 3:], hess[:3,3:]
)
self.assertClose(row["d_eta1"], grad[0])
self.assertClose(row["d_eta2"], grad[1])
self.assertClose(row["d_lnR"], grad[2])
def testHessian(self):
for row in self.data:
grad = numpy.zeros(4, dtype=float)
hess = numpy.zeros((4,4), dtype=float)
self.prior.evaluateDerivatives(
numpy.array([row["eta1"], row["eta2"], row["lnR"]]),
self.amplitudes,
grad[:3], grad[3:],
hess[:3,:3], hess[3:, 3:], hess[:3,3:]
)
self.assertClose(row["d2_eta1_eta1"], hess[0,0])
self.assertClose(row["d2_eta1_eta2"], hess[0,1])
self.assertClose(row["d2_eta1_lnR"], hess[0,2])
self.assertClose(row["d2_eta2_eta2"], hess[1,1])
self.assertClose(row["d2_eta2_lnR"], hess[1,2])
self.assertClose(row["d2_lnR_lnR"], hess[2,2])
def evaluatePrior(self, eta1, eta2, lnR):
b = numpy.broadcast(eta1, eta2, lnR)
p = numpy.zeros(b.shape, dtype=lsst.meas.modelfit.Scalar)
for i, (eta1i, eta2i, lnRi) in enumerate(b):
p.flat[i] = self.prior.evaluate(numpy.array([eta1i, eta2i, lnRi]), self.amplitudes)
return p
def suite():
"""Returns a suite containing all the test cases in this module."""
lsst.utils.tests.init()
suites = []
suites += unittest.makeSuite(SemiEmpiricalPriorTestCase)
suites += unittest.makeSuite(lsst.utils.tests.MemoryTestCase)
return unittest.TestSuite(suites)
def run(shouldExit=False):
"""Run the tests"""
lsst.utils.tests.run(suite(), shouldExit)
if __name__ == "__main__":
run(True)
| [
"[email protected]"
] | |
09d60b3f73b86b0125b906dfd570b06727b86988 | ffd9601a04a6b00c9a0f972a93837e536d93aad2 | /posts/tests.py | 371e51fc48dd2aac74edf4f6375d6bd3cdea90dc | [] | no_license | cb-demos/microblog-backend | c2d1f28c08596cde05496dda72ae07603c4ed330 | 127d47bcdefb25de57ba876a72c25826b1cc0af0 | refs/heads/master | 2023-02-05T06:16:02.593819 | 2023-01-27T18:24:51 | 2023-01-27T18:24:51 | 240,112,247 | 1 | 62 | null | 2023-01-27T18:24:53 | 2020-02-12T20:49:32 | Python | UTF-8 | Python | false | false | 1,023 | py | from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APITestCase
from .models import Post, User
class PostTestCase(TestCase):
def setUp(self) -> None:
User.objects.create(username="dinkleberg")
def test_create_post(self):
user = User.objects.get(username="dinkleberg")
Post.objects.create(user=user, message="Hello, world!")
post = Post.objects.get(user=user)
self.assertEqual(post.message, "Hello, world!")
class PostAPITests(APITestCase):
def setUp(self) -> None:
user = User.objects.create(username="dinkleberg")
Post.objects.create(user=user, message="Hello, universe!")
Post.objects.create(user=user, message="Hello, world!")
def test_get_posts(self):
url = reverse("post-list")
response = self.client.get(url, format="json")
self.assertEqual(response.data[1]["message"], "Hello, universe!")
self.assertEqual(response.data[0]["message"], "Hello, world!")
| [
"[email protected]"
] | |
bfa5ff371b08bf2ccfce79a077f18b3492b05cc8 | bad3aedebe6858ee5973e5ee218533027d42ec52 | /nedbankprivatewealth/spiders/spider.py | 234d865ab57a5dfaa2d47dfc93534f845f2a7199 | [] | no_license | hristo-grudev/nedbankprivatewealth | 2683106d16c1d1aade81a4494da66bc92cceb41c | ebfbadabb58e1b000af8f2df18981c010ace8237 | refs/heads/main | 2023-02-24T03:47:25.304128 | 2021-02-04T06:40:47 | 2021-02-04T06:40:47 | 335,863,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import NedbankprivatewealthItem
from itemloaders.processors import TakeFirst
class CentralbankSpider(scrapy.Spider):
name = 'nedbankprivatewealth'
start_urls = ['https://nedbankprivatewealth.com/insights/']
def parse(self, response):
post_links = response.xpath('//div[@class="jet-engine-listing-overlay-wrap"]/div/div/div/section/div/div/div/div/div/section//a')
yield from response.follow_all(post_links, self.parse_post)
def parse_post(self, response):
title = response.xpath('//h1//text()').get()
description = response.xpath('/html/body/div[2]/div/div/section[2]/div/div/div[2]/div/div//text()[normalize-space() and not(ancestor::table)]').getall()
description = [p.strip() for p in description]
description = ' '.join(description).strip()
date = response.xpath('//div[@class="jet-listing-dynamic-field__content"]/text()').get()
item = ItemLoader(item=NedbankprivatewealthItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item()
| [
"[email protected]"
] | |
bada4df5ac2615720beb0b77628fc1019d21639a | 41fa204c476bf466cb4fe9deb85843d7fdbd4c4b | /test_api/migrations/0004_post_image.py | 754f536d8970af03a4d3d07e7c2224fddd4cbc4a | [] | no_license | hagull/farm_deploy | 9506b1d6dcbc513e8c4d97e9c1c8ba477fc07dd2 | 3609a33949e9deb5742263e4dd5f6c783ea5fda8 | refs/heads/master | 2022-12-15T00:35:00.572945 | 2018-12-08T06:11:04 | 2018-12-08T06:11:04 | 160,857,990 | 0 | 0 | null | 2022-12-08T01:28:03 | 2018-12-07T17:56:19 | Python | UTF-8 | Python | false | false | 393 | py | # Generated by Django 2.0.7 on 2018-11-29 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('test_api', '0003_remove_post_photo'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(blank=True, upload_to=''),
),
]
| [
"[email protected]"
] | |
e4b5c8363171b127cdc24bc75e373ff087a01603 | 40a8f14a73385118c442db9d2153674e64341428 | /hannah.py | 3d1aabe32be9092f7cb5e88826def25318b2fcd3 | [] | no_license | JoshNHales/Textgame | b1c6c9c7f3c9f3754dcc70cb1cfcb481737d092f | 534949be70f5a9245bed04ec397fffc3a1cfccfe | refs/heads/main | 2023-02-25T20:00:18.638835 | 2021-02-02T12:03:02 | 2021-02-02T12:03:02 | 335,274,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | import time
def guards():
print ("You hide behind the trees and escape from sight of the guards, running toward a moat you noticed earlier.")
time.sleep(1)
print ("You can either swim away to hopefully escape faster, or slowly look around the water for more clues, which will inevitably waste more time.")
time.sleep(1)
look_or_swim = input("Will you either: A) look around or B) swim?")
if look_or_swim.upper() == "A":
moat()
elif look_or_swim.upper() == "B":
        crocodile()  # 'croc_attack' is not defined anywhere; crocodile() is the closest matching scene
    else:
        print("Your input was invalid, try again!")
def moat():
print("You look around and your impeccible senses notice a strange ripple in the water...")
time.sleep(1)
print ("""
_.'^^'.
_ .-' ((@)) '. ./\/\/\/\/\/\,.---._
..'o'...-' ~~~ '~/\/\/\/\/\/\_.---. `-.
: /\/\/\/\,-' `-.__
^VvvvvvvvvvvVvVv | `-._
;^^^^^^^^^^^` / `\ / `-._
`````````
'.` `\ ( `'-._
.-----'` /\ \ )--.______.______._______/
(((------'`` '--------'(((----'
""")
print ("AAAAAHHHHHH IT'S A CROCODILEEEEE")
def crocodile():
print ("You panic at the sight of the beast before you and realise that there are only 3 options to choose from.")
time.sleep(1)
print ("You can either A) RUN FOR YOUR LIFE, B) Distract the creatures whilst you flee or C) Sneak past them quietly.")
time.sleep(1)
run_distract_sneak = input ("Will you choose A, B or C?")
if run_distract_sneak.upper() == "A":
print("The crocodiles are too fast for you and gobble you up before you can escape! GAME OVER")
elif run_distract_sneak.upper() == "B":
print ("You find a secret doorway which leads to your freedom")
elif run_distract_sneak.upper() == "C":
print ("You find an empty room with a note saying 'you can't run from your past...' The door suddenly slams shut and you black out.")
    else:
        print("Your input was invalid, try again!")
def doors() :
print ("You notice 3 doors in front of you, each with a mysterious marking etched on the front of it. Door 1 has a sun marking, door 2 a moon marking and door 3 a star marking.")
door_picked = input ("Do you choose either door 1, 2 or 3? >")
if door_picked == "1":
print ("You picked door number 1...")
elif door_picked == "2":
print ("You picked door number 2...")
elif door_picked == "3":
print ("You picked door number 3...")
def room_1():
print ("A sliver of light appears through the doorframe - could it be? As the door opens you see the outside world and run to freedom. YOU WIN")
def room_2():
print("The room is filled with an abundance of letters and images.")
print("Upon looking closer you realise that these letters are written in handwriting very familiar to your own.")
print("How peculiar!")
print("Horror dawns on you as you come to learn you were part of a top secret organisation.")
print("One letter in particular highlights that you have had your memory wiped by them so you don't reveal any of their secrets.")
print("The secret agency speak to you through microphones attached to the wall - they have been watching you the entire time.")
print("'You were never supposed to find out our plans...'")
print("The room is blown up to destroy all evidence, including you. GAME OVER")
def room_3():
print ("You are back where you started in the basement where you first awoke.")
guards() | [
"[email protected]"
] | |
5efe7c96c849f2e88108d81bac384cdf96a38647 | be7ab772fff390e7ec289b3b757a96f192c5a006 | /Quantum_Chemistry_Module/Quantum_Chemistry_Module/integrals.py | 7977071f6799b1c187afa192e1c9016113260cd9 | [] | no_license | adabbott/Quantum-Chemistry-Codes | 7942083047c397928cc05999c034fbc5226156b0 | a4d0c88b2b420406344f923a8303372ebbb2237e | refs/heads/master | 2020-12-03T00:02:37.975709 | 2017-10-09T19:57:45 | 2017-10-09T19:57:45 | 95,979,183 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,753 | py | import psi4
import numpy as np
import molecule
def obara_saika_recursion(PA, PB, alpha, AMa, AMb):
"""
Performs Obara-Saika recursion routine to fill in all integral cartesian components recursively
Used to construct overlap, kinetic, and dipole integral x, y, z component arrays
Parameters
----------
PA: wighted coordinate vector on atom A
PB: weighted coordinate vector on atom B
alpha: orbital exponent
AMa: angular momentum of A
AMb: angular momentum of B
"""
if len(PA) != 3 or len(PB) != 3:
raise "PA and PB must be xyz coordinates."
# Allocate space x, y, and z matrices
# We add one because the equation for the kinetic energy
# integrals require terms one beyond those in the overlap
x = np.zeros((AMa + 1, AMb + 1))
y = np.zeros((AMa + 1, AMb + 1))
z = np.zeros((AMa + 1, AMb + 1))
# Define 1/2alpha factor for convenience
oo2a = 1.0 / (2.0 * alpha)
# Set initial conditions (0a|0b) to 1.0 for each cartesian component
x[0, 0] = y[0, 0] = z[0, 0] = 1.0
# BEGIN RECURSION
# Fill in the [0,1] position with PB
if AMb > 0:
x[0, 1] = PB[0]
y[0, 1] = PB[1]
z[0, 1] = PB[2]
# Fill in the rest of row zero
for b in range(1, AMb):
x[0, b + 1] = PB[0] * x[0, b] + b * oo2a * x[0, b - 1]
y[0, b + 1] = PB[1] * y[0, b] + b * oo2a * y[0, b - 1]
z[0, b + 1] = PB[2] * z[0, b] + b * oo2a * z[0, b - 1]
# Now, we have for each cartesian component
# | 1.0 PB # #|
# | 0 0 0 0|
# | 0 0 0 0|
# | 0 0 0 0|
# Upward recursion in a for all b's
# Fill in the [1,0] position with PA
if AMa > 0:
x[1, 0] = PA[0]
y[1, 0] = PA[1]
z[1, 0] = PA[2]
# Now, we have for each cartesian component
# | 1.0 PB # #|
# | PA 0 0 0|
# | 0 0 0 0|
# | 0 0 0 0|
# Fill in the rest of row one
for b in range(1, AMb + 1):
x[1, b] = PA[0] * x[0, b] + b * oo2a * x[0, b - 1]
y[1, b] = PA[1] * y[0, b] + b * oo2a * y[0, b - 1]
z[1, b] = PA[2] * z[0, b] + b * oo2a * z[0, b - 1]
# Now, we have for each cartesian component
# | 1.0 PB # #|
# | PA # # #|
# | 0 0 0 0|
# | 0 0 0 0|
# Fill in the rest of column 0
for a in range(1, AMa):
x[a + 1, 0] = PA[0] * x[a, 0] + a * oo2a * x[a - 1, 0]
y[a + 1, 0] = PA[1] * y[a, 0] + a * oo2a * y[a - 1, 0]
z[a + 1, 0] = PA[2] * z[a, 0] + a * oo2a * z[a - 1, 0]
# Now, we have for each cartesian component
# | 1.0 PB # #|
# | PA # # #|
# | # 0 0 0|
# | # 0 0 0|
# Fill in the rest of the a'th row
for b in range(1, AMb + 1):
x[a + 1, b] = PA[0] * x[a, b] + a * oo2a * x[a - 1, b] + b * oo2a * x[a, b - 1]
y[a + 1, b] = PA[1] * y[a, b] + a * oo2a * y[a - 1, b] + b * oo2a * y[a, b - 1]
z[a + 1, b] = PA[2] * z[a, b] + a * oo2a * z[a - 1, b] + b * oo2a * z[a, b - 1]
# Now, we have for each cartesian component
# | 1.0 PB # #|
# | PA # # #|
# | # # # #|
# | # # # #|
# Return the results
return (x, y, z)
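# Quick check (sketch): for two s-type primitives (AMa = AMb = 0) on the same
# center with alpha = 1.0,
#     x, y, z = obara_saika_recursion(np.zeros(3), np.zeros(3), 1.0, 0, 0)
# each returned matrix is [[1.0]], i.e. the base case (0a|0b) = 1.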
def compute_integrals(mol):
"""
Computes the integrals for hartree fock
Parameters
----------
A Molecule object
Returns
-------
Overlap, Kinetic, Potential, and 2-electron integrals in a tuple
"""
basis = mol.basis
# make space to store the overlap, kinetic, and dipole integral matrices
S = np.zeros((basis.nao(),basis.nao()))
T = np.zeros((basis.nao(),basis.nao()))
Dx = np.zeros((basis.nao(),basis.nao()))
Dy = np.zeros((basis.nao(),basis.nao()))
Dz = np.zeros((basis.nao(),basis.nao()))
# loop over the shells, basis.nshell is the number of shells
for i in range(basis.nshell()):
for j in range(basis.nshell()):
# basis.shell is a shell (1s, 2s, 2p, etc.)
# for water, there are 5 shells: (H: 1s, H: 1s, O: 1s, 2s, 2p)
ishell = basis.shell(i)
jshell = basis.shell(j)
# each shell has some number of primitives which make up each component of a shell
# sto-3g has 3 primitives for every component of every shell.
nprimi = ishell.nprimitive
nprimj = jshell.nprimitive
# loop over the primitives within a shell
for a in range(nprimi):
for b in range(nprimj):
expa = ishell.exp(a) # exponents
expb = jshell.exp(b)
coefa = ishell.coef(a) # coefficients
coefb = jshell.coef(b)
AMa = ishell.am # angular momenta
AMb = jshell.am
# defining centers for each basis function
# mol.x() returns the x coordinate of the atom given by ishell.ncenter
# we use this to define a coordinate vector for our centers
A = np.array([mol.geometry.x(ishell.ncenter), mol.geometry.y(ishell.ncenter), mol.geometry.z(ishell.ncenter)])
B = np.array([mol.geometry.x(jshell.ncenter), mol.geometry.y(jshell.ncenter), mol.geometry.z(jshell.ncenter)])
alpha = expa + expb
zeta = (expa * expb) / alpha
P = (expa * A + expb * B) / alpha
PA = P - A
PB = P - B
AB = A - B
start = (np.pi / alpha)**(3 / 2) * np.exp(-zeta * (AB[0]**2 + AB[1]**2 + AB[2]**2))
# call the recursion
x, y, z = obara_saika_recursion(PA, PB, alpha, AMa+1, AMb+1)
# Basis function index where the shell begins
i_idx = ishell.function_index
j_idx = jshell.function_index
# We use counters to keep track of which component (e.g., p_x, p_y, p_z)
# within the shell we are on
counta = 0
for p in range(AMa + 1):
la = AMa - p # Let l take on all values, and p be the leftover a.m.
for q in range(p + 1):
ma = p - q # distribute all leftover a.m. to m and n
na = q
countb = 0
for r in range(AMb + 1):
lb = AMb - r # Let l take on all values, and r the leftover a.m.
for s in range(r + 1):
mb = r - s # distribute all leftover a.m. to m and n
nb = s
S[i_idx + counta, j_idx + countb] += start \
* coefa \
* coefb \
* x[la,lb] \
* y[ma,mb] \
* z[na,nb]
Tx = (1 / 2) * (la * lb * x[la - 1, lb - 1] + 4 * expa * expb * x[la + 1, lb + 1] \
- 2 * expa * lb * x[la + 1, lb - 1] - 2 * expb * la * x[la - 1, lb + 1]) \
* y[ma, mb] * z[na, nb]
Ty = (1 / 2) * (ma * mb * y[ma - 1, mb - 1] + 4 * expa * expb * y[ma + 1, mb + 1] \
- 2 * expa * mb * y[ma + 1, mb - 1] - 2 * expb * ma * y[ma - 1, mb + 1]) \
* x[la, lb] * z[na, nb]
Tz = (1 / 2) * (na * nb * z[na - 1, nb - 1] + 4 * expa * expb * z[na + 1, nb + 1] \
- 2 * expa * nb * z[na + 1, nb - 1] - 2 * expb * na * z[na - 1, nb + 1]) \
* x[la, lb] * y[ma, mb]
T[i_idx + counta, j_idx + countb] += start * coefa * coefb * (Tx + Ty + Tz)
dx = (x[la + 1, lb] + A[0] * x[la, lb]) * y[ma, mb] * z[na, nb]
dy = (y[ma + 1, mb] + A[1] * y[ma, mb]) * x[la, lb] * z[na, nb]
dz = (z[na + 1, nb] + A[2] * z[na, nb]) * x[la, lb] * y[ma, mb]
Dx[i_idx + counta, j_idx + countb] += start * coefa * coefb * dx
Dy[i_idx + counta, j_idx + countb] += start * coefa * coefb * dy
Dz[i_idx + counta, j_idx + countb] += start * coefa * coefb * dz
countb += 1
counta += 1
mints = psi4.core.MintsHelper(basis)
# Repulsion and two electron integrals are too hard to implement efficiently. Give up.
V = mints.ao_potential().to_array()
I = mints.ao_eri().to_array()
return S, T, V, I
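# Usage sketch (assumes `mol` wraps a psi4 geometry and basis as used above):
#     S, T, V, I = compute_integrals(mol)
# S and T come from the Obara-Saika code above; V and I are delegated to
# psi4's MintsHelper.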
| [
"[email protected]"
] | |
b918dc04443cb9a0487a0dc88494dc203026ba04 | 3670f46666214ef5e1ce6765e47b24758f3614a9 | /oneflow/python/onnx/flow2onnx.py | eb6a28b9f6f89bc89bd0997f5913badf7895c364 | [
"Apache-2.0"
] | permissive | ashing-zhang/oneflow | 0b8bb478ccd6cabea2dca0864defddab231919bf | 70db228a4d361c916f8f8d85e908795b479e5d20 | refs/heads/master | 2022-12-14T21:13:46.752535 | 2020-09-07T03:08:52 | 2020-09-07T03:08:52 | 293,535,931 | 1 | 0 | Apache-2.0 | 2020-09-07T13:28:25 | 2020-09-07T13:28:24 | null | UTF-8 | Python | false | false | 11,725 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
# oneflow.python.onnx.oneflow.python.onnx - rewrite oneflow graph to onnx graph
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import collections
import itertools
import logging
import os
import os.path
import sys
import traceback
from typing import Text, Optional, Dict, Callable, List
import numpy as np
from onnx import helper, onnx_pb
import oneflow
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.session_context as session_ctx
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.onnx
from oneflow.python.onnx import constants, schemas, util, handler, optimizer
from oneflow.python.onnx.graph import Graph
import oneflow.python.onnx.onnx_opset # pylint: disable=unused-import
logger = logging.getLogger(__name__)
def FlowToOnnxNaive(graph, shape_override):
"""
Convert node from oneflow format to onnx format.
Convert the oneflow nodes into an onnx graph with minimal rewrites so
we can use the onnx graph as intermediate graph.
The input/output/attr of each node are kept here and will be converted in other
following functions.
"""
dtypes = {}
for lbn in graph.helper.lbn2logical_blob_desc:
lbd = graph.helper.lbn2logical_blob_desc[lbn]
if lbn not in shape_override:
shape_override[lbn] = list(lbd.body.shape.dim)
dtypes[lbn] = util.Flow2OnnxDtype(lbd.body.data_type)
# some stats
op_cnt = collections.Counter()
attr_cnt = collections.Counter()
onnx_nodes = []
def is_user_op(node):
return node.WhichOneof("op_type") == "user_conf"
def get_op_conf(node):
conf_type = node.WhichOneof("op_type")
conf = getattr(node, conf_type)
return conf
def get_op_type(node):
if is_user_op(node):
return node.user_conf.op_type_name
return node.WhichOneof("op_type")[:-5]
def get_inputs(node):
if is_user_op(node):
ibns = handler.flow_op.ibn4op_type(get_op_type(node))
if ibns is None:
return list(
itertools.chain(*[x.s for x in node.user_conf.input.values()])
)
ipts = []
for ibn in ibns:
for key, val in node.user_conf.input.items():
if key == ibn:
assert len(val.s) == 1
ipts.append(val.s[0])
break
else:
raise ValueError(
"ibn {} of node {} (type {}) not found".format(
ibn, node.name, get_op_type(node)
)
)
return ipts
else:
conf = get_op_conf(node)
# it cannot cover all legacy op but it's enough
if hasattr(conf, "in"):
op_in = getattr(conf, "in")
if isinstance(op_in, str):
return [op_in]
else:
return op_in
else:
return []
def get_outputs(node):
if is_user_op(node):
obns = handler.flow_op.obn4op_type(get_op_type(node))
if obns is None:
assert all([len(x.s) == 1 for x in node.user_conf.output.values()])
return [x.s[0] for x in node.user_conf.output.values()]
outputs = []
for obn in obns:
for key, val in node.user_conf.output.items():
if key == obn:
assert len(val.s) == 1
outputs.append(val.s[0])
break
else:
raise ValueError(
"obn {} of node {} (type {}) not found".format(
obn, node.name, get_op_type(node)
)
)
else:
conf = get_op_conf(node)
# it cannot cover all legacy op but it's enough
if hasattr(conf, "out"):
out = getattr(conf, "out")
if isinstance(out, str):
outputs = [out]
else:
outputs = out
else:
outputs = []
outputs = ["{}/{}".format(node.name, output) for output in outputs]
return outputs
# minimal conversion of attributes
for node in graph.net.op:
attr = {}
op_cnt[get_op_type(node)] += 1
attrs = node.user_conf.attr.keys() if is_user_op(node) else []
for a in attrs:
attr_cnt[a] += 1
if a == "dtype":
attr[a] = util.Flow2OnnxDtype(util.get_flow_node_attr(node, "dtype"))
else:
attr[a] = util.get_flow_node_attr(node, a)
try:
op_type = get_op_type(node)
input_names = get_inputs(node)
output_names = get_outputs(node)
onnx_node = helper.make_node(
op_type, input_names, output_names, name=node.name, **attr
)
onnx_nodes.append(onnx_node)
except Exception as ex:
logger.error("pass1 convert failed for %s, ex=%s", node, ex)
raise
return onnx_nodes, op_cnt, attr_cnt, dtypes, shape_override
def FlowOnnxMapping(g, ops_mapping):
logger.debug("Mapping Oneflow node to ONNX node(s)")
mapped_op = collections.Counter()
unmapped_op = collections.Counter()
exceptions = []
ops = list(g.get_nodes())
for node in ops:
logger.debug("Process node: %s\n%s", node.name, node.summary)
if node.skip_conversion:
logger.debug("explicitly skip node " + node.name)
continue
op = node.type
map_info = ops_mapping.get(op)
if map_info is None:
unmapped_op[op] += 1
logger.error("oneflow op [%s: %s] is not supported", node.name, op)
continue
mapped_op[op] += 1
func, onnx_op, kwargs = map_info
if onnx_op is not None:
node.type = onnx_op
try:
func(g, node, **kwargs)
node.skip_conversion = True
except Exception as ex:
logger.error(
"Failed to convert node %s\n%s", node.name, node.summary, exc_info=1
)
exceptions.append(ex)
return mapped_op, unmapped_op, exceptions
def TopologicalSort(g, continue_on_error):
ops = g.get_nodes()
if not continue_on_error:
g.TopologicalSort(ops)
else:
try:
g.TopologicalSort(ops)
except: # pylint: disable=bare-except
# if we continue on error, ignore graph cycles so we can report all missing ops
pass
@session_ctx.try_init_default_session
@oneflow_export("onnx.export")
def Export(
job_func: Callable,
model_save_dir: Text,
onnx_filename: Text,
continue_on_error: bool = False,
opset: Optional[int] = None,
extra_opset: Optional[int] = None,
shape_override: Optional[Dict[Text, List[int]]] = None,
external_data: bool = False,
):
r"""Export a oneflow model into ONNX format.
Args:
job_func: The job function
model_save_dir: The directory containing oneflow model weights. Users are expected to call check_point.save(dir), wait for the model saving finishing, and pass the argument 'dir' as model_save_dir.
onnx_filename: a string for the output filename
continue_on_error: if an op can't be processed (aka there is no mapping), continue
opset: the opset to be used (int, default is oneflow.python.onnx.constants.PREFERRED_OPSET)
extra_opset: list of extra opset's, for example the opset's used by custom ops
shape_override: dict with inputs that override the shapes given by oneflow
external_data: Save weights as ONNX external data, usually to bypass the 2GB file size limit of protobuf.
"""
assert os.getenv("ENABLE_USER_OP") != "False"
assert os.path.isdir(model_save_dir)
job_set = c_api_util.GetJobSet()
job_name = job_func.__name__
for job in job_set.job:
if job.job_conf.job_name == job_name:
onnx_graph = ProcessFlowGraph(
job,
model_save_dir,
continue_on_error=continue_on_error,
opset=opset,
extra_opset=extra_opset,
shape_override=shape_override,
)
onnx_graph = optimizer.OptimizeGraph(onnx_graph)
model_proto = onnx_graph.MakeModel(
job_name, onnx_filename, external_data=external_data
)
with open(onnx_filename, "wb") as f:
try:
f.write(model_proto.SerializeToString())
except ValueError as e:
raise ValueError(
"Error occured when running model_proto.SerializeToString(). If the model is larger than 2GB, please specify external_data=True when calling flow.onnx.export. Original error message:\n{}".format(
e
)
)
return
raise ValueError('Cannot find job "{}" in jobset'.format(job_name))
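# Usage sketch, following the docstring above: save the weights first via
# check_point.save(model_save_dir), wait for saving to finish, then call
#     flow.onnx.export(job_func, model_save_dir, "model.onnx")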
def ProcessFlowGraph(
flow_graph,
model_save_dir,
continue_on_error=False,
opset=None,
extra_opset=None,
shape_override=None,
):
opset = util.FindOpset(opset)
logger.info("Using opset <onnx, %s>", opset)
if opset > schemas.get_max_supported_opset_version():
logger.warning(
"Currently installed onnx package %s is too low to support opset %s, "
"please upgrade onnx package to avoid potential conversion issue.",
util.get_onnx_version(),
opset,
)
if shape_override is None:
shape_override = {}
(onnx_nodes, op_cnt, attr_cnt, dtypes, output_shapes,) = FlowToOnnxNaive(
flow_graph, shape_override
)
g = Graph(onnx_nodes, model_save_dir, output_shapes, dtypes, opset, extra_opset,)
# create ops mapping for the desired opsets
ops_mapping = handler.flow_op.CreateMapping(g.opset, g.extra_opset)
# some nodes may already copied into inner Graph, so remove them from main Graph.
TopologicalSort(g, continue_on_error)
mapped_op, unmapped_op, exceptions = FlowOnnxMapping(g, ops_mapping)
if unmapped_op:
logger.error("Unsupported ops: %s", unmapped_op)
if exceptions and not continue_on_error:
raise exceptions[0]
# onnx requires topological sorting
TopologicalSort(g, continue_on_error)
g.UpdateProto()
logger.debug(
"Summay Stats:\n"
"\toneflow ops: {}\n"
"\toneflow attr: {}\n"
"\tonnx mapped: {}\n"
"\tonnx unmapped: {}".format(op_cnt, attr_cnt, mapped_op, unmapped_op)
)
return g
| [
"[email protected]"
] | |
39413d80c0324f3a64e68c37da15be92d9800182 | 4b2b37d70dbc7959a340736d2b5dcf5a2d53c19f | /data/CurrencySymbols.py | 18d2f1c0195755417198f6f8ff7e9e31a3f01a39 | [] | no_license | brunoqs/world-cities-mongodb | de5a16fa41d31bcc32df80d90a2f06f5723a4bb1 | b84ac4068d70ce613377e2dce3888e14e91886a9 | refs/heads/master | 2022-04-19T19:58:29.452822 | 2020-03-26T19:28:24 | 2020-03-26T19:30:32 | 250,352,313 | 0 | 0 | null | 2020-03-26T19:26:47 | 2020-03-26T19:26:46 | null | UTF-8 | Python | false | false | 2,286 | py | CURRENCY_SYMBOLS = {
"AED": "د.إ", "AFN": "؋", "ALL": "L", "AMD": "դր.", "ANG": "ƒ", "AOA": "Kz",
"ARS": "$", "AUD": "$", "AWG": "ƒ", "AZN": "m",
"BAM": "KM", "BBD": "$", "BDT": "৳", "BGN": "лв", "BHD": "ب.د", "BIF": "Fr",
"BMD": "$", "BND": "$", "BOB": "Bs.", "BRL": "R$", "BSD": "$", "BTN": "Nu",
"BWP": "P", "BYR": "Br", "BZD": "$",
"CAD": "$", "CDF": "Fr", "CHF": "Fr", "CLP": "$", "CNY": "¥", "COP": "$",
"CRC": "₡", "CUP": "$", "CVE": "$, Esc", "CZK": "Kč",
"DJF": "Fr", "DKK": "kr", "DOP": "$", "DZD": "د.ج",
"EEK": "KR", "EGP": "£,ج.م", "ERN": "Nfk", "ETB": "Br", "EUR": "€",
"FJD": "$", "FKP": "£",
"GBP": "£", "GEL": "ლ", "GHS": "₵", "GIP": "£", "GMD": "D", "GNF": "Fr",
"GTQ": "Q", "GYD": "$",
"HKD": "$", "HNL": "L", "HRK": "kn", "HTG": "G", "HUF": "Ft",
"IDR": "Rp", "ILS": "₪", "INR": "₨", "IQD": "ع.د", "IRR": "﷼", "ISK": "kr",
"JMD": "$", "JOD": "د.ا", "JPY": "¥",
"KES": "Sh", "KGS": "лв", "KHR": "៛", "KMF": "Fr", "KPW": "₩", "KRW": "₩",
"KWD": "د.ك", "KYD": "$", "KZT": "Т",
"LAK": "₭", "LBP": "ل.ل", "LKR": "ரூ", "LRD": "$", "LSL": "L", "LTL": "Lt",
"LVL": "Ls", "LYD": "ل.د",
"MAD": "د.م.", "MDL": "L", "MGA": "Ar", "MKD": "ден", "MMK": "K",
"MNT": "₮", "MOP": "P", "MRO": "UM", "MUR": "₨", "MVR": "ރ.", "MWK": "MK",
"MXN": "$", "MYR": "RM", "MZN": "MT",
"NAD": "$", "NGN": "₦", "NIO": "C$", "NOK": "kr", "NPR": "₨", "NZD": "$",
"OMR": "ر.ع.",
"PAB": "B/.", "PEN": "S/.", "PGK": "K", "PHP": "₱", "PKR": "₨", "PLN": "zł",
"PYG": "₲",
"QAR": "ر.ق",
"RON": "RON", "RSD": "RSD", "RUB": "р.", "RWF": "Fr",
"SAR": "ر.س", "SBD": "$", "SCR": "₨", "SDG": "S$", "SEK": "kr", "SGD": "$",
"SHP": "£", "SLL": "Le", "SOS": "Sh", "SRD": "$", "STD": "Db",
"SYP": "£, ل.س", "SZL": "L",
"THB": "฿", "TJS": "ЅМ", "TMT": "m", "TND": "د.ت", "TOP": "T$", "TRY": "₤",
"TTD": "$", "TWD": "$", "TZS": "Sh",
"UAH": "₴", "UGX": "Sh", "USD": "$", "UYU": "$", "UZS": "лв",
"VEF": "Bs", "VND": "₫", "VUV": "Vt",
"WST": "T",
"XAF": "Fr", "XCD": "$", "XOF": "Fr", "XPF": "Fr",
"YER": "﷼",
"ZAR": "R", "ZMK": "ZK", "ZWL": "$",
}
| [
"[email protected]"
] | |
b35ec13793a4249fefe3e3d0a0d66070e12c7018 | a97950354333339cefea7dee152915bbfd2207f7 | /First steps in Python for the CDI course/lab0CDI_4.py | 9372bf69f07de8728a8030900c03ef38576a63f7 | [] | no_license | Fibernalia/CDI | 92366ab1829e0d20f0e9876f4d64ec8503451998 | 569b91408ebf81813fc1cd507e2556ddd1e3bef4 | refs/heads/master | 2020-12-03T00:07:22.621629 | 2017-07-01T23:31:29 | 2017-07-01T23:31:29 | 95,990,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | def decode(bstr,C):
output = ""
aux = ""
for bit in bstr:
if not aux:
letter = [tupla[0] for tupla in C if tupla[1] == bit]
else:
letter = [tupla[0] for tupla in C if tupla[1] == aux+bit]
if not letter:
aux += bit
else:
            output += letter[0]  # take the decoded symbol directly
aux = ""
return output
def main():
bstr = '10100011011111100010011110101100101011101011100010011110010111001101111010010010110010011110000110101100111010111001111100010001011100110'
C = [('a', '11100'),('d', '11101'),('e', '00'),('f', '111100'),('g', '1011'),
('j', '010'),('m', '111101'),('n', '100'),('p', '111110'),('s', '1010'),
('t', '110'),('u', '011'),('z', '111111')]
decoding = decode(bstr,C)
print(decoding)
main()
'''
setzejutgesdunjutjatmengenfetgedunpenjat
'''
| [
"[email protected]"
] | |
648af8987d7edd9ccf4f4543106244e033244c02 | fe474e37f5900e90e9c98c3cc35b0f2702db848e | /algo/src-go/archive/train.py | 7d4893e38d88ddc166aa3c7d90b94d2acdeed76a | [
"MIT"
] | permissive | RunanWang/somewhere | 81c910ee151ac820bfd3b3f859dfbc1751c28d22 | 0ba93a3f5f91042b4910171dda981f6c863ee581 | refs/heads/master | 2023-01-13T18:03:06.810551 | 2020-11-14T11:10:59 | 2020-11-14T11:10:59 | 220,627,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,216 | py | #!/usr/bin/env python
# coding: utf-8
# In[54]:
import pymongo
import pandas as pd
myclient = pymongo.MongoClient("mongodb://182.92.196.182:27017/")
mydb = myclient["kit"]
rec_col = mydb["records"]
item_col = mydb["items"]
user_col = mydb["users"]
store_col = mydb["stores"]
records = rec_col.find({},{"_id":0, "query":0})
items = item_col.find()
users = user_col.find()
stores = store_col.find()
# for item in stores:
# print(item)
df_rec = pd.DataFrame(list(records))
df_item = pd.DataFrame(list(items))
df_user = pd.DataFrame(list(users))
df_store = pd.DataFrame(list(stores))
# print(df_rec)
# print(df_item)
# print(df_user)
# print(df_store)
# In[55]:
# print(df_rec)
df_item = df_item.rename(columns={'_id':'item_id'})
df_item.set_index(['item_id'],inplace=True)
# print(df_item)
df_user = df_user.rename(columns={'_id':'user_id'})
df_user.set_index(['user_id'],inplace=True)
df_store = df_store.rename(columns={'_id':'store_id','timestamp':'store_timestamp'})
df_store.set_index(['store_id'],inplace=True)
# In[56]:
df_result = df_rec.set_index(['item_id'])
df_result = pd.concat([df_result, df_item], axis = 1, join='inner')
df_result = df_result.reset_index()
# print(df_result)
df_result.set_index(['user_id'],inplace=True)
# print(df_result)
df_result = pd.concat([df_result, df_user], axis = 1, join='inner')
df_result = df_result.reset_index()
# print(df_user)
# print(df_result)
df_result.set_index(['store_id'],inplace=True)
df_result = pd.concat([df_result, df_store], axis = 1, join='inner')
df_result = df_result.reset_index()
# df_result.drop(['item_id'])
df_result.to_csv("./result.csv",encoding='gbk')
# In[57]:
header_id = [
"store_id", "user_id", "item_id"
]
header_cate = [
"item_brand", "item_name", "user_gender", "store_city"
]
header_cont=[
"item_price", "item_salecount", "item_score", "store_level", "item_timestamp", "store_timestamp"
]
header_cont_user=[
"user_age", "user_historysum"
]
header_time=[
"timestamp", "item_timestamp", "user_timestamp", "store_timestamp"
]
header_label=[
"is_trade"
]
df_cont = df_result[header_cont]
df_cont_user = df_result[header_cont_user]
df_id = df_result[header_id]
df_cate = df_result[header_cate]
df_time = df_result[header_time]
df_label = df_result[header_label]
# In[58]:
from sklearn import preprocessing
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(df_cont)
df_cont = pd.DataFrame(scaled)
print(df_cont)
# In[59]:
df_cate = pd.get_dummies(df_cate)
print(df_cate)
frame = [df_cont,df_cont_user, df_cate]
X = pd.concat(frame, axis=1)
y = df_label
# In[62]:
from keras.models import Model
from keras.layers import Input, Dense, Lambda, multiply
from keras import backend as K
from keras import regularizers
import utils as utils
import h5py
MODEL_PATH = './mlr_model.h5'
def keras_sum_layer_output_shape(input_shape):
    # compute the Lambda layer's output shape: the summed axis collapses to 1
shape = list(input_shape)
assert len(shape) == 2
shape[-1] = 1
return tuple(shape)
def keras_sum_layer(x):
    # sum over axis 1 (the wide_m mixture components), keeping the dimension
return K.sum(x, axis=1, keepdims=True)
wide_m = 12
input_wide = Input(shape=(X.shape[1], ))
# 第二层为LR和权重层,采用l2正则化项
wide_divide = Dense(wide_m,
activation='softmax',
bias_regularizer=regularizers.l2(0.01))(input_wide)
wide_fit = Dense(wide_m,
activation='sigmoid',
bias_regularizer=regularizers.l2(0.01))(input_wide)
wide_ele = multiply([wide_divide, wide_fit])
out = Lambda(keras_sum_layer,
output_shape=keras_sum_layer_output_shape)(wide_ele)
model = Model(inputs=input_wide, outputs=out)
model.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['accuracy'])
model.fit(X,
y,
epochs=10,
batch_size=2,
callbacks=[
utils.roc_callback(training_data=[X, y], validation_data=[X, y])
])
model.save(MODEL_PATH)
# model_json = model.to_json()
# with open('model.json', 'w') as file:
# file.write(model_json)
# model.save_weights('model.json.h5')
print("训练完毕")
| [
"[email protected]"
] | |
b2f924ba5a1716e08f73ad21a4493d819683c942 | 8899e36880a4e883e24df38e6f4f875696e5b3ca | /3/snake/snake_Joystick_detail.py | 6e1f7c43e993f4b0449feb0da57d3bbf4f2dbabb | [] | no_license | alexliradesign/Examples | 6dcf9d9bc81f0a6a5cfcc713d31a342693f6a8b4 | d859dce23e30b6d52b0fe97339db177f8dbe93b9 | refs/heads/master | 2023-07-16T18:45:44.579076 | 2021-08-18T13:56:51 | 2021-08-18T13:56:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,693 | py | from pycm import *
import time
import math
_RATIO = 1000
m_anJoystick = []
m_nCnt_Motor = 13
class CVar:
IsTorqOn = False
nScreenWidth = 0
nScreenHeight = 0
nTouch_XY_X0 = 0
nTouch_XY_Y0 = 0
nTouch_Pos0 = 0
nTouch_Pos_X0 = 0
nTouch_Pos_Y0 = 0
nTouch_XY_X1 = 0
nTouch_XY_Y1 = 0
nTouch_Pos1 = 0
nTouch_Pos_X1 = 0
nTouch_Pos_Y1 = 0
nButton_0 = -1
nButton_1 = -1
nBack_Background = 0
btn0 = None
btn1 = None
class CTimer:
nTimer = 0
IsTimer = False
def __init__(self):
self.nTimer = 0
self.IsTimer = 0
def Set(self):
self.IsTimer = True
self.nTimer = millis()
def Get(self):
if self.IsTimer :
return millis() - self.nTimer
return 0
def Destroy(self):
self.IsTimer = False
m_nInterval = 6 # 6
m_anIDs = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]
m_IsTorqOn = False
def Rad2Deg(rad):
return rad * 180.0 / math.pi
def Deg2Rad(Angle):
return Angle * math.pi / 180.0
def Sin(fAngle):
return math.sin(Deg2Rad(fAngle))
def Cos(fAngle):
return math.cos(Deg2Rad(fAngle))
def CalcS(fSlide_Angle, fTurn_Angle = 0, fTilt_Interval=3,nLine_Limit=6, nLine = 1, nCol = 0):
nLine = nLine-1
fMod = ((nLine + nCol) % nLine_Limit) % fTilt_Interval
fTmp = ((nLine + nCol) % nLine_Limit) // fTilt_Interval
if(fMod<fTilt_Interval-2):
return 0
fDir = pow(-1,((fTilt_Interval-fTmp)+1))
fResult = fDir*fSlide_Angle
if (fTurn_Angle > 0) and (fDir > 0):
fResult -= fTurn_Angle
elif (fTurn_Angle < 0) and (fDir < 0):
fResult -= fTurn_Angle
return fResult
def CalcT(fTilt_Angle, fTilt_Interval=3,nLine_Limit=6, nLine=1,nCol=0):
nLine = nLine-1
fTmp = ((nLine + nCol) % nLine_Limit) % fTilt_Interval
if(fTmp<fTilt_Interval-2):
return 0
return pow(-1,(fTilt_Interval-fTmp+1))*fTilt_Angle
def CalcSnake(nIndex, nTime_ms, fAngle_Turn = 0, IsBackward = False, nCnt_Motor = 13, fAngle_Head_Up = 10, fAngle_Body_Up_Down = 10, fAngle_Body_Left_Right = 70):
#print(nTime_ms, fAngle_Head_Up, fAngle_Body_Up_Down, fAngle_Body_Left_Right)
nLine_First = 1
nLine_Last = m_nInterval
nLine_Limit = m_nInterval
fTilt_Interval = m_nInterval/2
#nT = 2
nLine = 0
i = nIndex
#for i in range(nLine_First, nLine_Last+1):
if IsBackward == False :
nLine = nLine_Last - i + 1
else :
nLine = i
afBody_T=[]
afBody_T.append(fAngle_Head_Up)
for nCol in range(0,nCnt_Motor):
fTmp = CalcT(fAngle_Body_Up_Down,fTilt_Interval,nLine_Limit, nLine, nCnt_Motor-nCol)
#print(fTmp)
afBody_T.append(fTmp)
# Head
afBody_T[0] = afBody_T[0] - afBody_T[1]/2
#Tail
afBody_T[len(afBody_T) - 1] = afBody_T[len(afBody_T) - 1] - afBody_T[len(afBody_T) - 2] / 2
afBody_S=[]
afBody_S.append(0)
for nCol in range(1,nCnt_Motor):
fTmp = CalcS(fAngle_Body_Left_Right,fAngle_Turn,fTilt_Interval,nLine_Limit, nLine, nCnt_Motor-nCol)
#print(fTmp)
afBody_S.append(fTmp)
afBody_S[0] = -afBody_S[2]
afBody = []
afBody.append(nTime_ms)
for nCol in range(0,nCnt_Motor):
afBody.append(afBody_T[nCol])
afBody.append(afBody_S[nCol])
#print(nLine,afBody)
#delay(100)
return afBody
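# CalcSnake() returns a frame laid out as
#   [move_time_ms, T0, S0, T1, S1, ..., T12, S12]
# -- one tilt (up/down) and one swing (left/right) angle per body segment --
# which is the order Move()/Writes() below expect: values[0] is the move time
# and the remaining entries map one-to-one onto the IDs passed in.
# Illustrative check (hypothetical call):
# frame = CalcSnake(1, 300)
# assert len(frame) == 1 + 2 * m_nCnt_Motor   # 27 entries for 13 segments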
# Converts an angle to the raw data value used by the motor (float -> int)
def CalcAngle2Raw(fAngle) :
if fAngle == None :
fAngle = 0.0
return (int)(round(fAngle * 4096.0 / 360.0 + 2048.0))
def CalcRaw2Angle(nRaw) :
if nRaw == None :
nRaw = 0
return (float)(360.0 * ((nRaw - 2048.0) / 4096.0))
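# Worked examples of the angle <-> raw mapping above (illustrative; 2048 raw
# ticks is the 0-degree center and 4096 ticks span a full 360 degrees, the
# usual Dynamixel position convention):
#   CalcAngle2Raw(0)    -> 2048
#   CalcAngle2Raw(90)   -> 3072
#   CalcRaw2Angle(3072) -> 90.0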
def Setup_Speed(nID, nValue) :
DXL(nID).write32(112, nValue) # 112 : profile velocity
def Writes(IDs, nAddress, values, size = 4, fPercent = 1.0):
    # adjust the profile velocity
Setup_Speed(254, int(values[0] * fPercent))
#syncwrite
etc.write8(1200,0)
etc.write16(1202,nAddress)
etc.write8(1204,size)
    nPos = 1
for i in IDs:
nID = i
etc.write8(1205,nID)
etc.write32(1206,CalcAngle2Raw(values[nPos]))
etc.write8(1200,1)
nPos = nPos + 1
etc.write8(1200,2)
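# How Writes() assembles a sync-write, inferred from the register usage above
# (an assumption based on this script, not on official documentation):
#   etc.write8(1200, 0)   resets the packet builder,
#   etc.write16(1202, a)  sets the target control-table address (116 = goal position),
#   etc.write8(1204, n)   sets the per-servo payload size in bytes,
#   etc.write8(1205, id) + etc.write32(1206, v) stage one (id, value) pair,
#   etc.write8(1200, 1)   appends the staged pair to the packet, and
#   etc.write8(1200, 2)   fires the assembled sync-write onto the bus.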
def Move(IDs, values, fPercent = 1.0, fPercent_Delay = 1.0) :
if (fPercent_Delay == 0):
fPercent_Delay = fPercent
Writes(IDs, 116, values, 4, fPercent)
Wait((int)(round(values[0] * fPercent_Delay)))
def Wait(nMilliseconds) :
tmr = CTimer()
tmr.Set()
while(tmr.Get() < nMilliseconds) :
        # if btnList != None:
        #     # check touch input from the smartphone
        #     GetTouch_Down()
        #     # check for button presses - this consumes the touch
        #     nNum0, nNum1, Event_Dn0, Event_Dn1, Event_Up0, Event_Up1, Btn0, Btn1 = GetButton(btnList)
CheckJoystick()
#pass #delay(1)
#tmr = None
def TorqAll(IsOn, IsSound = None):
TorqOnOff(-1, IsOn, IsSound)
def TorqOnOff(nNum, IsOn, IsSound = None):
    global m_IsTorqOn
    if (IsOn == True) :
        if IsSound == True :
            buzzer.melody(14)
        if (nNum >= 0) :
            DXL(nNum).torque_on()
        else :
            dxlbus.torque_on()
        m_IsTorqOn = True
    else :
        m_IsTorqOn = False
        if IsSound == True :
            buzzer.melody(15)
        # stop any running motion immediately
        #Motion_Play(-3)
        # wait for the motion to finish
        #waitMotionStop()
        if (nNum >= 0) :
            DXL(nNum).torque_off()
        else :
            dxlbus.torque_off()
def GetTouch_Down():
XY_X0 = 0
XY_Y0 = 0
Pos_X0 = 0
Pos_Y0 = 0
Pos0 = 0
XY_X1 = 0
XY_Y1 = 0
Pos_X1 = 0
Pos_Y1 = 0
Pos1 = 0
Tmp = smart.read64(10470)
nTouch0 = Tmp[0] & 0xffffffff
nTouch1 = Tmp[1] & 0xffffffff
IsChanged = False
if (nTouch0 > 0) :
XY_X0 = nTouch0 & 0x0000FFFF
XY_Y0 = (nTouch0 >> 16) & 0x0000FFFF
Pos_X0 = (int)((XY_X0 / CVar.nScreenWidth) * 5 + 1)
Pos_Y0 = (int)((XY_Y0 / CVar.nScreenHeight) * 5 + 1)
Pos0 = Pos_X0 + (Pos_Y0 - 1) * 5
XY_X0 = (int)(XY_X0 * _RATIO / CVar.nScreenWidth)
XY_Y0 = (int)(XY_Y0 * _RATIO / CVar.nScreenHeight)
if (nTouch1 > 0) :
XY_X1 = nTouch1 & 0x0000FFFF
XY_Y1 = (nTouch1 >> 16) & 0x0000FFFF
Pos_X1 = (int)((XY_X1 / CVar.nScreenWidth) * 5 + 1)
Pos_Y1 = (int)((XY_Y1 / CVar.nScreenHeight) * 5 + 1)
Pos1 = Pos_X1 + (Pos_Y1 - 1) * 5
XY_X1 = (int)(XY_X1 * _RATIO / CVar.nScreenWidth)
XY_Y1 = (int)(XY_Y1 * _RATIO / CVar.nScreenHeight)
CVar.nTouch_Pos0 = Pos0
CVar.nTouch_Pos_X0 = Pos_X0
CVar.nTouch_Pos_Y0 = Pos_Y0
CVar.nTouch_XY_X0 = XY_X0
CVar.nTouch_XY_Y0 = XY_Y0
CVar.nTouch_Pos1 = Pos1
CVar.nTouch_Pos_X1 = Pos_X1
CVar.nTouch_Pos_Y1 = Pos_Y1
CVar.nTouch_XY_X1 = XY_X1
CVar.nTouch_XY_Y1 = XY_Y1
else :
CVar.nTouch_Pos0 = 0
CVar.nTouch_Pos_X0 = 0
CVar.nTouch_Pos_Y0 = 0
CVar.nTouch_XY_X0 = 0
CVar.nTouch_XY_Y0 = 0
CVar.nTouch_Pos1 = 0
CVar.nTouch_Pos_X1 = 0
CVar.nTouch_Pos_Y1 = 0
CVar.nTouch_XY_X1 = 0
CVar.nTouch_XY_Y1 = 0
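# Worked example of the packed-touch decoding above (hypothetical raw value):
# 0x00F000A0 unpacks to X = 0x00A0 = 160 (low word) and Y = 0x00F0 = 240 (high
# word); on a 480x800 screen that lands in grid cell Pos_X0 = 2, Pos_Y0 = 2,
# i.e. nTouch_Pos0 = 7 on the 5x5 touch pad the main loop reads.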
def GetResolution():
for i in range(0, 100):
screen = smart.read32(10460)
CVar.nScreenWidth = screen & 0x0000FFFF
CVar.nScreenHeight = (screen & 0xFFFF0000) >> 16
if CVar.nScreenWidth > 0 and CVar.nScreenWidth < 65535 and CVar.nScreenHeight > 0 and CVar.nScreenHeight < 65535:
break
def Show_Background(nValue):
if (nValue != CVar.nBack_Background):
smart.display.back_image(nValue)
CVar.nBack_Background = nValue
def rotate(aMotion, nCnt, nStart = 0, nEnd = 0):
if not aMotion:
return aMotion
nCnt %= len(aMotion)
if not nCnt:
return aMotion
left = aMotion[nStart:-nCnt]
if nEnd <= nStart:
nEnd = len(aMotion)
right = aMotion[-nCnt:nEnd]
return right + left
def CheckJoystick():
global m_anJoystick
    n1 = etc.read16(61) # joystick data word
    #n3 = etc.read8(63) # check whether new data has arrived
nVal1 = n1 & 0xff
nVal2 = n1 >> 8 & 0xff
#print("nVal2={0}".format(nVal2))
if (nVal2 > 0):
if (nVal1 < len(m_anJoystick)):
m_anJoystick[nVal1] = ((nVal2 - 120) / 128)
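# Encoding assumed by CheckJoystick (inferred from the reads above): the low
# byte of register 61 is the channel index (0..5) and the high byte is the raw
# stick value, remapped so that 120 -> 0.0 and 248 -> 1.0. For example, a word
# 0xF801 would set m_anJoystick[1] = (0xF8 - 120) / 128 = 1.0.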
################################################################
################################################################
################################################################
console(USB)
#console(BLE)
# Turn torque off, then adjust the actuator and controller settings to match this robot.
TorqAll(False)
# controller direction : 0-vertical(Humanoid), 1-Horizontal
eeprom.imu_type(1)
# profile -> velocity-based
#DXL(254).write8(10, 4) # 0 -> velocity-based profile, 4 -> time-based profile
# Secondary ID(255: No Use, 0 ~ 252: ID)
#DXL(254).write8(12, 255) # 255 -> No Use(Clear)
# Operation Mode(1:velocity[wheel], 3:position)
#DXL(254).mode(3) # position
# Turn torque off and switch every actuator to a time-based profile.
#TorqAll(False)
#DXL(254).write8(10, 4) # 0 -> velocity-based profile, 4 -> time-based profile
# Flysky Controller 를 위한 UART Setting
etc.write8(43,1) # 0 : BLE, 1 : UART, 2 : USB
etc.write8(88,0) # 0 : off, 1 : beep warning on low battery voltage
TorqAll(True)
for i in range(0, 6):
m_anJoystick.append(0)
# for j in range(1, 5):
# for i in range(1, m_nInterval+1):
# #print(CalcSnake(i, 1000))
# Move(m_anIDs, CalcSnake(i, 300, -40), 1.0, 0.9)
# #Move(m_anIDs, [1000, 0, -85, -65, 30])
nMotionIndex = 0
aData = []
aMotion = []
anIDs = []
IsPhone = False
IsJoystick = True
m_nSpeed = 300
m_fTurn = 0
# m_lstMotion = [m_nInterval]
# m_lstMotion.clear()
IsFirst = True
IsChanging = False
# 0: Normal, 1: gradually
m_nControlMode = 1
aMotion.append(m_nSpeed)
for i in range(0, 26):
nRaw = 0
nRaw = DXL(i + 1).present_position()
aMotion.append(CalcRaw2Angle(nRaw))
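# Seeding aMotion above with every servo's present position means the first
# Move() call interpolates from the snake's actual pose instead of snapping
# straight to the first gait frame.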
while(True) :
if (IsPhone == False) and (IsJoystick == False) :
if (smart.is_connected() == True):
smart.wait_connected()
smart.display.screen_orientation(2)
delay(500)
GetResolution()
Show_Background(1)
IsPhone = True
else :
IsMove = False
m_fTurn = 0
if (IsJoystick == False):
GetTouch_Down()
if (CVar.nTouch_Pos_X0 > 0) or (IsJoystick == True):
fTurn_Value = 70
if (IsJoystick == False):
                if (CVar.nTouch_Pos_X0 == 2) and (CVar.nTouch_Pos_Y0 == 3): # forward
                    IsMove = True
                    m_fTurn = 0
                    nMotionIndex = (nMotionIndex + 1) % m_nInterval
                elif (CVar.nTouch_Pos_X0 == 1) and (CVar.nTouch_Pos_Y0 == 3): # forward left
                    IsMove = True
                    m_fTurn = -fTurn_Value
                    nMotionIndex = (nMotionIndex + 1) % m_nInterval
                elif (CVar.nTouch_Pos_X0 == 3) and (CVar.nTouch_Pos_Y0 == 3): # forward right
                    IsMove = True
                    m_fTurn = fTurn_Value
                    nMotionIndex = (nMotionIndex + 1) % m_nInterval
                elif (CVar.nTouch_Pos_X0 == 2) and (CVar.nTouch_Pos_Y0 == 5): # backward
                    IsMove = True
                    m_fTurn = 0
                    nMotionIndex = (nMotionIndex - 1)
                elif (CVar.nTouch_Pos_X0 == 1) and (CVar.nTouch_Pos_Y0 == 5): # backward left
                    IsMove = True
                    m_fTurn = -fTurn_Value
                    nMotionIndex = (nMotionIndex - 1)
                elif (CVar.nTouch_Pos_X0 == 3) and (CVar.nTouch_Pos_Y0 == 5): # backward right
                    IsMove = True
                    m_fTurn = fTurn_Value
                    nMotionIndex = (nMotionIndex - 1)
nKey_F = 1 #2
nKey_W = 0 #3
nKey_F2 = 2
nKey_W2 = 3
nKey_Rot_L = 4
nKey_Rot_R = 5
fAngle_Head_Up = 10
fAngle_Body_Up_Down = 10
fAngle_Body_Left_Right = 70
IsForward = True
            n1 = etc.read16(61) # joystick data word
            #n3 = etc.read8(63) # check whether new data has arrived
nVal1 = n1 & 0xff
nVal2 = n1 >> 8 & 0xff
#print("nVal2={0}".format(nVal2))
if (nVal2 > 0):
if (nVal1 < len(m_anJoystick)):
m_anJoystick[nVal1] = ((nVal2 - 120) / 128)
fW = (m_anJoystick[nKey_W] - 0.5)
if (fW < 0.1) and (fW > -0.1):
fW = 0.0
            if (m_anJoystick[nKey_F] > 0.9): # forward
IsMove = True
IsForward = True
nMotionIndex = (nMotionIndex + 1) % m_nInterval
m_fTurn = fTurn_Value * fW
# aMotion.append(aMotion.pop(3))
# aMotion.append(aMotion.pop(3))
            elif (m_anJoystick[nKey_F] < 0.1): # backward
IsMove = True
IsForward = False
nMotionIndex = (nMotionIndex - 1)
m_fTurn = fTurn_Value * fW
# aMotion.insert(3, aMotion.pop())
# aMotion.insert(3, aMotion.pop())
if nMotionIndex < 0:
nMotionIndex = m_nInterval - 1
#print(m_anJoystick)
if (IsMove == True):
aData = CalcSnake(nMotionIndex + 1, m_nSpeed + 700 * (1.0 - m_anJoystick[nKey_Rot_L]), m_fTurn, False, m_nCnt_Motor, fAngle_Head_Up + 20 * (m_anJoystick[nKey_W2] - 0.5), fAngle_Body_Up_Down + 20 * m_anJoystick[nKey_Rot_R], fAngle_Body_Left_Right - 40 * (1.0 - m_anJoystick[nKey_F2]))
nCheckSpecialValue = m_anJoystick[0] + m_anJoystick[1] + m_anJoystick[2] + m_anJoystick[3] + m_anJoystick[4] + m_anJoystick[5]
#print(nCheckSpecialValue)
if (nCheckSpecialValue < 0.3) :
if (IsChanging == False):
#print(nCheckSpecialValue)
# 0: Normal, 1: gradually
if (m_nControlMode == 0):
m_nControlMode = 1
buzzer.melody(14)
dxlbus.reboot()
buzzer.melody(1)
delay(1500)
TorqAll(True)
else:
m_nControlMode = 0
buzzer.melody(15)
dxlbus.reboot()
buzzer.melody(1)
delay(1500)
TorqAll(True)
IsChanging = True
else:
IsChanging = False
if (m_nControlMode == 0):
aMotion = aData[:]
else:
if IsFirst == True:
IsFirst = False
aMotion = aData[:]
if (IsForward == True):
aMotion.append(aMotion.pop(3))
aMotion.append(aMotion.pop(3))
else:
aMotion.insert(1, aMotion.pop())
aMotion.insert(1, aMotion.pop())
aMotion[0] = aData[0] # Time
aMotion[1] = aData[1] # Head Up
aMotion[2] = aData[2] # Head Dir
aMotion[3] = aData[3] # T[0]
aMotion[4] = aData[4] # S[0]
# aMotion[25] = 0
# aTmp = aMotion[:]
# aTmp[25] = 0
Move(m_anIDs, aMotion, 1.0, 0.8)
# Move(m_anIDs, aTmp, 1.0, 0.8)
# Move(m_anIDs, aData, 1.0, 0.8) | [
"[email protected]"
] | |
21c757132b5efe4d0f056e1b1c85ed1f16fb2d31 | 55f5e9c2b9c380f96df08fbc84bd22f62a1b5e1e | /test/test_del_project.py | 535c23be5c330c0b09b60882dce4cf5a60aaaf3c | [] | no_license | Pav4l/python_training_mantis | 253796d9e0fd093c6ff126c30ee1527273e02ef2 | 854c6316602d708e38aeba6703678022011b6736 | refs/heads/main | 2023-02-08T11:30:21.814222 | 2020-12-28T21:37:13 | 2020-12-28T21:37:13 | 324,232,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from model.project import Project
import random
def test_delete_project(app, db):
app.session.login(username="administrator", password="root")
if len(db.get_project_list()) == 0:
app.project.create_new_project(
Project(name="project_new", description="description_of_project", status="development", view_state="public"))
old_project_list = db.get_project_list()
project_delete = random.choice(old_project_list)
app.project.delete_project_by_name(project_delete.name)
new_project_list = db.get_project_list()
app.session.logout()
old_project_list.remove(project_delete)
assert old_project_list == new_project_list
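    # The sorted comparison below (id_or_max is assumed to sort projects by id,
    # with a max sentinel when the id is unset) guards against the database
    # returning the remaining projects in a different order.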
assert sorted(old_project_list, key=Project.id_or_max) == sorted(new_project_list, key=Project.id_or_max) | [
"[email protected]"
] | |
022a339815b4954a268433d73ec778c395697598 | 91d550f23c8716eb06171f41f2fd54d3e2d17a04 | /TODO/TODO/urls.py | 75325eecc7589786bd31fb38b63530acaaef9d7b | [] | no_license | johannesWestenhoeferGit/scalors-assignment-backend-todo | b5c5470b6d9112452f1a82732ee946f1108e1df9 | 362d974430f85e52086d79fc4220290c751a1c08 | refs/heads/master | 2020-04-07T10:51:13.982057 | 2018-11-19T23:24:20 | 2018-11-19T23:24:20 | 158,300,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | """TODO URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from TODOapp import views
router = routers.DefaultRouter()
router.register( r'boards', views.BoardViewSet )
router.register( r'todos', views.TodoViewSet )
urlpatterns = [
path( 'admin/', admin.site.urls ),
path( '', include('TODOapp.urls') ), # handle other access in our urls.py in TODOapp
path( 'api-auth/', include('rest_framework.urls', namespace = 'rest_framework') ),
]
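# Note: DefaultRouter generates the standard DRF routes for the registrations
# above -- /boards/ and /boards/<pk>/ for BoardViewSet, /todos/ and
# /todos/<pk>/ for TodoViewSet -- but router.urls is never included in
# urlpatterns in this module, so (assumption) TODOapp.urls must import and
# include this router for those endpoints to actually be served.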
| [
"[email protected]"
] | |
8b96e9d557d751f2c9526a8cb1b5538429eb1fd4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | backup/user_320/ch27_2019_08_25_20_47_12_525740.py | c391f466c3525e388a6b3bf0875801b34bc00de8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | pigas = int(input('How many cigarettes do you smoke per day: '))
anos = int(input('For how many years have you smoked: ')) * 365  # years -> days of smoking
roubo = pigas * anos           # total cigarettes smoked
dia = 24 * 60                  # minutes in one day
roubo = roubo * (10/dia)       # 10 minutes of life lost per cigarette, converted to days
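# Worked example (hypothetical inputs): 20 cigarettes/day for 10 years gives
# 20 * 3650 = 73000 cigarettes; at 10 minutes each that is 730000 minutes, and
# 730000 / 1440 minutes-per-day ~= 507 days of life lost, which the print
# below reports.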
print(round(roubo)) | [
"[email protected]"
] |