blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 5–283) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–41) | license_type (stringclasses 2) | repo_name (stringlengths 7–96) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses 58) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 12.7k–662M, ⌀ = null) | star_events_count (int64 0–35.5k) | fork_events_count (int64 0–20.6k) | gha_license_id (stringclasses 11) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 43) | src_encoding (stringclasses 9) | language (stringclasses 1) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 7–5.88M) | extension (stringclasses 30) | content (stringlengths 7–5.88M) | authors (sequencelengths 1–1) | author (stringlengths 0–73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09aa0e6424b150ea73e45f68fb4e799e0261c80c | f7ff5c9cb8693e8a06841eaddff8c1d3ab898ad9 | /testing.py | 75222f05e7ec4a5c684f4f21b5e0f08a58e94fac | [] | no_license | cro1100/sqlalchemy-challenge | c067fadc91f8f6a781e1cb49e906913934c70256 | 860c1ac4870c6d7f7ed37ac4b04e16413f8c66ec | refs/heads/main | 2023-01-28T14:34:59.092250 | 2020-12-02T22:57:28 | 2020-12-02T22:57:28 | 310,405,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | #!/usr/bin/env python
# coding: utf-8
# get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
from flask import Flask, jsonify
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
connection = engine.connect()
# create a base
Base = automap_base()
# reflect an existing database into a new model
Base.prepare(engine, reflect=True)
# reflect the tables
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
# # Include inspector
inspector = inspect(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start> <br/>"
f"/api/v1.0/<start>/<end><br/>"
)
# # Exploratory Climate Analysis
@app.route("/api/v1.0/precipitation")
def precipitation():
    session = Session(engine)
    # Query the most recent precipitation reading from the measurement table
    title_df = pd.read_sql("SELECT prcp, max(date) as most_recent_date FROM measurement", connection)
    session.close()
    # A DataFrame is not directly JSON serializable, so convert it first
    return jsonify(title_df.to_dict(orient='records'))
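
# A minimal sketch of one more of the routes advertised in welcome(); the
# Station columns used here ('station', 'name') are an assumption about the
# reflected schema, not taken from the original file.
@app.route("/api/v1.0/stations")
def stations():
    session = Session(engine)
    results = session.query(Station.station, Station.name).all()
    session.close()
    # Row tuples are converted to lists so they are JSON serializable
    return jsonify([list(row) for row in results])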
if __name__ == '__main__':
app.run(debug=True) | [
"[email protected]"
] | |
415f38765388035c4444c139c8ef46f53a74dc5d | 2f545a9c55611d35b061c999f924b4bdb6fb8f20 | /Analysing_twitter/Downloading_tweets.py | 3652083f3c9cb32c141098d462bb18e726bb393e | [] | no_license | ConstanceSL/vigilant-octo-memory | 2049ab79be0e1616bd1d206bc547610d8b330c26 | c6bc431eeb81e5f7dd7573ceedfbe041476c1769 | refs/heads/master | 2020-03-17T19:00:39.023972 | 2018-09-04T09:48:54 | 2018-09-04T09:48:54 | 133,842,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,842 | py | import tweepy
import pandas as pd
import datetime
import os
import numpy as np
# Importing the keys for the twitter API
keys = pd.read_csv('/Users/Constance/keys/twitter_keys.csv', index_col=0)
consumer_key = keys.loc['consumer_key'][0]
consumer_secret = keys.loc['consumer_secret'][0]
access_token = keys.loc['access_token'][0]
access_token_secret = keys.loc['access_token_secret'][0]
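# Assumed layout of twitter_keys.csv (the file itself is private): an index
# column holding the key names and a single value column, roughly:
#
#   name,value
#   consumer_key,...
#   consumer_secret,...
#   access_token,...
#   access_token_secret,...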
# Defining the main functions
def download_tweets(query, limit):
"""
Downloads the tweets in the query. Because of the limit on the number of requests,
can take a while. Doesn't include retweets, and has a random old date for the time limit
(will get only 7 days of data anyway)
:param query: hashtag to search
:param limit: max number of tweets to download (it tends to crash above 100'000 tweets)
:return: list of tweets (json format)
"""
tweets = []
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
n = 0
m = 0
print('-' * 100 + '\nStarting download for ' + query)
for tweet in tweepy.Cursor(api.search, q=query + ' -filter:retweets', count=100,
lang="en",
since="2017-06-28",
include_rts=False,
tweet_mode="extended").items():
tweets.append(tweet._json)
n += 1
if n == 500:
print('Tweets {}-{} downloaded'.format(m + n - 499, m + n))
n = 0
m += 500
if m + n == limit:
break
print('Downloaded {} tweets'.format(m + n))
return tweets
def format_tweets(tweets, query, orientation):
"""
Formats the downloaded tweets into lists (to be converted to a dataframe), keeping only the relevant info.
Not all fields exist in the json, so uses a lot of try/except to catch this and puts 'None' for the exceptions
:param tweets: list of tweets (json format)
:return: list of tweets (list format) with only the relevant fields
"""
formatted_tweets = []
for tweet in tweets:
formatted_tweet = []
formatted_tweet.extend([tweet['created_at'], tweet['id'], tweet['full_text'],
tweet['truncated'], tweet['display_text_range']])
try:
formatted_tweet.append([tag['text'] for tag in tweet['entities']['hashtags']])
except Exception:
formatted_tweet.append(np.nan)
try:
formatted_tweet.extend([[mention['screen_name'] for mention in tweet['entities']['user_mentions']],
[mention['name'] for mention in tweet['entities']['user_mentions']]])
except Exception:
formatted_tweet.extend([np.nan, np.nan])
try:
formatted_tweet.append([link['url'] for link in tweet['entities']['urls']])
except Exception:
formatted_tweet.append(np.nan)
try:
formatted_tweet.extend([[media['media_url'] for media in tweet['entities']['media']],
[media['type'] for media in tweet['entities']['media']]])
except Exception:
formatted_tweet.extend([np.nan, np.nan])
try:
formatted_tweet.extend([[media['media_url'] for media in tweet['extended_entities']['media']],
[media['type'] for media in tweet['extended_entities']['media']]])
except Exception:
formatted_tweet.extend([np.nan, np.nan])
formatted_tweet.extend([tweet['in_reply_to_status_id'], tweet['in_reply_to_user_id'],
tweet['in_reply_to_screen_name'], tweet['user']['id'],
tweet['user']['name'], tweet['user']['screen_name'],
tweet['user']['location'], tweet['user']['description'],
tweet['user']['url'], tweet['user']['followers_count'],
tweet['user']['friends_count'], tweet['user']['listed_count'],
tweet['user']['created_at'], tweet['user']['favourites_count'],
tweet['user']['verified'], tweet['user']['statuses_count'], tweet['user']['lang'],
tweet['user']['contributors_enabled'], tweet['contributors'],
tweet['is_quote_status'], tweet['retweet_count'], tweet['favorite_count'],
tweet['favorited'], tweet['retweeted'], tweet['lang']])
try:
formatted_tweet.append(tweet['possibly_sensitive'])
except Exception:
formatted_tweet.append(np.nan)
formatted_tweet.append(query)
formatted_tweet.append(orientation)
formatted_tweet.append(datetime.datetime.now().strftime("%d-%m-%Y"))
formatted_tweets.append(formatted_tweet)
print('{} tweets formatted'.format(len(formatted_tweets)))
return formatted_tweets
def tweets_to_df(formatted_tweets):
"""
Turns the list of formatted tweets to a dataframe
:param formatted_tweets: list of tweets (list format) with only the relevant fields
:return: dataframe of formatted tweets
"""
col = ['created_at', 'id_number', 'text', 'truncated', 'text_range', 'hashtags', 'mentions_screen_names',
'mentions_names', 'links', 'media_urls', 'media_types', 'expanded_media_urls', 'expanded_media_types',
'reply_to_status_id', 'reply_to_user_id', 'reply_to_screen_name', 'user_id', 'user_name', 'user_screen_name',
'user_location', 'user_description', 'user_url', 'user_followers_count', 'user_friends_count',
'user_listed_count', 'user_created_at', 'user_favourites_count', 'user_verified', 'user_statuses_count',
'user_lang', 'user_contributors_enabled', 'contributors', 'is_quote_status', 'retweet_count',
'favorites_count', 'favorited', 'retweeted', 'lang', 'possibly_sensitive', 'query', 'orientation', 'date']
tweets_df = pd.DataFrame(formatted_tweets, columns=col)
return tweets_df
def save_df(tweets_df, path):
"""
Saves the dataframe to csv
:param tweets_df: Dataframe of formatted tweets
:param path: where to save the tweets
:return: None
"""
tweets_df.dropna(subset=['text', 'id_number'])
tweets_df.to_csv(path)
print('Dataframe saved to file')
def download_and_save_tweets():
"""
General function that downloads, formats, and saves the tweets
:param query: hashtag to query
:param limit: max number of tweets to download
:param path: where to save the csv
:return: formatted data frame of tweets
"""
path = input('Please enter the path of the project directory: ')
date = datetime.datetime.now().strftime("%d-%m-%Y")
    queries = [x for x in input('Hashtags to query (separated by ", "): ').split(', ')]
    orientations = [x for x in input('Orientation of the hashtags (pro/against, separated by ", "): ').split(', ')]
    limit = int(input('Maximum number of tweets to download: '))
# Moving to the right directory and checking if a folder for raw data exists (creates it if not)
os.chdir(path)
if not os.path.exists('./raw_data'):
os.makedirs('./raw_data')
os.chdir('./raw_data')
# Downloading and saving the data
for query, orientation in zip(queries, orientations):
tweets = download_tweets(query, limit)
formatted_tweets = format_tweets(tweets, query, orientation)
tweets_df = tweets_to_df(formatted_tweets)
save_df(tweets_df, query + '_' + date + '.csv')
print('All hashtags downloaded')
return None
if __name__ == "__main__":
download_and_save_tweets()
| [
"[email protected]"
] | |
265ebd4b01c92572c56ef4f7bfd68a340da1ad7b | 1f67e01617c2893fd0f33b057a1a80b70976f489 | /chapter2/get_cntour.py | fa8800cdec1c1c1c88319931799c4dca08e0754c | [] | no_license | yxl1761274146/studyssh | 0f60d82faa914882329c2b162c100aa43002f58f | cc8c66719a251062939174ac831b622d48617443 | refs/heads/master | 2022-04-28T10:01:00.511832 | 2020-04-24T08:13:15 | 2020-04-24T08:13:15 | 257,182,160 | 0 | 2 | null | 2020-04-20T08:11:40 | 2020-04-20T05:38:40 | HTML | UTF-8 | Python | false | false | 91 | py | import requests
url="http://www.centor.cn"
response=requests.get(url)
print(response.text) | [
"[email protected]"
] | |
77f896b78bdca51ae020b151b70c96935c627d4c | 9ebc24d199d0031ba290001bdbfdebc969ecd8a9 | /api.py | 4295ff9603156e7ecd21e228f7608265bf75e704 | [] | no_license | SorakaMafaka/Riot_API | 0074b549f9d0ed917e35b29040aa4958e65fee13 | 9fdec116f28ea260b35c4eb8f4d1c286d1f99662 | refs/heads/master | 2023-03-06T00:26:58.423914 | 2021-02-24T16:10:05 | 2021-02-24T16:10:05 | 341,957,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | # SοrakaMafaka
# API-Key: API
from riotwatcher import LolWatcher, ApiError
import pandas as pd
import cv2
# Global vars
api_key = 'API KEY'
watcher = LolWatcher(api_key)
my_region = 'eun1'
region = 'eune'
# Check league's latest version
latest = watcher.data_dragon.versions_for_region(region)['n']['champion']
# Champions static information
static_champ_list = watcher.data_dragon.champions(latest, False, 'en_US')
# General stats
me = watcher.summoner.by_name(my_region, 'SοrakaMafaka')
# print(me)
# Rank stats
#my_ranked_stats = watcher.league.by_summoner(my_region, me['id'])
# print(my_ranked_stats)
my_matches = watcher.match.matchlist_by_account(my_region, me['accountId'])
# Fetch details of last match
last_match = my_matches['matches'][0]
match_detail = watcher.match.by_id(my_region, last_match['gameId'])
participants = []
for row in match_detail['participants']:
participants_row = {}
participants_row['champion'] = row['championId']
#participants_row['spell1'] = row['spell1Id']
#participants_row['spell2'] = row['spell2Id']
participants_row['win'] = row['stats']['win']
participants_row['kills'] = row['stats']['kills']
participants_row['deaths'] = row['stats']['deaths']
participants_row['assists'] = row['stats']['assists']
#participants_row['totalDamageDealt'] = row['stats']['totalDamageDealt']
#participants_row['goldEarned'] = row['stats']['goldEarned']
#participants_row['champLevel'] = row['stats']['champLevel']
#participants_row['totalMinionsKilled'] = row['stats']['totalMinionsKilled']
#participants_row['item0'] = row['stats']['item0']
#participants_row['item1'] = row['stats']['item1']
participants.append(participants_row)
participantIdentities = []
for row in match_detail['participantIdentities']:
participantIdentities_row = {}
participantIdentities_row['summonerName'] = row['player']['summonerName']
participantIdentities.append(participantIdentities_row)
# Champion static list data to dictionary for looking up
champ_dict = {}
for key in static_champ_list['data']:
row = static_champ_list['data'][key]
champ_dict[row['key']] = row['id']
for row in participants:
row['championName'] = champ_dict[str(row['champion'])]
# print(match_detail.keys())
df = pd.DataFrame(participants)
df1 = pd.DataFrame(participantIdentities)
frames = [df, df1]
result = pd.concat(frames, axis=1, join="inner")
print(result.to_string())
# End of fetching details of the last match
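# A small follow-on sketch (not in the original script): derive a KDA column
# from the merged frame above, using the kills/deaths/assists columns built
# earlier in this file; deaths of 0 are treated as 1 to avoid division by zero.
result['kda'] = (result['kills'] + result['assists']) / result['deaths'].replace(0, 1)
print(result[['summonerName', 'championName', 'kda']].to_string())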
| [
"[email protected]"
] | |
faf3d095d3cc4af042fe18427706194245f0b483 | 83e45f7177a29cfc39a4eff83f9b83aebdf78048 | /test/frequency/test_calculate.py | e931305a22795da8a9ad5b207bb9630ce36d3a18 | [
"Apache-2.0"
] | permissive | UCL/twitter-user-fetch | f9c8ca641c5dfd7d37a8a6286773247d02f8443c | 1677e4468ebc083046c641568b9770cb650d18a9 | refs/heads/master | 2021-07-14T18:43:56.383991 | 2017-10-20T08:28:52 | 2017-10-20T08:28:52 | 107,196,137 | 0 | 0 | null | 2017-10-20T08:28:53 | 2017-10-17T00:05:49 | Python | UTF-8 | Python | false | false | 1,643 | py | from app.frequency import calculate
import unittest
class TestCalculate(unittest.TestCase):
dl = [
['Tue Sep 19 11:53:24 +0000 2017', '#alltestsmatter1'],
['Thu Sep 21 11:53:24 +0000 2017', '#alltestsmatter2'],
['Thu Oct 19 11:53:24 +0000 2017', '#alltestsmatter1'],
['Thu Oct 19 11:53:24 +0000 2017', '#alltestsmatter1'],
['Thu Oct 19 11:53:24 +0000 2017', '#alltestsmatter2'],
['Thu Oct 19 11:53:24 +0000 2017', '#alltestsmatter2'],
['Thu Oct 19 11:53:24 +0000 2017', '#alltestsmatter2'],
]
def test_get_monthly_frequency(self):
expected = [
[2017, 9, '#alltestsmatter1', 1],
[2017, 9, '#alltestsmatter2', 1],
[2017, 10, '#alltestsmatter1', 2],
[2017, 10, '#alltestsmatter2', 3]
]
calc_df = calculate.Calculate(self.dl)
self.assertListEqual(calc_df.get_monthly_frequency().values.tolist(), expected, "Must calculate frequency")
def test_get_average_monthly_frequency(self):
expected = [
['#alltestsmatter1', 1.5],
['#alltestsmatter2', 2.0]
]
calc_df = calculate.Calculate(self.dl)
self.assertListEqual(calc_df.get_average_monthly_frequency().values.tolist(), expected, "Must calculate avg")
def test_get_average_monthly_frequency_descending(self):
expected = [
['#alltestsmatter2', 2.0],
['#alltestsmatter1', 1.5]
]
calc_df = calculate.Calculate(self.dl)
self.assertListEqual(calc_df.get_average_monthly_frequency_descending().values.tolist(), expected, "Must sort")
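
# For context, a minimal sketch of the interface these tests exercise,
# assuming pandas; the real app/frequency/calculate.py may differ in detail.
#
#   class Calculate:
#       def __init__(self, rows):               # rows of [created_at, hashtag]
#           df = pd.DataFrame(rows, columns=['created_at', 'hashtag'])
#           ts = pd.to_datetime(df['created_at'])
#           df['year'], df['month'] = ts.dt.year, ts.dt.month
#           self.df = df
#       def get_monthly_frequency(self):        # -> [year, month, hashtag, count]
#           return (self.df.groupby(['year', 'month', 'hashtag'])
#                   .size().reset_index(name='count'))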
| [
"[email protected]"
] | |
6bfb52aa9f932793c3fb1ce5394829309aab7d4e | 169d809f45dedcaa3c7b1b49912d8b025abe18d9 | /challenge251_easy_re.py | 516f39b9767f732d8d695bcffd712a849d351e4d | [] | no_license | bermec/challenges | 8a82d1d38d1ed1a0fc3f258443bc0054efc977a6 | 9fb092f20f12b4eaa808e758f00f482a49346c88 | refs/heads/master | 2021-10-08T05:05:56.803332 | 2018-12-08T00:20:20 | 2018-12-08T00:20:20 | 109,448,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,324 | py | '''
Nonograms, also known as Hanjie, Picross or Griddlers, are picture logic puzzles in
which cells in a grid must be colored or left blank according to numbers at the
side of the grid to reveal a hidden picture. In this puzzle type, the numbers are a
form of discrete tomography that measures how many unbroken lines of filled-in
squares there are in any given row or column.
In a Nonogram you are given the number of elements in the rows and columns. A row/column
where containing no element has a '0' all other rows/columns will have at least one number.
Each number in a row/column represent sets of elements next to each other.
If a row/column have multiple sets, the declaration of that row/column will have multiple
numbers. These sets will always be at least 1 cell apart.
An example
2 1 1
1 1 1 2 1
2 * *
1 2 * * *
0
2 1 * * *
2 * *
Formal Inputs & Outputs
Input description
Today we will work with ASCII "art". The different character will serve as colors.
If you want you can ofcourse color them in the output.
*
/|
/ |
/ |
*---*
Output description
Output changes a bit, you will show the set of the same characters.
Note 2 sets of different characters don't have to be seperated by an empty cell
Columns:
(*,1)
(/,1) (/,1) (/,1) (|,3)
(*,1) (-,2) (-,1) (-,1) (*,1)
Rows:
(*,1)
(/,1) (|,1)
(/,1) (|,1)
(/,1) (|,1)
(*,1) (-,3) (*,1)
Ins
1
*
/|
/ |
/ |
*---*
2
/\ #
/**\#
/****\
/******\
/--------\
| |
| || # |
| || # |
| || |
*------*
Bonus 1
Place the columns and rows in a grid like you would give to a puzzler
(*,1)
(/,1) (/,1) (/,1) (|,3)
(*,1) (-,2) (-,1) (-,1) (*,1)
(*,1)
(/,1) (|,1)
(/,1) (|,1)
(/,1) (|,1)
(*,1) (-,3) (*,1)
Bonus 2
Now solve a ASCII puzzle. This should be a little bit
'''
pattern = ''' (*,1)
(/,1) (|,1)
(/,1) (|,1)
(/,1) (|,1)
(*,1) (-,3) (*,1)'''
pattern = pattern.splitlines()
print(pattern)
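# Aside: for the raw ASCII-art rows of the puzzle itself (as in Bonus 2), the
# "(symbol,count)" sets can be derived directly with itertools.groupby; a
# minimal sketch, independent of the regex approach below:
from itertools import groupby

def row_sets(row):
    """Return the (symbol, run_length) pairs for one row, skipping blanks."""
    return [(ch, len(list(run))) for ch, run in groupby(row) if ch != ' ']

print(row_sets('*---*'))  # [('*', 1), ('-', 3), ('*', 1)]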
import re
# Pull every "(symbol,count)" token out of each declaration line
for x in range(0, 5):
    tokens = re.findall(r'\(\S,\d+\)', pattern[x])
    for item in tokens:
        print(item) | [
"[email protected]"
] | |
8b7072d1ba5a4ede933dd01a2384c3eaf6b82838 | cbc41d6251f6cd9ddf802046d73f67f01baabf48 | /tp0/copia_jimmy.py | 2f958d68e8ccc14c85e1f0703bf8fcc671bffbe7 | [] | no_license | jimsrc/bio16 | cf004bd9413372214a567ab3abb6b82d52e2492c | a3f5452c9b133fbc3f7e2ba32d979a8dd2b01931 | refs/heads/master | 2020-08-04T20:14:34.420227 | 2016-08-25T20:59:46 | 2016-08-25T20:59:46 | 66,593,826 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import numpy as np
import codecs
import igraph
import pandas as pd
u = open(1, 'w', encoding='utf-8', closefd=False)
data = pd.read_csv("lista_curso.csv",encoding='utf-8')
data = data.replace(np.nan,0)
names_data = data.drop(data.columns[range(14)] ,axis=1)
print(names_data.columns,file=u)
print(names_data,file=u)
node_names = data['Nombre'].values
print(node_names,file=u)
values = names_data.values
g = igraph.Graph.Adjacency(values.tolist())
g.es['weight'] = values[values.nonzero()]
g.vs['labels'] = node_names
print(g.degree(),file=u)
igraph.plot(g, vertex_label=g.vs['labels'])
| [
"[email protected]"
] | |
219150b5a382ba176958a98fcb77fe773104e5fa | 53a262d3db0d5540991bd042e883b78fe8c87a05 | /gis_rest_project/urls.py | d5a9c7ea79431819515bf284e8f0b21e19d6dc0c | [] | no_license | davy254/gis_rest_project | 42e40f79439b6b966562fb8461ba00f136d53d17 | 049cd40975a09a92cbef1ab031136f0f5d1dca8f | refs/heads/master | 2022-12-14T17:31:56.083232 | 2020-09-10T12:08:54 | 2020-09-10T12:08:54 | 295,836,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | """gis_rest_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('nairobi_hospitals_api.urls')),
]
| [
"[email protected]"
] | |
d34431680d74d350cfa028cb5ffe34f5d8fada99 | dfce9280c3299bacae9563a3837519d1d23c2a5a | /chap13_files/item05_see_matrix.py | e76d6c77ac189e620ae263a097098bc561616d27 | [] | no_license | DemianLuna/python-01-basics-2020-course | bda5b8efc7da54b1af7b27c549e7b6e7c70eba33 | e499eb8e3cfc644dea175baaada7c923cc18739c | refs/heads/master | 2022-12-22T19:16:05.809925 | 2020-09-26T18:34:04 | 2020-09-26T18:34:04 | 294,977,293 | 0 | 0 | null | 2020-09-12T16:01:14 | 2020-09-12T16:01:13 | null | UTF-8 | Python | false | false | 690 | py | """
Before starting, go to the following link
- [First 10 years of Google stock](https://goo.gl/3zaUlD)
Download the csv file and rename it to google_stock_data.csv
After that, copy the header and the first 5 records and create the file google_stock_data_mini.csv
"""
from colorama import Fore, Style, init
import platform
my_system = platform.system()
## this is a ternary if
is_windows = True if my_system == 'Windows' else False
# Use convert True for windows systems
init( convert=is_windows )
csv_path = 'google_stock_data.csv'
csv_file = open( csv_path )
size = 0
for line in csv_file:
print( f"{Fore.GREEN} {line}" )
size+=1
print( f"\nTotal lines are { size }" ) | [
"[email protected]"
] | |
94bed5381ed0ed656b517ee6a6c890226eb4ca73 | 4537fb9c826c9cf67f1774300d5bf235f6992360 | /Solutions/0042/Q_0042.py | 344a43e6d0877cc20828777b687b119ad252f4d7 | [] | no_license | Conzpite/ProjectEuler | 1270300bc3f9dfe321fa7ade8342124a49d3b475 | c21f401effc26dd74ad23913f5e85355c6810ba9 | refs/heads/master | 2022-11-23T16:52:54.507136 | 2020-07-19T13:44:34 | 2020-07-19T13:44:34 | 277,668,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import math
#Return highest value for comparison
def AddTriangleNumber(numSet, n):
value = n * (n+1) // 2
numSet.add(value)
return value
#Set up triangle numbers by preloading some numbers
currN = 1
largestValue = 0
numSet = set()
for i in range(20):
largestValue = AddTriangleNumber(numSet, currN)
currN += 1
words = []
with open("p042_words.txt", "r") as f:
words = f.readline().split(",")
triangleWordsNum = 0
for word in words:
wordValue = 0
wordshort = word.strip("\"")
for char in wordshort:
wordValue += (ord(char)- ord('A') + 1)
#Add more to triangle numbers to contain possible values
while wordValue > largestValue:
largestValue = AddTriangleNumber(numSet, currN)
currN += 1
if wordValue in numSet:
triangleWordsNum += 1
print(triangleWordsNum)
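# Worked check: "SKY" -> 19 + 11 + 25 = 55, and 55 = 10*11/2 is the 10th
# triangle number, so "SKY" counts as a triangle word.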
| [
"[email protected]"
] | |
4db72792a3d7ae6431c64ea43c349511e7c74b70 | 3ac98e9354f761927732d75f096a27334a7f2d6a | /superbench/benchmarks/return_code.py | 74739d2b761aa05e71ea86652089e4f27b9e0824 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | santakd/superbenchmark | e8a3ac900853e2fb75b76efa3088fc7bf877032a | 2299d238dd52e8da1142f59673b4e8c35134b123 | refs/heads/main | 2023-04-09T16:54:14.552150 | 2021-04-21T03:58:55 | 2021-04-21T03:58:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""A module for statuses of benchmarks."""
from superbench.benchmarks.context import Enum
class ReturnCode(Enum):
"""The Enum class representing benchmark status."""
# Common return codes.
SUCCESS = 0
INVALID_ARGUMENT = 1
INVALID_BENCHMARK_TYPE = 2
INVALID_BENCHMARK_RESULT = 3
# Return codes related with model benchmarks.
NO_SUPPORTED_PRECISION = 10
MODEL_TRAIN_FAILURE = 11
MODEL_INFERENCE_FAILURE = 12
DISTRIBUTED_SETTING_INIT_FAILURE = 13
DISTRIBUTED_SETTING_DESTROY_FAILURE = 14
DATASET_GENERATION_FAILURE = 15
DATALOADER_INIT_FAILURE = 16
OPTIMIZER_CREATION_FAILURE = 17
MODEL_CREATION_FAILURE = 18
| [
"[email protected]"
] | |
905cdae5fb8ee9fa03abad70fc7fb8b91b627ae6 | 720a8ffc5807d3d02a4c9f2d3c18f79fa7f66c45 | /baseTest/reptile/pic_Test.py | 04ea47a70048406c55e9c10ad658bc076de2fc88 | [] | no_license | tbgdwjgit/python3 | 8123f87753b2469256843cf3257d0dc11744712a | ef7f54e492c73cf1e5533857b126fdd4eb1d8ca2 | refs/heads/master | 2021-04-06T00:36:31.333267 | 2019-09-16T08:07:49 | 2019-09-16T08:07:49 | 125,023,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | __author__ = 'Test-YLL'
# coding:utf-8
'''
reptile
英[ˈreptaɪl]
美[ˈrɛptɪl, -ˌtaɪl]
n. 爬行动物; 卑鄙的人;
adj.爬虫类的; 卑鄙的;
[例句]Is not a reptile but a mammal.
并不是爬虫动物,而是哺乳动物。
'''
import re
import urllib.request
'''JD.com phone product images'''
'''
def craw(url,page):
html1=urllib.request.urlopen(url).read()
html1=str(html1)
pat1='<div id="plist".+?<div class="page clearfix">'
result1=re.compile(pat1).findall(html1)
result1=result1[0]
# pat2='<img width="220" height="220" data-img="1"data-lazy-img="//(.+?\.jpg)">'
pat2='<img width="220" height="220" data-img="1" src="//(.+?\.jpg)">'
imagelist=re.compile(pat2).findall(result1)
x=1
for imageurl in imagelist:
imagename='e:/test1221/'+str(page) + str(x) + ".jpg"
imageurl="http://"+imageurl
try:
urllib.request.urlretrieve(imageurl,filename=imagename)
except urllib.error.URLError as e:
if hasattr(e,"code"):
x+=1
if hasattr(e,"reason"):
x+=1
x+=1
for i in range(1,7):
url = "http://list.jd.com/list.html?cat=9987,653,655&page="+str(i)
craw(url,i)
'''
''' test '''
| [
"[email protected]"
] | |
29048e5c0449e12f3b2de6b9c46ec9062e2cf697 | 13a624bd01fa7bac79be91f1ea5a5d4ba006e3d3 | /bioinfo_python/029.py | 867939cbdb72bea33f509d57fcec7c0bf67da5ce | [] | no_license | KennethJHan/bioinfo-lecture-2107 | 3ab1800aef5e5f62a51ef07629fc84d1711c7c23 | 4a08814a6a7ccae4b9d81d9f5ad485399ddd9498 | refs/heads/main | 2023-06-24T18:46:46.026166 | 2021-07-30T11:04:39 | 2021-07-30T11:04:39 | 389,945,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py |
Seq1 = "ATGTTATAG"
res = "C" in Seq1
print(res)
print("A" in Seq1)
| [
"[email protected]"
] | |
8c059f5fb4903b8a5dab179210a0124b5dce14af | 1fbeb08c86243de89fa3c855ae79959f421b8581 | /movie_db.py | 879bbfa9b8e082af6d3e71ec398c2eff51e6fe8e | [] | no_license | SYAN83/movie_review_analysis | e7fb14c804d9645b2109beabae05b12201c7a497 | 3577774bf533a75823ddf7f57278eed5c451f073 | refs/heads/master | 2021-06-06T01:28:50.134251 | 2015-06-26T18:32:12 | 2015-06-26T18:32:12 | 38,014,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 15:39:07 2015
@author: Shu-Macbook
"""
import movie_review as mr
import pandas as pd
import sqlite3
#movie_list = ["Inception","Interstellar 2014","Mad Max: Fury Road",\
# "WALL.E","PK","Eternal Sunshine of the Spotless Mind"]
#movie_list = ["Its Such a Beautiful Day","marvels The Avengers",\
#"Guardians of the Galaxy","Donnie Darko","X-Men: Days of Future Past",\
#"Star Trek"]
movie_list = ["Entourage"]
#movie_list = ["Alone in the Dark","40 Days and Nights","Battlefield Earth",\
#"Super Shark","100 Degrees Below Zero","Airplane vs Volcano","Alien Abduction"]
def scrape():
for movie in movie_list:
mv = mr.MovieReview(movie)
mv.tomatoReview()
def getTableName():
conn = sqlite3.connect(mr.MovieReview.tomatodb)
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table';", \
con=conn)
conn.close()
return table
def getReview(query):
conn = sqlite3.connect(mr.MovieReview.tomatodb)
try:
data = pd.read_sql(query, con=conn)
except sqlite3.DatabaseError as e:
        print e.message
return None
except TypeError:
return None
conn.close()
return data
def main():
# scrape()
print getTableName()
print getReview("select * from movie_title")
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
b7c4e916e1f6d81465c6f830d753a881a0a6b7e7 | ad3e18b7c0045141dbed62e91419f3e126153f41 | /TodoAPI/UserDetails/migrations/0002_auto_20210803_0114.py | 4623c94912659a051917b9408031d23604541624 | [] | no_license | Kakashi-23/TodoAppApi | 5b58d82447e7434890c7d2f44799c9eb254446e4 | 4449325cbb99e5e17864cc3651eeaff94310465e | refs/heads/main | 2023-07-11T00:31:12.107327 | 2021-08-15T20:39:41 | 2021-08-15T20:39:41 | 396,438,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | # Generated by Django 3.2.5 on 2021-08-02 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserDetails', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='tododetails',
name='title',
field=models.CharField(max_length=225),
),
migrations.AlterField(
model_name='tododetails',
name='todo_data',
field=models.CharField(max_length=225),
),
]
| [
"[email protected]"
] | |
7cfcfacde49b160679ab8f0831c59b01fccd91aa | 1cb793903eac98d5ce8a7502ccf972f954dfc9c5 | /HouseEstimator/admin.py | 88396faf6fd8c0ab2d0d14dbcb1ccf68e99ee798 | [] | no_license | incognitoSE/backend | 8af21868d1df801fec25f9f108bfc18e51274ee9 | 4f7002dc68e6c0cb6c0459b8ab6b007017eedeb8 | refs/heads/master | 2023-06-12T09:24:49.767586 | 2021-07-03T23:27:04 | 2021-07-03T23:27:04 | 360,358,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from django.contrib import admin
from .models import House
admin.site.register(House)
| [
"[email protected]"
] | |
ad7566b72ea8423788389fb72979e95b2a81b0b3 | 156f5b9dd349522e94bb2b0822b867ba092d93f3 | /hash_generator.py | 174429325e89313461b76aff80bc022527c32201 | [] | no_license | ashfrol/py31_classes | 85e64585502ac776d0712e87d23024856621484c | e8452c3c6c5a49667b8536d033d6fcf813712097 | refs/heads/master | 2022-11-13T01:47:45.305226 | 2020-07-12T18:06:22 | 2020-07-12T18:06:22 | 266,164,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from hashlib import md5
from os.path import join
def hash_strings_from_file(filepath):
with open(filepath, 'r', encoding='utf-8') as file_read:
for line in file_read:
hashed_line = md5(line.encode()).hexdigest()
yield hashed_line
for item in hash_strings_from_file('output/countrylink.txt'):
print(item)
| [
"[email protected]"
] | |
fe1882007038df7d006b3525c72f65d8fb6f1eac | cfb37a99d374d31dbf626dcab69ff1aff5ed6e44 | /app-engine-twitter/responses.py | d9b50932173491fd0bf53cdcbaef9d684c54bc69 | [] | no_license | nicolavic98/twittershade | 73f8ff919d33e17d12690161465e641c80db8f10 | c17923b6ffd2a63ae65121bf0b2c3a09d19b00d9 | refs/heads/master | 2021-01-01T20:44:57.502032 | 2017-08-04T15:39:45 | 2017-08-04T15:39:45 | 98,925,357 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py |
below10k = ["TP someone's house", "Go to an abandoned warehouse", "Take a shower while wearing all your clothes", "Eat a raw egg", "Take a bite out of a bar of soap", "Give a hug to a random stranger"]
tenKto25K = ["Slap the person next to you", "Inhale Sour Patch Kids sugar", "Order 100 pizzas and send them to your neighbor's house", "Go To Walmart & yell loud on the speaker", "Take a shot of hotsauce"]
above25K = ["Cut someone's hair while they're asleep", "Take a bucket to 7-Eleven, fill it up and leave without paying", "Go to a public place and yell FIREEEEEE!!!!"]
| [
"[email protected]"
] | |
a8c959d8e9f329313bd71de766b856523a873d00 | 28239578bb6bfa6c2d930ceb758a2d5ad6f96dbd | /app/models/transaction_output.py | eda00e41e71edcb20e5c02aecf6da7e7cf94b818 | [] | no_license | YuitoSato/buidle-chain | 43037601632a65acd7754fff69592a2beeb420d6 | 570b8c3414e496ea9da48c4ac7a08d56613c14a5 | refs/heads/master | 2020-04-17T12:15:59.243594 | 2019-01-22T11:37:14 | 2019-01-22T11:37:14 | 166,573,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | import hashlib
import uuid
from functools import reduce
from operator import add
from app.models.exceptions.not_enough_balance_exception import NotEnoughBalanceException
from app.models.transaction_input import TransactionInput
from app.utils.constants import COINBASE_ADDRESS
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
class TransactionOutput:
def __init__(self, transaction_output_id, amount, locking_script, sender_address, recipient_address):
self.transaction_output_id = transaction_output_id
self.amount = amount
self.locking_script = locking_script
self.sender_address = sender_address
self.recipient_address = recipient_address
@classmethod
def build(cls, amount, sender_address, recipient_address, timestamp):
transaction_output_id = hashlib.sha256((str(amount) + sender_address + recipient_address + str(timestamp)).encode('utf-8')).hexdigest()
return TransactionOutput(
transaction_output_id = transaction_output_id,
amount = amount,
locking_script = TransactionOutput.calc_locking_script(recipient_address, transaction_output_id),
sender_address = sender_address,
recipient_address = recipient_address
)
def to_input(self, unlocking_script):
return TransactionInput(
transaction_input_id = uuid.uuid1().hex,
transaction_output_id = self.transaction_output_id,
unlocking_script = unlocking_script,
amount = self.amount
)
@classmethod
def calc_locking_script(cls, recipient_address, transaction_output_id):
if recipient_address == COINBASE_ADDRESS:
return COINBASE_ADDRESS
else:
public_key_str = '-----BEGIN PUBLIC KEY-----\n'\
+ recipient_address\
+ '\n-----END PUBLIC KEY-----'
public_key = RSA.importKey(public_key_str.encode('utf-8'))
encryptor = PKCS1_OAEP.new(public_key, SHA256)
encrypted = encryptor.encrypt(transaction_output_id.encode('utf-8'))
return encrypted.decode('latin-1')
@classmethod
def calc_total_amount(cls, tx_outputs):
if len(tx_outputs) == 0:
return 0
tx_output_amounts = list(map(lambda tx_o: tx_o.amount, tx_outputs))
return reduce(add, tx_output_amounts)
@classmethod
def fetch_tx_outputs_over_amount(cls, target_amount, tx_outputs):
result_tx_outputs = []
sum_amount = 0
for i in range(len(tx_outputs)):
result_tx_outputs.append(tx_outputs[i])
sum_amount += tx_outputs[i].amount
if sum_amount > target_amount:
return result_tx_outputs
raise NotEnoughBalanceException()
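
# A minimal usage sketch (not from the original module). The addresses here
# are placeholders; COINBASE_ADDRESS is used as the recipient because a real
# recipient_address must be a valid RSA public key body for calc_locking_script.
#
#   out = TransactionOutput.build(amount=10, sender_address='alice',
#                                 recipient_address=COINBASE_ADDRESS,
#                                 timestamp=0)
#   tx_in = out.to_input(unlocking_script='...')
#   assert tx_in.amount == out.amount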
| [
"[email protected]"
] | |
c96eae7c3f8bdf6cadee7eb9455b092a1192d4c4 | 65052e95e1e48dffa143abd8d0e0181658ed5752 | /TeamUPLIFTCA/morenews.py | 26f739aa2caedb8a90d15edc88519c903a78fe5c | [] | no_license | mkm1088/Positive-News-Finder | f3c2708a0fded0d5e0b166a0f51fd4ab7e3aa9dc | c89eadd6774881219578456bd6a056d21709fdcf | refs/heads/master | 2020-04-21T08:01:47.231887 | 2019-02-06T13:22:13 | 2019-02-06T13:22:13 | 169,407,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import requests
#from urllib3.request import urlopen
from bs4 import BeautifulSoup as soup
from selenium import webdriver
from bs4 import BeautifulSoup as BS
import re
def newsFromCNBC():
main_url = "https://newsapi.org/v2/top-headlines?sources=the-hindu&apiKey=***************"
open_cnbc_page = requests.get(main_url).json()
print(len(open_cnbc_page),"is the length of the json return")
article = open_cnbc_page["articles"]
browser = webdriver.Chrome(executable_path='F:\chromedriver_win32\chromedriver.exe')
for ar in article:
browser.get(ar["url"])
print(ar["url"])
        ans = ''
html = browser.page_source
soup = BS(html, 'html.parser')
try:
title = soup.find('h2',{'class':'intro'}).get_text()
table1 = soup.find('div',{'class':re.compile('content-body')})
table = table1.find_all('p')
#print(soup.find_all('div',{'class':re.compile('content-body')})[1].find_all('p'))
for k in table:
if k.string is not None:
ans=ans+k.string
#insertIntoDB(ar['publishedAt'],ar["title"], ans, "The Hindu", ar["url"])
print(ans)
except:
print("removing video articles")
#driver
if __name__ == '__main__':
# function call
newsFromCNBC()
| [
"[email protected]"
] | |
07f746e0a6c9a0546c3d7a88aa78518545bbb39e | 81f22c3c76e9136c27915ebb1b05b5cc8e539156 | /hebChatbot/States.py | 086b1a7a59dc934490fed318a3b5e6e4bddc78fc | [] | no_license | RnDteam/CCai | 9a4870d9a0a6d88a579ddadc271779d246af3cea | 8913edf078c9dea732578dbe3ca2d85e9a632d74 | refs/heads/master | 2021-01-11T17:59:28.500829 | 2017-02-20T11:31:11 | 2017-02-20T11:31:11 | 79,892,554 | 1 | 2 | null | 2017-02-14T10:09:10 | 2017-01-24T08:26:37 | HTML | UTF-8 | Python | false | false | 264 | py | from enum import Enum
class States(Enum):
EntityExtraction = 1
IntentRecognition = 2
ExecutingAction = 3
ActionDone = 4
@staticmethod
def is_edge_state(state):
return state == States.EntityExtraction or state == States.ActionDone
| [
"[email protected]"
] | |
5b491aab07e6ef8e121b3f5e427d92718c934b8d | 0e7ac0061f2d6d81a12946476a767d0c63e17155 | /Define_Model/Pooling.py | 85168020c13d5c929bf58ef20bc455fa7ca6b03c | [
"MIT"
] | permissive | forwiat/SpeakerVerifiaction-pytorch | b80afe6cfdc8d7a350f064fc49781de586064d68 | feb8efe9b22a08f1995db02d77d022036d205573 | refs/heads/master | 2022-12-05T01:33:38.132016 | 2020-07-24T17:06:21 | 2020-07-24T17:06:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: [email protected]
@Software: PyCharm
@File: Pooling.py
@Time: 2020/4/15 10:57 PM
@Overview:
"""
import torch
import torch.nn as nn
class SelfAttentionPooling(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(SelfAttentionPooling, self).__init__()
self.hidden_dim = hidden_dim
self.input_dim = input_dim
self.attention_linear = nn.Linear(input_dim, self.hidden_dim)
self.attention_activation = nn.Sigmoid()
self.attention_vector = nn.Parameter(torch.rand(self.hidden_dim, 1))
self.attention_soft = nn.Tanh()
def forward(self, x):
"""
:param x: [batch, length,feat_dim] vector
:return: [batch, feat_dim] vector
"""
        # Only drop a singleton channel dim; a bare squeeze() would also
        # remove the batch dim when the batch size is 1
        if x.dim() == 4 and x.shape[1] == 1:
            x = x.squeeze(1)
        assert len(x.shape) == 3
fx = self.attention_activation(self.attention_linear(x))
vf = fx.matmul(self.attention_vector)
alpha = self.attention_soft(vf)
alpha_ht = x.mul(alpha)
mean = torch.sum(alpha_ht, dim=-2)
return mean
class AttentionStatisticPooling(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(AttentionStatisticPooling, self).__init__()
self.hidden_dim = hidden_dim
self.input_dim = input_dim
self.attention_linear = nn.Linear(input_dim, self.hidden_dim)
self.attention_activation = nn.Sigmoid()
self.attention_vector = nn.Parameter(torch.rand(self.hidden_dim, 1))
self.attention_soft = nn.Tanh()
def forward(self, x):
"""
        :param x: [batch, length, feat_dim] tensor
        :return: [batch, 2 * feat_dim] tensor (concatenated mean and std)
"""
if x.shape[1] == 1:
x = x.squeeze(1)
assert len(x.shape) == 3
fx = self.attention_activation(self.attention_linear(x))
vf = fx.matmul(self.attention_vector)
alpha = self.attention_soft(vf)
alpha_ht = x.mul(alpha)
mean = torch.sum(alpha_ht, dim=-2, keepdim=True)
sigma_power = torch.sum(torch.pow(x - mean, 2).mul(alpha), dim=-2).add_(1e-12)
# alpha_ht_ht = x*x.mul(alpha)
sigma = torch.sqrt(sigma_power)
mean_sigma = torch.cat((mean.squeeze(1), sigma), 1)
return mean_sigma
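
# Quick shape check for the two pooling layers above (the hidden size 64 is
# an arbitrary choice for this sketch):
if __name__ == '__main__':
    frames = torch.rand(4, 100, 256)  # [batch, length, feat_dim]
    print(SelfAttentionPooling(256, 64)(frames).shape)       # torch.Size([4, 256])
    print(AttentionStatisticPooling(256, 64)(frames).shape)  # torch.Size([4, 512])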
| [
"[email protected]"
] | |
f11d8a1d3dde20793f358024f72b265acdbb6630 | 308c7d5dd37ca24096081edea6f73e79ce089da4 | /51-100/84. 柱状图中最大的矩形.py | dae417f40d7fb2e5a16d31f1bbfb002823eabc5d | [] | no_license | fengges/leetcode | 4953ca038b085cdb772054fa1483bf816dccb578 | 5d592440b214024cad342c497b381dbce19d8a70 | refs/heads/master | 2022-09-18T17:52:49.525234 | 2022-09-03T08:45:07 | 2022-09-03T08:45:07 | 132,389,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | class Solution:
def largestRectangleArea(self, heights):
res = 0
st=[]
heights.append(0)
i=0
while i<len(heights):
if len(st)==0 or heights[st[-1]] < heights[i]:
st.append(i)
i+=1
else:
cur = st[-1]
del st[-1]
if len(st)==0:
length=i
else:
length=i - st[-1] - 1
res = max(res, heights[cur] * length)
return res
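# Worked example for [2,1,5,6,2,3]: indices are pushed while bars rise; when
# heights[i] drops below the stack top, bars are popped and height * width is
# scored. At i=4 (height 2), popping 6 scores 6*1 and popping 5 scores
# 5*2 = 10, the maximum for this input.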
s=Solution()
test=[
{"input": [2,1,5,6,2,3], "output": 10},
]
for t in test:
r=s.largestRectangleArea(t['input'])
if r!=t['output']:
print("error:"+str(t)+" out:"+str(r))
r = s.largestRectangleArea(t['input']) | [
"[email protected]"
] | |
c355d00f358671006e580deed575d744d53a8a2f | b88be390d375277294dc958a29f1f35ec05c7783 | /player/migrations/0003_playersmodel.py | 6728bef9709d50a4c82dbb96a9e54a897ae3dfd6 | [] | no_license | rafaelcavasani/sort_my_team_new | 92ecadc33ae7dde43a6dc672ce05490a63599d6c | fe003a6bf250991d8faf0b533398bc1f2ec0c768 | refs/heads/master | 2021-09-23T03:53:49.862685 | 2020-02-09T22:35:22 | 2020-02-09T22:35:22 | 238,510,867 | 0 | 0 | null | 2021-09-22T18:31:42 | 2020-02-05T17:40:23 | Python | UTF-8 | Python | false | false | 628 | py | # Generated by Django 3.0.3 on 2020-02-05 16:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('player', '0002_teammodel'),
]
operations = [
migrations.CreateModel(
name='PlayersModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('position', models.CharField(max_length=25)),
('importance', models.IntegerField()),
],
),
]
| [
"[email protected]"
] | |
12b9d50b32c934ec5bed1e7f3ddf7a6bed070236 | 0e3ee223395e74cc1c80445d2c3e7fc61b1d5216 | /networks/transformer/encoder.py | c12e2d219910b3a832224056f9cf924badc6f65d | [] | no_license | xduan7/DLTM | 2c757843a7f8d8fca499f44af56fd09f7c9eee1c | 36058ca72f4720005d324fcc400c6a805abf4753 | refs/heads/master | 2020-04-05T09:54:15.493338 | 2019-03-04T23:27:22 | 2019-03-04T23:27:22 | 156,780,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | """
File Name: DLTM/encoder.py
Author: Xiaotian Duan (xduan7)
Email: [email protected]
Date: 11/8/18
Python Version: 3.6.6
File Description:
"""
import torch.nn as nn
from networks.transformer.embedding import Embedding
from networks.transformer.encoder_layer import EncoderLayer
from networks.transformer.positional_encoder import PositionalEncoder
class Encoder(nn.Module):
def __init__(self,
dict_size: int,
seq_length: int,
base_feq: float,
emb_scale: float,
emb_dim: int,
num_layers: int,
num_heads: int,
ff_mid_dim: int,
pe_dropout: float = 0.0,
mha_dropout: float = 0.0,
ff_dropout: float = 0.0,
enc_dropout: float = 0.0,
epsilon: float = 1e-6):
super().__init__()
self.__embedding = Embedding(dict_size=dict_size,
emb_dim=emb_dim)
self.__positional_encoder = \
PositionalEncoder(seq_length=seq_length,
emb_dim=emb_dim,
emb_scale=emb_scale,
dropout=pe_dropout,
base_feq=base_feq)
self.__encoder_layers = nn.ModuleList(
[EncoderLayer(emb_dim=emb_dim,
num_heads=num_heads,
ff_mid_dim=ff_mid_dim,
mha_dropout=mha_dropout,
ff_dropout=ff_dropout,
enc_dropout=enc_dropout,
epsilon=epsilon) for _ in range(num_layers)])
self.__output_norm = nn.LayerNorm(normalized_shape=emb_dim,
eps=epsilon)
def forward(self, src_indexed_sentence, src_mask):
h = self.__positional_encoder(self.__embedding(src_indexed_sentence))
for encoder_layer in self.__encoder_layers:
h = encoder_layer(h, src_mask)
return self.__output_norm(h)
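
# A short usage sketch; the hyper-parameters and the (batch, 1, seq) mask
# shape are illustrative assumptions about the surrounding modules:
if __name__ == '__main__':
    import torch
    enc = Encoder(dict_size=1000, seq_length=64, base_feq=10000.0,
                  emb_scale=1.0, emb_dim=128, num_layers=2, num_heads=8,
                  ff_mid_dim=512)
    tokens = torch.randint(0, 1000, (4, 64))  # (batch, seq_length)
    src_mask = torch.ones(4, 1, 64)           # attend to every position
    print(enc(tokens, src_mask).shape)        # expected: torch.Size([4, 64, 128])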
| [
"[email protected]"
] | |
72399c862381921816e9e4b50b98d88fc69a7001 | 9a5094444b6b343c184367f5be902fb0663aa340 | /node_modules/gulp-sass/node_modules/node-sass/build/config.gypi | 21beeef653b37154ab0f63b37e0ce1d906a36f50 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sethwisniewski/agency-x | 5b37bca61f3750c6a45326d9ce0d2f0b271820e6 | 5e2a9ef2f2d26a5fcb1e4567a506dc4c29e086e8 | refs/heads/master | 2020-03-30T18:05:13.605262 | 2018-10-10T11:29:24 | 2018-10-10T11:29:24 | 151,482,775 | 0 | 0 | null | 2018-10-10T11:29:25 | 2018-10-03T21:28:11 | CSS | UTF-8 | Python | false | false | 4,640 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/shadiaali/.node-gyp/8.11.3",
"standalone_static_library": 1,
"libsass_ext": "",
"libsass_cflags": "",
"libsass_ldflags": "",
"libsass_library": "",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/shadiaali/Desktop/homework/agency/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"npat": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/Users/shadiaali/Desktop/homework/agency/etc/npmrc",
"loaded": "",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/shadiaali/.npmrc",
"init_module": "/Users/shadiaali/.npm-init.js",
"user": "501",
"node_version": "8.11.3",
"save": "",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/shadiaali/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/2.15.12 node/v8.11.3 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"scope": "",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/nn/0fztryzj28153m6h0wj96vnh0000gn/T",
"onload_script": "",
"link": "",
"prefix": "/Users/shadiaali/Desktop/homework/agency"
}
}
| [
"[email protected]"
] | |
b2b258726dadc8f9172d3f68557313f7379335ff | 8ea8c399175c3e00296365eb31f62e57913354ec | /venv/bin/pip3 | be8913c154dd52e16633ce12f7f00f6c85c88587 | [] | no_license | dfriveros11/Arquisoft | ec81bcb2b8dea9481ad4e53c26993623bff97666 | 9543352519e09bc96182730c2b4f950d3725ab1e | refs/heads/master | 2021-07-16T06:34:06.797412 | 2018-05-03T00:43:47 | 2018-05-03T00:43:47 | 131,207,721 | 0 | 1 | null | 2020-05-12T00:29:37 | 2018-04-26T20:33:45 | Python | UTF-8 | Python | false | false | 400 | #!/home/arcadia/IdeaProjects/Arquisoft/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
85c45e3b374f9d2207ceeb63ad4edc812bd52f9c | 6245f2a57c849b50bf3528020a1a44f49b6ce6ba | /scripts/avsb/visualize.py | 51104acb8b2c3ea9fc29d8399745835746c384e2 | [
"MIT"
] | permissive | nanlovescan/frg | 725222302311408326030fa280f2ad2ead5a2215 | e789439f599eb884a6220ae5b471cf610b0c2b2a | master | 2023-07-01T01:28:57.697151 | 2021-07-20T01:45:43 | 2021-07-20T01:45:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | '''Display the results'''
import numpy
from helpers.drawer import draw_heatmap
def main():
    '''Entry point'''
lval = 5.20
rpath = 'heatmap8/avsb'
uval = numpy.load('{0}/{1:.2f}U.npy'.format(rpath, lval))
draw_heatmap(uval[0, 0, 0, 0, :, :, 0])
draw_heatmap(uval[1, 1, 1, 1, :, :, 0])
draw_heatmap(uval[1, 0, 0, 1, :, :, 0])
draw_heatmap(uval[0, 1, 1, 0, :, :, 0])
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
66a65c7bfb388fbad1a797acd3f5aa5a3f32735f | fe764f99cc4f75d70bb42a4bfcc06a26db026c45 | /solve.py | 0f9ce5a3a6179ebe911b58ca811e37c212456ce6 | [
"CC0-1.0"
] | permissive | atolopko/mastermind | a46a0ae59dedaffb095a6a8bd83ef50fe3c89c3c | 388aecac3d7cacaaca81a4004f55e4547277f8c7 | refs/heads/main | 2023-01-22T21:23:37.164087 | 2020-12-07T14:42:53 | 2020-12-07T14:42:53 | 319,185,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,967 | py | from random import randrange
import sys
import numpy as np
# code repr is 4-char string, with each color as digit 0-5
COLORS = 6
CODE_LEN = 4
MAX_CODES = COLORS ** CODE_LEN
ALL_CODES = [list(np.base_repr(code, COLORS).rjust(CODE_LEN, '0')) for code in range(0, MAX_CODES)]
def next_code_query(admissible_codes) -> list:
"""
As the code breaker, generate the next code query (guess). Do this
by simply guessing one of the remaining admissible_codes!
"""
return admissible_codes[randrange(len(admissible_codes))]
def query_response(query, code) -> (int, int):
correct = 0
included = 0
for c, q in zip(query, code):
if c == q:
correct += 1
code = list(code)
for q in query:
if q in code:
code.remove(q)
included += 1
return correct, included - correct
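
# Worked example for query_response: code='0123', query='0321'. Positions 0
# and 2 match ('0' and '2'), so correct == 2; all four query digits occur
# somewhere in the code, so included == 4 and misplaced == 4 - 2 == 2.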
def is_code_admissible(candidate_code, query, resp_correct, resp_misplaced) -> bool:
# For determining admissibility of a candidate code, the player's
# query becomes the code, and candidate code becomes the query
correct, misplaced = query_response(query=candidate_code, code=query)
return correct == resp_correct and misplaced == resp_misplaced
def prune(query, resp_correct, resp_misplaced, admissible_codes) -> list:
new_admissible_codes = []
for candidate_code in admissible_codes:
if is_code_admissible(candidate_code, query, resp_correct, resp_misplaced):
new_admissible_codes.append(candidate_code)
# print(f'admit {code}')
return new_admissible_codes
def play_round(code, admissible_codes) -> list:
query = next_code_query(admissible_codes)
num_correct, num_misplaced = query_response(query, code)
new_admissible_codes = prune(query, num_correct, num_misplaced, admissible_codes)
# print(f'code={code}, query={query}, correct={num_correct}, misplaced={num_misplaced}, admissible_codes={len(new_admissible_codes)}')
# Ensure progress is made
assert len(new_admissible_codes) < len(admissible_codes)
# print(new_admissible_codes)
return new_admissible_codes
if __name__ == "__main__":
admissible_codes = ALL_CODES
rounds_taken = []
for code in ALL_CODES:
# print('=' * 80)
# print(f'Trying to break code {code}')
round = 1
admissible_codes = ALL_CODES
while len(admissible_codes) > 1:
# print(f'ROUND {round}')
admissible_codes = play_round(code, admissible_codes)
round += 1
# print(f'CODE: {code}')
# print(f'QUERY: {admissible_codes[0]}')
assert len(admissible_codes) == 1
assert admissible_codes[0] == code
# print(f'{code} BROKEN in {round} rounds!')
rounds_taken.append(round)
print(np.histogram(rounds_taken, bins=10, range=(0, 10)))
print(f"Min turns required={min(rounds_taken)}")
print(f"Max turns required={max(rounds_taken)}")
| [
"[email protected]"
] | |
0d4454aa7c7bb30c66477f6a76d147030978da18 | 7a48540ac332c51e2a7d9bbf8e06d20b8300945a | /detection/train.py | 17ff0ca5f443946f4affbe7ed30c9cf5a45423b6 | [] | no_license | Sophia-team/OCR-CARS-PLATES | 6ae29072f64bf9e9547e52a26b39da01d120c392 | 25594a40d1422880f4861583f0772c4d27fcf85b | refs/heads/master | 2022-11-15T13:11:01.599622 | 2020-07-10T10:54:06 | 2020-07-10T10:54:06 | 278,608,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,672 | py | import os
import sys
from argparse import ArgumentParser
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import tqdm
import torchvision
from dataset import DetectionDataset
from unet import UNet
from torch import optim
from torch.utils.data import DataLoader
from transform import *
from torchvision import transforms
import segmentation_models_pytorch as smp
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
sys.path.insert(0, os.path.abspath((os.path.dirname(__file__)) + '/../'))
from utils import get_logger, dice_coeff, dice_loss
def eval_net(net, dataset, device):
net.eval()
tot = 0.
with torch.no_grad():
for i, b in tqdm.tqdm(enumerate(dataset), total=len(dataset)):
imgs, true_masks = b
result = net(imgs.to(device))
            masks_pred = result[:, 0]  # (b, 2, h, w) -> (b, h, w): first output channel
masks_pred = (F.sigmoid(masks_pred) > 0.5).float()
tot += dice_coeff(masks_pred.cpu(), true_masks).item()
return tot / len(dataset)
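
# For reference: dice_coeff above is assumed to implement the usual soft Dice
# overlap, 2*|A ∩ B| / (|A| + |B|), with dice_loss as its complement
# (1 - dice); both come from ../utils and may differ in smoothing details.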
def train(net, optimizer, criterion, scheduler, train_dataloader, val_dataloader, logger, args=None, device=None):
num_batches = len(train_dataloader)
best_model_info = {'epoch': -1, 'val_dice': 0., 'train_dice': 0., 'train_loss': 0.}
for epoch in range(args.epochs):
logger.info('Starting epoch {}/{}.'.format(epoch + 1, args.epochs))
net.train()
if scheduler is not None:
scheduler.step(epoch)
epoch_loss = 0.
tqdm_iter = tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader))
mean_bce, mean_dice = [], []
for i, batch in tqdm_iter:
imgs, true_masks = batch
result = net(imgs.to(device))
masks_pred = result[:, 0]
masks_probs = F.sigmoid(masks_pred)
bce_val, dice_val = criterion(masks_probs.cpu().view(-1), true_masks.view(-1))
loss = bce_val + dice_val
mean_bce.append(bce_val.item())
mean_dice.append(dice_val.item())
epoch_loss += loss.item()
tqdm_iter.set_description('mean loss: {:.4f}'.format(epoch_loss / (i + 1)))
optimizer.zero_grad()
loss.backward()
optimizer.step()
logger.info('Epoch finished! Loss: {:.5f} ({:.5f} | {:.5f})'.format(epoch_loss / num_batches,
np.mean(mean_bce), np.mean(mean_dice)))
val_dice = eval_net(net, val_dataloader, device=device)
if val_dice > best_model_info['val_dice']:
best_model_info['val_dice'] = val_dice
best_model_info['train_loss'] = epoch_loss / num_batches
best_model_info['epoch'] = epoch
torch.save(net.state_dict(), os.path.join(args.output_dir, f'{args.model}_CP-best_epoch-{epoch}.pth'))
logger.info('Validation Dice Coeff: {:.5f} (best)'.format(val_dice))
else:
logger.info('Validation Dice Coeff: {:.5f} (best {:.5f})'.format(val_dice, best_model_info['val_dice']))
torch.save(net.state_dict(), os.path.join(args.output_dir, f'{args.model}_best_epoch-{epoch}.pth'))
def main():
parser = ArgumentParser()
parser.add_argument('-d', '--data_path', dest='data_path', type=str, default=None ,help='path to the data')
parser.add_argument('-e', '--epochs', dest='epochs', default=20, type=int, help='number of epochs')
parser.add_argument('-b', '--batch_size', dest='batch_size', default=40, type=int, help='batch size')
parser.add_argument('-s', '--image_size', dest='image_size', default=256, type=int, help='input image size')
parser.add_argument('-lr', '--learning_rate', dest='lr', default=0.0001, type=float, help='learning rate')
parser.add_argument('-wd', '--weight_decay', dest='weight_decay', default=5e-4, type=float, help='weight decay')
parser.add_argument('-lrs', '--learning_rate_step', dest='lr_step', default=10, type=int, help='learning rate step')
parser.add_argument('-lrg', '--learning_rate_gamma', dest='lr_gamma', default=0.5, type=float,
help='learning rate gamma')
parser.add_argument('-m', '--model', dest='model', default='fpn',)
parser.add_argument('-w', '--weight_bce', default=0.5, type=float, help='weight BCE loss')
parser.add_argument('-l', '--load', dest='load', default=False, help='load file model')
    parser.add_argument('-v', '--val_split', dest='val_split', default=0.7, type=float, help='train/val split')
parser.add_argument('-o', '--output_dir', dest='output_dir', default='./output', help='dir to save log and models')
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
logger = get_logger(os.path.join(args.output_dir, 'train.log'))
logger.info('Start training with params:')
for arg, value in sorted(vars(args).items()):
logger.info("Argument %s: %r", arg, value)
# net = UNet() # TODO: to use move novel arch or/and more lightweight blocks (mobilenet) to enlarge the batch_size
# net = smp.FPN('mobilenet_v2', encoder_weights='imagenet', classes=2)
net = smp.FPN('se_resnet50', encoder_weights='imagenet', classes=2)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if args.load:
net.load_state_dict(torch.load(args.load))
logger.info('Model type: {}'.format(net.__class__.__name__))
net.to(device)
optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
criterion = lambda x, y: (args.weight_bce * nn.BCELoss()(x, y), (1. - args.weight_bce) * dice_loss(x, y))
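    # criterion returns a (weighted BCE, weighted dice) pair rather than a single
    # scalar, so the training loop can log both components separately before summing.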
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step, gamma=args.lr_gamma) \
if args.lr_step > 0 else None
train_transforms = Compose([
Crop(min_size=1 - 1 / 3., min_ratio=1.0, max_ratio=1.0, p=0.5),
Flip(p=0.05),
RandomRotate(),
Pad(max_size=0.6, p=0.25),
Resize(size=(args.image_size, args.image_size), keep_aspect=True),
ScaleToZeroOne(),
])
val_transforms = Compose([
Resize(size=(args.image_size, args.image_size)),
ScaleToZeroOne(),
])
train_dataset = DetectionDataset(args.data_path, os.path.join(args.data_path, 'train_mask.json'),
transforms=train_transforms)
val_dataset = DetectionDataset(args.data_path, None, transforms=val_transforms)
train_size = int(len(train_dataset) * args.val_split)
val_dataset.image_names = train_dataset.image_names[train_size:]
val_dataset.mask_names = train_dataset.mask_names[train_size:]
train_dataset.image_names = train_dataset.image_names[:train_size]
train_dataset.mask_names = train_dataset.mask_names[:train_size]
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8,
shuffle=True, drop_last=True)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=4,
shuffle=False, drop_last=False)
logger.info('Number of batches of train/val=%d/%d', len(train_dataloader), len(val_dataloader))
try:
train(net, optimizer, criterion, scheduler, train_dataloader, val_dataloader, logger=logger, args=args,
device=device)
except KeyboardInterrupt:
torch.save(net.state_dict(), os.path.join(args.output_dir, f'{args.model}_INTERRUPTED.pth'))
logger.info('Saved interrupt')
sys.exit(0)
if __name__ == '__main__':
main()
# === /muntjac/addon/weelayout/wee_layout_application.py | metaperl/muntjac | Apache-2.0 ===
from muntjac.api \
import Application, Window, VerticalLayout, HorizontalLayout, \
NativeButton, TextField, Alignment, Label
from muntjac.ui.button \
import IClickListener
from muntjac.addon.weelayout.wee_layout \
import WeeLayout, Direction
class WeelayoutApplication(Application):
def __init__(self):
super(WeelayoutApplication, self).__init__()
self._core = False
self._vertical = False
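        # _core toggles between the Vaadin core layouts (VerticalLayout /
        # HorizontalLayout) and WeeLayout; _vertical picks the direction used
        # by the recursive demo builders below.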
def init(self):
mainWindow = Window('Weelayout Application')
self.setMainWindow(mainWindow)
# mainWindow.setContent(self.splitRecursive(2))
# mainWindow.setContent(self.undefinedWithRelativeSizes())
# mainWindow.setContent(self.splitView())
# mainWindow.setContent(self.createVertical(2))
# mainWindow.setContent(self.createCoreVertical(2))
# mainWindow.setContent(self.createHorizontal(2))
mainWindow.setContent(self.createCoreHorizontal(2))
self.setTheme('test')
def splitRecursive(self, deep):
l = None
if self._core:
l = VerticalLayout() if self._vertical else HorizontalLayout()
else:
if self._vertical:
l = WeeLayout(Direction.VERTICAL)
else:
l = WeeLayout(Direction.HORIZONTAL)
l.setSizeFull()
if self._core:
c = l
b = NativeButton('One')
b.setSizeFull()
c.addComponent(b)
c.setExpandRatio(b, 1)
if deep > 0:
deep -= 1
c2 = self.splitRecursive(deep)
c.addComponent(c2)
c.setExpandRatio(c2, 9)
else:
wl = l
wl.setClipping(True)
b = NativeButton('Button')
b.setSizeFull()
if self._vertical:
b.setHeight('10%')
else:
b.setWidth('10%')
l.addComponent(b)
if deep > 0:
deep -= 1
w = self.splitRecursive(deep)
if self._vertical:
w.setHeight('90%')
else:
w.setWidth('90%')
l.addComponent(w)
else:
b.setSizeFull()
return l
def undefinedWithRelativeSizes(self):
wl = WeeLayout(Direction.VERTICAL)
wl.setHeight('100%')
wlong = NativeButton('With long caption', LongClickListener())
wl.addComponent(wlong, '100%', '30px', Alignment.TOP_LEFT)
b = NativeButton('Two')
b.addStyleName('test')
wl.addComponent(b, '100%', '100%', Alignment.TOP_LEFT)
wl.setSmartRelativeSizes(True)
return wl
def splitView(self):
wl = WeeLayout(Direction.HORIZONTAL)
wl.setSizeFull()
one = NativeButton('One', OneClickListener())
wl.addComponent(one, '100px', '30px', Alignment.TOP_RIGHT)
wl.addComponent(Label(''), '14px', '14px', Alignment.TOP_CENTER)
wl.addComponent(NativeButton('Two'), '100%', '100%',
Alignment.TOP_CENTER)
# wl.setClipping(true)
return wl
def createVertical(self, recurse):
wl = WeeLayout(Direction.VERTICAL)
wl.setSizeFull()
# wl.setWidth("100%")
# wl.setHeight("50%")
wl.addComponent(TextField('Left'), Alignment.TOP_LEFT)
wl.addComponent(TextField('Center'), Alignment.TOP_CENTER)
tf = TextField('Right')
tf.setWidth('50%')
wl.addComponent(tf, Alignment.TOP_RIGHT)
if recurse > 0:
recurse -= 1
wl.addComponent(self.createHorizontal(recurse))
return wl
def createHorizontal(self, recurse):
wl = WeeLayout(Direction.HORIZONTAL)
wl.setSizeFull()
# wl.setHeight("100%");
wl.addComponent(TextField('Top'), Alignment.TOP_LEFT)
wl.addComponent(TextField('Middle'), Alignment.MIDDLE_LEFT)
tf = TextField('Bottom')
tf.setHeight('50%')
wl.addComponent(tf, Alignment.BOTTOM_LEFT)
if recurse > 0:
recurse -= 1
wl.addComponent(self.createVertical(recurse))
return wl
def createCoreVertical(self, recurse):
"""Same with core layouts"""
l = VerticalLayout()
l.setSizeFull()
tf = TextField('Left')
l.addComponent(tf)
l.setComponentAlignment(tf, Alignment.TOP_LEFT)
tf = TextField('Center')
l.addComponent(tf)
l.setComponentAlignment(tf, Alignment.TOP_CENTER)
tf = TextField('Right')
l.addComponent(tf)
tf.setWidth('50%')
l.setComponentAlignment(tf, Alignment.TOP_RIGHT)
if recurse > 0:
recurse -= 1
createCoreHorizontal = self.createCoreHorizontal(recurse)
l.addComponent(createCoreHorizontal)
l.setExpandRatio(createCoreHorizontal, 1)
return l
def createCoreHorizontal(self, recurse):
l = HorizontalLayout()
l.setSizeFull()
tf = TextField('Top')
l.addComponent(tf)
l.setComponentAlignment(tf, Alignment.TOP_LEFT)
tf = TextField('Middle')
l.addComponent(tf)
l.setComponentAlignment(tf, Alignment.MIDDLE_LEFT)
tf = TextField('Bottom')
l.addComponent(tf)
tf.setWidth('50%')
l.setComponentAlignment(tf, Alignment.BOTTOM_LEFT)
if recurse > 0:
recurse -= 1
createCoreVertical = self.createCoreVertical(recurse)
l.addComponent(createCoreVertical)
l.setExpandRatio(createCoreVertical, 1)
return l
class LongClickListener(IClickListener):
def buttonClick(self, event):
if event.getButton().getCaption() is None:
event.getButton().setCaption('Long caption')
else:
event.getButton().setCaption(None)
class OneClickListener(IClickListener):
def buttonClick(self, event):
event.getButton().setWidth('300px')
if __name__ == '__main__':
from muntjac.main import muntjac
muntjac(WeelayoutApplication, nogui=True, forever=True, debug=True)
# === /evaluate/model_mlp.py | kpzhang/vec2link | no_license ===
# coding: utf-8
import tensorflow as tf
import numpy as np
from sklearn.metrics import roc_auc_score
# run four_methods.py first
class Mlp:
def __init__(self, sess, inputs_data, outputs_data, test_inputs, test_labels, output_dim, lr=0.0001,
batch_size=128, run_time=100000, learning_rate_decay_factor=0.98, output_interval=200):
self._sess = sess
self._inputs_data = inputs_data
self._output_dim = output_dim
self._outputs_data = outputs_data
self._test_inputs_data = test_inputs
self._test_labels_data = test_labels
self._x = tf.placeholder(tf.float32, [None, len(inputs_data[0])])
self._y = tf.placeholder(tf.int32, [None, self._output_dim])
self._inputs_num = len(self._inputs_data)
# self._lr = lr
self.learning_rate = tf.Variable(
float(lr), trainable=False, dtype=tf.float32)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self._current_index = 0
self._batch_size = batch_size
self._run_time = run_time
self._output_interval = output_interval
self._build_net()
def _build_net(self):
l0 = tf.layers.dense(self._x, 128, tf.nn.sigmoid)
l1 = tf.layers.dense(l0, 64, tf.nn.sigmoid)
l2 = tf.layers.dense(l1, 32, tf.nn.sigmoid)
        self._logits = tf.layers.dense(l2, self._output_dim)  # output layer (raw logits)
        self._output = tf.nn.sigmoid(self._logits)  # probabilities, used for scoring/AUC
        self.loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self._y, logits=self._logits)  # compute cost on the raw logits; sigmoid_cross_entropy applies its own sigmoid
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
def _get_batch(self):
result_x = []
result_y = []
if self._current_index + self._batch_size <= self._inputs_num:
result_x = self._inputs_data[self._current_index:self._current_index + self._batch_size]
result_y = self._outputs_data[self._current_index:self._current_index + self._batch_size]
self._current_index += self._batch_size
else:
result_x = list(self._inputs_data[self._current_index:])
result_y = list(self._outputs_data[self._current_index:])
length = self._batch_size - len(result_x)
for i in range(length):
result_x.append(self._inputs_data[i])
result_y.append(self._outputs_data[i])
self._current_index = length
assert len(result_x) == len(result_y) == self._batch_size
# return result_x, result_y
return result_x, [[i] for i in result_y]
def _get_test(self):
return self._test_inputs_data, [[i] for i in self._test_labels_data]
def _get_train(self):
return self._inputs_data, [[i] for i in self._outputs_data]
def tran_net(self):
total_loss = 0
loss_history = [0., 0., 0.]
for i in range(self._run_time):
x_batch, y_batch = self._get_batch()
try:
lr_, _loss, _, _output = self._sess.run([self.learning_rate, self.loss, self.train_op, self._output],
{self._x: x_batch, self._y: y_batch})
            except Exception, e:
                print e
                continue
total_loss += _loss
if i != 0 and i % self._output_interval == 0:
current_loss = total_loss / self._output_interval
total_loss = 0
if i > 1000 and current_loss > max(loss_history):
self._sess.run(self.learning_rate_decay_op)
loss_history[i % 3] = current_loss
# auc
x_train, y_train = self._get_train()
_train_output = self._sess.run([self._output], {self._x: x_train, self._y: y_train})
train_score = []
for tr_out in _train_output[0].tolist():
for tr_o in tr_out:
train_score.append(tr_o)
train_true = []
for y_tr in y_train:
for y in y_tr:
train_true.append(y)
train_auc = roc_auc_score(train_true,train_score)
#
x_test,y_test = self._get_test()
_test_loss, _test_output = self._sess.run([self.loss, self._output],
{self._x: x_test, self._y: y_test})
test_score = []
for te_out in _test_output.tolist():
for te_o in te_out:
test_score.append(te_o)
#
true_pos_count = 0
test_true = []
for y_te in y_test:
for y_t in y_te:
test_true.append(y_t)
if y_t == 1:
true_pos_count += 1
else:
pass
test_auc = roc_auc_score(test_true,test_score)
print 'step:', i, 'lr:', lr_, 'train_loss:', current_loss, \
'train_auc:', train_auc, 'test_loss:', _test_loss, 'test_auc:', test_auc
def predict(self, inputs):
return self._sess.run(self._output, {self._x: inputs})
train_inputs = []
train_labels = []
test_inputs = []
test_labels = []
file_train = '../data/red_vec_train.txt'
file_test = '../data/red_vec_test.txt'
f_tr = open(file_train)
for line in f_tr.readlines():
train_inputs.append(list(line.split(' ')[:-1]))
train_labels.append(int(line[-2]))
f_tr.close()
f_te = open(file_test)
for line in f_te.readlines():
test_inputs.append(list(line.split(' ')[:-1]))
test_labels.append(int(line[-2]))
f_te.close()
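# Input format note: each line of red_vec_{train,test}.txt is expected to hold
# space-separated feature values followed by a 0/1 label, so line[-2] (the
# character just before the trailing newline) is the label digit.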
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf_model = Mlp(sess, train_inputs, train_labels, test_inputs, test_labels, 1,
lr=0.0003, run_time=100000, batch_size=64, learning_rate_decay_factor=0.9)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
try:
for it in range(100000):
tf_model.tran_net()
except KeyboardInterrupt:
    print 'Training interrupted.'
save_path = saver.save(sess,'../Model/save_model.ckpt')
# === /downloads/code/LintCode/Majority-Number.py | sdytlm/sdytlm.github.io | no_license ===
class Solution:
"""
@param nums: A list of integers
@return: The majority number
"""
def majorityNumber(self, nums):
# write your code here
hash_map = dict()
for i in nums:
if hash_map.has_key(i):
hash_map[i] += 1
else:
hash_map[i] = 1
max = 0
result = 0
for i in nums:
if hash_map[i] > max:
max = hash_map[i]
result = i
return result
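# An alternative constant-space solution (an illustrative sketch, not part of
# the original submission): the Boyer-Moore majority vote, which relies on the
# problem's guarantee that a majority element exists.
class SolutionBoyerMoore:
    def majorityNumber(self, nums):
        candidate, count = None, 0
        for n in nums:
            if count == 0:
                candidate = n  # adopt a new candidate when votes cancel out
            count += 1 if n == candidate else -1
        return candidate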
# === /venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/configuration.py | thevirg/InvoiceProgram | no_license ===
"""Configuration management setup
Some terminology:
- name
As written in config files.
- value
Value associated with a name
- key
Name combined with it's section (section.name)
- variant
A single word describing where the configuration key-value pair came from
"""
import locale
import logging
import os
from pip._vendor import six
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import (
ConfigurationError, ConfigurationFileCouldNotBeLoaded,
)
from pip._internal.locations import (
legacy_config_file, new_config_file, running_under_virtualenv,
site_config_files, venv_config_file,
)
from pip._internal.utils.misc import ensure_dir, enum
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import ( # noqa: F401
Any, Dict, Iterable, List, NewType, Optional, Tuple
)
RawConfigParser = configparser.RawConfigParser # Shorthand
Kind = NewType("Kind", str)
logger = logging.getLogger(__name__)
# NOTE: Maybe use the optionx attribute to normalize keynames.
def _normalize_name(name):
# type: (str) -> str
"""Make a name consistent regardless of source (environment or file)
"""
name = name.lower().replace('_', '-')
if name.startswith('--'):
name = name[2:] # only prefer long opts
return name
def _disassemble_key(name):
# type: (str) -> List[str]
return name.split(".", 1)
# The kinds of configurations there are.
kinds = enum(
USER="user", # User Specific
GLOBAL="global", # System Wide
VENV="venv", # Virtual Environment Specific
ENV="env", # from PIP_CONFIG_FILE
ENV_VAR="env-var", # from Environment Variables
)
class Configuration(object):
"""Handles management of configuration.
    Provides an interface for accessing and managing configuration files.
    This class provides an API that takes "section.key-name" style keys and
    stores the value associated with it as "key-name" under the section
    "section".
    This allows for a clean interface wherein both the section and the
    key-name are preserved in an easy-to-manage form in the configuration
    files, and the stored data remains readable.
"""
def __init__(self, isolated, load_only=None):
# type: (bool, Kind) -> None
super(Configuration, self).__init__()
_valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.VENV, None]
if load_only not in _valid_load_only:
raise ConfigurationError(
"Got invalid value for load_only - should be one of {}".format(
", ".join(map(repr, _valid_load_only[:-1]))
)
)
self.isolated = isolated # type: bool
self.load_only = load_only # type: Optional[Kind]
# The order here determines the override order.
self._override_order = [
kinds.GLOBAL, kinds.USER, kinds.VENV, kinds.ENV, kinds.ENV_VAR
]
self._ignore_env_names = ["version", "help"]
# Because we keep track of where we got the data from
self._parsers = {
variant: [] for variant in self._override_order
} # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
self._config = {
variant: {} for variant in self._override_order
} # type: Dict[Kind, Dict[str, Any]]
self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]]
def load(self):
# type: () -> None
"""Loads configuration from configuration files and environment
"""
self._load_config_files()
if not self.isolated:
self._load_environment_vars()
def get_file_to_edit(self):
# type: () -> Optional[str]
"""Returns the file with highest priority in configuration
"""
assert self.load_only is not None, \
"Need to be specified a file to be editing"
try:
return self._get_parser_to_modify()[0]
except IndexError:
return None
def items(self):
# type: () -> Iterable[Tuple[str, Any]]
"""Returns key-value pairs like dict.items() representing the loaded
configuration
"""
return self._dictionary.items()
def get_value(self, key):
# type: (str) -> Any
"""Get a value from the configuration.
"""
try:
return self._dictionary[key]
except KeyError:
raise ConfigurationError("No such key - {}".format(key))
def set_value(self, key, value):
# type: (str, Any) -> None
"""Modify a value in the configuration.
"""
self._ensure_have_load_only()
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Modify the parser and the configuration
if not parser.has_section(section):
parser.add_section(section)
parser.set(section, name, value)
self._config[self.load_only][key] = value
self._mark_as_modified(fname, parser)
def unset_value(self, key):
# type: (str) -> None
"""Unset a value in the configuration.
"""
self._ensure_have_load_only()
if key not in self._config[self.load_only]:
raise ConfigurationError("No such key - {}".format(key))
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Remove the key in the parser
modified_something = False
if parser.has_section(section):
# Returns whether the option was removed or not
modified_something = parser.remove_option(section, name)
if modified_something:
# name removed from parser, section may now be empty
section_iter = iter(parser.items(section))
try:
val = six.next(section_iter)
except StopIteration:
val = None
if val is None:
parser.remove_section(section)
self._mark_as_modified(fname, parser)
else:
raise ConfigurationError(
"Fatal Internal error [id=1]. Please report as a bug."
)
del self._config[self.load_only][key]
def save(self):
# type: () -> None
"""Save the currentin-memory state.
"""
self._ensure_have_load_only()
for fname, parser in self._modified_parsers:
logger.info("Writing to %s", fname)
# Ensure directory exists.
ensure_dir(os.path.dirname(fname))
with open(fname, "w") as f:
parser.write(f) # type: ignore
#
# Private routines
#
def _ensure_have_load_only(self):
# type: () -> None
if self.load_only is None:
raise ConfigurationError("Needed a specific file to be modifying.")
logger.debug("Will be working with %s variant only", self.load_only)
@property
def _dictionary(self):
# type: () -> Dict[str, Any]
"""A dictionary representing the loaded configuration.
"""
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in self._override_order:
retval.update(self._config[variant])
return retval
def _load_config_files(self):
# type: () -> None
"""Loads configuration from configuration files
"""
config_files = dict(self._iter_config_files())
if config_files[kinds.ENV][0:1] == [os.devnull]:
logger.debug(
"Skipping loading configuration files due to "
"environment's PIP_CONFIG_FILE being os.devnull"
)
return
for variant, files in config_files.items():
for fname in files:
# If there's specific variant set in `load_only`, load only
# that variant, not the others.
if self.load_only is not None and variant != self.load_only:
logger.debug(
"Skipping file '%s' (variant: %s)", fname, variant
)
continue
parser = self._load_file(variant, fname)
# Keeping track of the parsers used
self._parsers[variant].append((fname, parser))
def _load_file(self, variant, fname):
# type: (Kind, str) -> RawConfigParser
logger.debug("For variant '%s', will try loading '%s'", variant, fname)
parser = self._construct_parser(fname)
for section in parser.sections():
items = parser.items(section)
self._config[variant].update(self._normalized_keys(section, items))
return parser
def _construct_parser(self, fname):
# type: (str) -> RawConfigParser
parser = configparser.RawConfigParser()
# If there is no such file, don't bother reading it but create the
# parser anyway, to hold the data.
# Doing this is useful when modifying and saving files, where we don't
# need to construct a parser.
if os.path.exists(fname):
try:
parser.read(fname)
except UnicodeDecodeError:
# See https://github.com/pypa/pip/issues/4963
raise ConfigurationFileCouldNotBeLoaded(
reason="contains invalid {} characters".format(
locale.getpreferredencoding(False)
),
fname=fname,
)
except configparser.Error as error:
# See https://github.com/pypa/pip/issues/4893
raise ConfigurationFileCouldNotBeLoaded(error=error)
return parser
def _load_environment_vars(self):
# type: () -> None
"""Loads configuration from environment variables
"""
self._config[kinds.ENV_VAR].update(
self._normalized_keys(":env:", self._get_environ_vars())
)
def _normalized_keys(self, section, items):
# type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
"""Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment.
"""
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized
def _get_environ_vars(self):
# type: () -> Iterable[Tuple[str, str]]
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
should_be_yielded = (
key.startswith("PIP_") and
key[4:].lower() not in self._ignore_env_names
)
if should_be_yielded:
yield key[4:].lower(), val
# XXX: This is patched in the tests.
def _iter_config_files(self):
# type: () -> Iterable[Tuple[Kind, List[str]]]
"""Yields variant and configuration files associated with it.
This should be treated like items of a dictionary.
"""
# SMELL: Move the conditions out of this function
# environment variables have the lowest priority
config_file = os.environ.get('PIP_CONFIG_FILE', None)
if config_file is not None:
yield kinds.ENV, [config_file]
else:
yield kinds.ENV, []
# at the base we have any global configuration
yield kinds.GLOBAL, list(site_config_files)
# per-user configuration next
should_load_user_config = not self.isolated and not (
config_file and os.path.exists(config_file)
)
if should_load_user_config:
# The legacy config file is overridden by the new config file
yield kinds.USER, [legacy_config_file, new_config_file]
# finally virtualenv configuration first trumping others
if running_under_virtualenv():
yield kinds.VENV, [venv_config_file]
def _get_parser_to_modify(self):
# type: () -> Tuple[str, RawConfigParser]
# Determine which parser to modify
parsers = self._parsers[self.load_only]
if not parsers:
# This should not happen if everything works correctly.
raise ConfigurationError(
"Fatal Internal error [id=2]. Please report as a bug."
)
# Use the highest priority parser.
return parsers[-1]
# XXX: This is patched in the tests.
def _mark_as_modified(self, fname, parser):
# type: (str, RawConfigParser) -> None
file_parser_tuple = (fname, parser)
if file_parser_tuple not in self._modified_parsers:
self._modified_parsers.append(file_parser_tuple)
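# Example usage (a minimal sketch; Configuration is a pip-internal API, so the
# exact signatures may differ between pip releases):
#
#     config = Configuration(isolated=False, load_only=kinds.USER)
#     config.load()
#     for key, value in config.items():
#         print(key, value)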
# === /day-02/day-02.py | aoutzen/advent-of-code | no_license ===
# --- Day 2: 1202 Program Alarm ---
# On the way to your gravity assist around the Moon, your ship computer beeps angrily about a
# "1202 program alarm". On the radio, an Elf is already explaining how to handle the situation:
# "Don't worry, that's perfectly norma--" The ship computer bursts into flames.
# You notify the Elves that the computer's magic smoke seems to have escaped. "That computer
# ran Intcode programs like the gravity assist program it was working on; surely there are
# enough spare parts up there to build a new Intcode computer!"
# An Intcode program is a list of integers separated by commas (like 1,0,0,3,99). To run one,
# start by looking at the first integer (called position 0). Here, you will find an opcode -
# either 1, 2, or 99. The opcode indicates what to do; for example, 99 means that the program
# is finished and should immediately halt. Encountering an unknown opcode means something went
# wrong.
# Opcode 1 adds together numbers read from two positions and stores the result in a third
# position. The three integers immediately after the opcode tell you these three positions -
# the first two indicate the positions from which you should read the input values, and the
# third indicates the position at which the output should be stored.
# For example, if your Intcode computer encounters 1,10,20,30, it should read the values at
# positions 10 and 20, add those values, and then overwrite the value at position 30 with their
# sum.
# Opcode 2 works exactly like opcode 1, except it multiplies the two inputs instead of adding
# them. Again, the three integers after the opcode indicate where the inputs and outputs are,
# not their values.
# Once you're done processing an opcode, move to the next one by stepping forward 4 positions.
# For example, suppose you have the following program:
# 1,9,10,3,2,3,11,0,99,30,40,50
# For the purposes of illustration, here is the same program split into multiple lines:
# 1,9,10,3,
# 2,3,11,0,
# 99,
# 30,40,50
# The first four integers, 1,9,10,3, are at positions 0, 1, 2, and 3. Together, they represent
# the first opcode (1, addition), the positions of the two inputs (9 and 10), and the position
# of the output (3). To handle this opcode, you first need to get the values at the input
# positions: position 9 contains 30, and position 10 contains 40. Add these numbers together
# to get 70. Then, store this value at the output position; here, the output position (3) is
# at position 3, so it overwrites itself. Afterward, the program looks like this:
# 1,9,10,70,
# 2,3,11,0,
# 99,
# 30,40,50
# Step forward 4 positions to reach the next opcode, 2. This opcode works just like the
# previous, but it multiplies instead of adding. The inputs are at positions 3 and 11; these
# positions contain 70 and 50 respectively. Multiplying these produces 3500; this is stored
# at position 0:
# 3500,9,10,70,
# 2,3,11,0,
# 99,
# 30,40,50
# Stepping forward 4 more positions arrives at opcode 99, halting the program.
# Here are the initial and final states of a few more small programs:
# 1,0,0,0,99 becomes 2,0,0,0,99 (1 + 1 = 2).
# 2,3,0,3,99 becomes 2,3,0,6,99 (3 * 2 = 6).
# 2,4,4,5,99,0 becomes 2,4,4,5,99,9801 (99 * 99 = 9801).
# 1,1,1,4,99,5,6,0,99 becomes 30,1,1,4,2,5,6,0,99.
# Once you have a working computer, the first step is to restore the gravity assist program
# (your puzzle input) to the "1202 program alarm" state it had just before the last computer
# caught fire. To do this, before running the program, replace position 1 with the value 12
# and replace position 2 with the value 2. What value is left at position 0 after the program
# halts?
# --- Part Two ---
# "Good, the new computer seems to be working correctly! Keep it nearby during this mission -
# you'll probably use it again. Real Intcode computers support many more features than your new
# one, but we'll let you know what they are as you need them."
# "However, your current priority should be to complete your gravity assist around the Moon.
# For this mission to succeed, we should settle on some terminology for the parts you've already
# built."
# Intcode programs are given as a list of integers; these values are used as the initial state
# for the computer's memory. When you run an Intcode program, make sure to start by initializing
# memory to the program's values. A position in memory is called an address (for example, the
# first value in memory is at "address 0").
# Opcodes (like 1, 2, or 99) mark the beginning of an instruction. The values used immediately
# after an opcode, if any, are called the instruction's parameters. For example, in the
# instruction 1,2,3,4, 1 is the opcode; 2, 3, and 4 are the parameters. The instruction 99
# contains only an opcode and has no parameters.
# The address of the current instruction is called the instruction pointer; it starts at 0.
# After an instruction finishes, the instruction pointer increases by the number of values in
# the instruction; until you add more instructions to the computer, this is always 4 (1 opcode
# + 3 parameters) for the add and multiply instructions. (The halt instruction would increase
# the instruction pointer by 1, but it halts the program instead.)
# "With terminology out of the way, we're ready to proceed. To complete the gravity assist, you
# need to determine what pair of inputs produces the output 19690720."
# The inputs should still be provided to the program by replacing the values at addresses 1 and
# 2, just like before. In this program, the value placed in address 1 is called the noun, and
# the value placed in address 2 is called the verb. Each of the two input values will be
# between 0 and 99, inclusive.
# Once the program has halted, its output is available at address 0, also just like before.
# Each time you try a pair of inputs, make sure you first reset the computer's memory to the
# values in the program (your puzzle input) - in other words, don't reuse memory from a
# previous attempt.
# Find the input noun and verb that cause the program to produce the output 19690720. What is
# 100 * noun + verb? (For example, if noun=12 and verb=2, the answer would be 1202.)
import os
def open_file_from_same_directory(filename):
cur_dir = os.path.dirname(__file__)
input_path = os.path.join(cur_dir, filename)
input_file = open(input_path)
return input_file
def intcode_program_from_str(program_string):
split_program = program_string.split(',')
intcode_program = [int(i) for i in split_program]
return intcode_program
def run_intcode_program(intcode_program):
program_index = 0
while intcode_program[program_index] != 99:
if intcode_program[program_index] == 1:
# addition
operand1_index = intcode_program[program_index + 1]
operand2_index = intcode_program[program_index + 2]
sum = intcode_program[operand1_index] + intcode_program[operand2_index]
result_index = intcode_program[program_index + 3]
intcode_program[result_index] = sum
program_index += 4
elif intcode_program[program_index] == 2:
# multiplication
operand1_index = intcode_program[program_index + 1]
operand2_index = intcode_program[program_index + 2]
prod = intcode_program[operand1_index] * intcode_program[operand2_index]
result_index = intcode_program[program_index + 3]
intcode_program[result_index] = prod
program_index += 4
else:
print('Something went wrong!')
break
return intcode_program
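# Quick self-test against the example programs quoted above (an illustrative
# helper, not part of the original solution; run_intcode_program mutates the
# list it receives, so each example is passed as a fresh literal):
def check_example_programs():
    assert run_intcode_program([1, 0, 0, 0, 99]) == [2, 0, 0, 0, 99]
    assert run_intcode_program([2, 3, 0, 3, 99]) == [2, 3, 0, 6, 99]
    assert run_intcode_program([2, 4, 4, 5, 99, 0]) == [2, 4, 4, 5, 99, 9801]
    assert run_intcode_program([1, 1, 1, 4, 99, 5, 6, 0, 99]) == [30, 1, 1, 4, 2, 5, 6, 0, 99]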
input_file = open_file_from_same_directory("day-02-input.txt")
intcode_program = intcode_program_from_str(input_file.read())
initial_intcode_program = intcode_program.copy()
noun = None
verb = None
for i in range(100):
for j in range(100):
intcode_program[1] = i
intcode_program[2] = j
intcode_program_post_run = run_intcode_program(intcode_program)
if intcode_program_post_run[0] == 19690720:
noun = intcode_program[1]
verb = intcode_program[2]
break
else:
intcode_program = initial_intcode_program.copy()
    if noun is not None and verb is not None:
break
answer = (100 * noun) + verb
print(answer)
# intcode_program[1] = 12
# intcode_program[2] = 2
# intcode_program_post_run = run_intcode_program(intcode_program)
# print(intcode_program_post_run[0])
# === /Primary/Str/isAnagram.py | doubleLLL3/Leetcode-Exercise-Codes | no_license ===
def isAnagram(s, t):
'''
    1. First use string length as a quick rejection check
    2. Store characters and their counts in two dicts
    3. Return whether the two dicts are equal
'''
# if len(s) != len(t):
# return False
# dict_s = {}
# dict_t = {}
# for i in range(len(s)):
# dict_s[s[i]] = dict_s.get(s[i], 0) + 1
# dict_t[t[i]] = dict_t.get(t[i], 0) + 1
# if dict_s == dict_t:
# return True
# return False
'''
    Sort both strings and compare the results for equality:
'''
# if len(s) != len(t):
# return False
# return sorted(s) == sorted(t)
'''
    Increment counts for one string and decrement for the other, so a single dict suffices
'''
# if len(s) != len(t):
# return False
# dict = {}
# for i in range(len(s)):
# dict[s[i]] = dict.get(s[i], 0) + 1
# dict[t[i]] = dict.get(t[i], 0) - 1
# for i in dict.values():
# if i != 0:
# return False
# return True
'''
    First build a count dict from one string, then subtract with the other; return False as soon as any count drops below 0
'''
# if len(s) != len(t):
# return False
# dict = {}
# for i in s:
# dict[i] = dict.get(i, 0) + 1
# for i in t:
# dict[i] = dict.get(i, 0) - 1
# if dict[i] < 0:
# return False
# return True
'''
    Record seen characters in a set, then compare each character's count in both strings (best!)
'''
if len(s) != len(t):
return False
set_s = set(s)
if set_s == set(t):
for i in set_s:
if s.count(i) != t.count(i):
return False
return True
else:
return False
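# Equivalent approach using the standard library (an illustrative sketch;
# isAnagramCounter is not part of the original solution):
from collections import Counter

def isAnagramCounter(s, t):
    return Counter(s) == Counter(t)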
s = "rat"
t = "car"
print(isAnagram(s, t))
# === /privx_api/base.py | cnguyenc/privx-sdk-for-python | Apache-2.0 ===
import base64
import http.client
import json
import ssl
import urllib.parse
import urllib.request
from typing import Union
from http.client import HTTPException
from privx_api.exceptions import InternalAPIException
from privx_api.enums import UrlEnum
def format_path_components(format_str: str, **kw) -> str:
components = {k: urllib.parse.quote(v, safe="") for k, v in kw.items()}
return format_str.format(**components)
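# For example: format_path_components("/hosts/{host_id}", host_id="a b/c")
# returns "/hosts/a%20b%2Fc"; each component is percent-encoded with safe="".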
class Connection:
def __init__(self, connection_info):
self.host = connection_info["host"]
self.port = connection_info["port"]
self.ca_cert = connection_info["ca_cert"]
self._connection = None
def __enter__(self):
self._connection = http.client.HTTPSConnection(
self.host, port=self.port, context=self.get_context()
)
return self._connection
def __exit__(self, exc_type, exc_val, exc_tb):
self._connection.close()
def get_context(self) -> ssl.SSLContext:
return ssl.create_default_context(cadata=self.ca_cert)
class BasePrivXAPI:
"""
Base class of PrivXAPI.
"""
def __init__(
self, hostname, hostport, ca_cert, oauth_client_id, oauth_client_secret
):
self._access_token = ""
self._oauth_client_id = oauth_client_id
self._oauth_client_secret = oauth_client_secret
self._connection_info = {
"host": hostname,
"port": hostport,
"ca_cert": ca_cert,
}
@classmethod
def _get_url(cls, name: str) -> str:
url = UrlEnum.get(name)
if not url:
raise InternalAPIException("URL missing: ", name)
return url
@classmethod
def _build_url(cls, name: str, path_params=None, query_params=None) -> str:
path_params = path_params or {}
query_params = query_params or {}
url = cls._get_url(name)
if path_params:
url = format_path_components(url, **path_params)
if query_params:
params = urllib.parse.urlencode(query_params)
url = "{}?{}".format(url, params)
return url
def _authenticate(self, username: str, password: str):
with Connection(self._connection_info) as conn:
token_request = {
"grant_type": "password",
"username": username,
"password": password,
}
basic_auth = base64.b64encode(
"{}:{}".format(self._oauth_client_id, self._oauth_client_secret).encode(
"utf-8"
)
)
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Authorization": "Basic {}".format(basic_auth.decode("utf-8")),
}
try:
conn.request(
"POST",
self._get_url(UrlEnum.AUTH.TOKEN),
body=urllib.parse.urlencode(token_request),
headers=headers,
)
except (OSError, HTTPException) as e:
raise InternalAPIException(str(e))
response = conn.getresponse()
if response.status != 200:
raise InternalAPIException("Invalid response: ", response.status)
data = response.read()
self._access_token = json.loads(data).get("access_token")
if self._access_token == "":
raise InternalAPIException("Failed to get access token")
def _get_headers(self) -> dict:
return {
"Content-type": "application/json",
"Authorization": "Bearer {}".format(self._access_token),
}
def _get_search_params(self, **kwargs: Union[str, int]) -> dict:
params = {key: val for key, val in kwargs.items() if val}
return params if any(params) else {}
def _http_get(self, urlname: str, path_params=None, query_params=None) -> tuple:
path_params = path_params or {}
query_params = query_params or {}
with Connection(self._connection_info) as conn:
try:
conn.request(
"GET",
self._build_url(urlname, path_params, query_params),
headers=self._get_headers(),
)
except (OSError, HTTPException) as e:
raise InternalAPIException(str(e))
response = conn.getresponse()
return response.status, response.read()
def _http_get_no_auth(self, urlname: str) -> tuple:
headers = self._get_headers()
del headers["Authorization"]
with Connection(self._connection_info) as conn:
try:
conn.request(
"GET",
self._build_url(urlname),
headers=headers,
)
except (OSError, HTTPException) as e:
raise InternalAPIException(str(e))
response = conn.getresponse()
return response.status, response.read()
def _http_post(
self, urlname: str, body=None, path_params=None, query_params=None
) -> tuple:
body = body or {}
path_params = path_params or {}
query_params = query_params or {}
with Connection(self._connection_info) as conn:
try:
conn.request(
"POST",
self._build_url(urlname, path_params, query_params),
headers=self._get_headers(),
body=self._make_body_params(body),
)
except (OSError, HTTPException) as e:
raise InternalAPIException(str(e))
response = conn.getresponse()
return response.status, response.read()
def _http_put(
self, urlname: str, body=None, path_params=None, query_params=None
) -> tuple:
body = body or {}
path_params = path_params or {}
query_params = query_params or {}
with Connection(self._connection_info) as conn:
try:
conn.request(
"PUT",
self._build_url(urlname, path_params, query_params),
headers=self._get_headers(),
body=self._make_body_params(body),
)
except (OSError, HTTPException) as e:
raise InternalAPIException(str(e))
response = conn.getresponse()
return response.status, response.read()
def _http_delete(
self, urlname: str, body=None, path_params=None, query_params=None
) -> tuple:
body = body or {}
path_params = path_params or {}
query_params = query_params or {}
with Connection(self._connection_info) as conn:
try:
conn.request(
"DELETE",
self._build_url(urlname, path_params, query_params),
headers=self._get_headers(),
body=self._make_body_params(body),
)
except (OSError, HTTPException) as e:
raise InternalAPIException(str(e))
response = conn.getresponse()
return response.status, response.read()
@staticmethod
def _make_body_params(data: Union[dict, str]) -> str:
return data if isinstance(data, str) else json.dumps(data)
# === /Quiz.py | rupalidhumne/Perceptrons-and-Decision-Trees | no_license ===
global NumNodes
trains=[]
import csv
global CLASS_idx
testDS={}
trainDS={}
ds={}
import math
import random
global root
trainVals=set()
global headers
global headers2
totAccuracy=[]
nn=[]
#if I reach a question mark then count it as a no
class Node:
def __init__(self, state,parent, children, soln,freq,path): #state is my category, path is what route I took to get here (yes or no), parent is previously visited category, solution is am I done or going yn...
self.state=state #state is node or category
self.parent=parent
self.children=children
self.soln=soln #republican or democrat
self.freq=freq
self.path=path
def __str__(self, level=0):
ret = "\t"*level+repr(self.state)+ repr(self.soln)+"\n"
for child in self.children:
ret += child.__str__(level+1)
return ret
def __repr__(self):
return '<tree node representation>'
def generateTrain(file):
global CLASS_idx
global headers
with open(file) as csvfile:
reader = csv.reader(csvfile)
headers=next(reader)[1:] #this includes the last party column so might want to get rid
for row in reader:
trainDS.update({row[0]:row[1:]})
csvfile.close()
CLASS_idx=len(headers)-1 #number of categories
def generateTest(file):
global headers2
with open(file) as csvfile:
reader = csv.reader(csvfile)
headers2=next(reader)[1:] #this includes the last party column so might want to get rid
for row in reader:
testDS.update({row[0]:row[1:]})
csvfile.close()
def openCSV(file): #will make test and tree
global CLASS_idx
global headers
with open(file) as csvfile:
reader = csv.reader(csvfile)
headers=next(reader)[1:] #this includes the last party column so might want to get rid
i=1
for row in reader:
if i<436:
ds.update({row[0]:row[1:]})
i=i+1
else:
break
csvfile.close()
CLASS_idx=len(headers)-1 #number of categories
def predict(start):#for person, traverse through my tree to get the correct answer
correct=0
p=0
incorrect=[]
for key in testDS:
person=testDS[key]
        path=[]
        soln=traverseTree(start, person, path)
        if path==['A7', 'Sometimes', 'A1', 'Sometimes', 'A2', 'Never', 'A6']:
            p=p+1
        if soln!="FAIL" and soln==person[len(person)-1]:
correct=correct+1
else:
incorrect.append(person)
print(len(incorrect))
numQues(incorrect)
print(p)
return correct/len(testDS.keys())*100
#make a method that just returns the soln
def numQues(arr):
n=0
for row in arr:
if '?' in row:
n=n+1
print(n)
def val_list(data,column):
#print(data.values())
return [val[column] for val in data.values()] #for list in data set return the last column so yes/no
def freq_dist(data_dict):
vals=val_list(data_dict,CLASS_idx) #last column
#print(vals)
return{a: vals.count(a) for a in set(vals)} #numbers of yes versus number of no overall
def val_set(data,column): #set of the categories
return set(val_list(data,column))
def freq_entropy(freq_dict):
f=list(freq_dict.values())
s=sum(f)
p=[i/s for i in f]
return (-sum([i * math.log(i,2) for i in p if i>0]))
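# Worked example: freq_entropy({'yes': 5, 'no': 5}) -> 1.0 bit (two equally
# likely classes), while freq_entropy({'yes': 10}) -> 0.0 (a pure node).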
def traverseTree(currNode, person,path):
questionList=[]
if currNode.soln is None:
cat=currNode.state
if cat in headers:
path.append(cat)
responseI=person[headers.index(cat)]
for child in currNode.children:
if responseI=='?':
                    path.append(child.state)
                    prediction=traverseTree(child,person,path)
questionList.append(prediction)
elif child.state==responseI:
if child.soln is not None:
return child.soln
else:
                        path.append(child.state)
                        return traverseTree(child, person, path)
if responseI=='?':
if questionList[0] != questionList[1]:
r=random.randint(0,1)
return questionList[r]
else:
return questionList[0]
else:
for child in currNode.children: #child of a subcategory is a category
if child.soln is None:
                    path.append(child.state)
                    return traverseTree(child,person,path)
                else:
                    return child.soln
else:
return currNode.soln
def extract(data,column,value): #return rows with cat s (V15) for which sub cat =yes and then rows where in V15 the subcat is no
return {a:data[a] for a in data if data[a][column]==value}
def parameter_entropy(data,col):
length=len(data)
total=0
for v in val_set(data,col):
eds=extract(data,col,v)
l=len(eds)
e=freq_entropy(freq_dist(eds))
total=total+ (l/length*e)
return total
def make_tree(n,trainDS,level):
global numNodes
initial_h=freq_entropy(freq_dist(trainDS)) #dictionary entropy
print(initial_h)
best=max((initial_h-parameter_entropy(trainDS,i),i) for i in range(CLASS_idx))
p=best[1]
if level==0:
n.state=headers[p]
else:
catNode=Node("", None, [],None,None,[])
catNode.state=headers[p] #mainCategory
catNode.parent=n
n.children.append(catNode)
# currNode=catNode #WILL THIS CHANGE THE VALUES FOR N AS WELL
numNodes=numNodes+1
for v in val_set(trainDS,p):
subCatNode=Node("", None, [],None,None,[])
subCatNode.state=v
# subCatNode.parent=currNode
if level==0:
subCatNode.parent=n
n.children.append(subCatNode)
else:
subCatNode.parent=catNode
catNode.children.append(subCatNode)
# currNode.children.append(subCatNode)
new_ds=extract(trainDS,p,v)
freqs=freq_dist(new_ds) #node.set children each of these things
subCatNode.freq=freqs
if freq_entropy(freqs)<0.0000001:
subCatNode.soln=getSoln(subCatNode,new_ds)
else:
make_tree(subCatNode, new_ds,level+1)
numNodes=numNodes+1
return numNodes
def getSoln(n,new_ds):
#print(n.state)
soln=""
sCategory=n.state
parent=n.parent
for key in new_ds:
data=new_ds[key]
if data[headers.index(parent.state)]==sCategory: #if the data for that category matches the subcategory value (y/n)
soln=data[len(data)-1]
break
return soln
fileTrain='quizA_train.csv'
fileTest='quizA_test.csv'
generateTrain(fileTrain)
generateTest(fileTest)
#testDS.update({0:['?', '?', 'Sometimes', 'Never', 'Never', 'Never', 'Sometimes', 'Sometimes','?', '?', 'Never', 'Sometimes', 'Never', 'Sometimes', 'Never', 'Sometimes', 'Never', 'Never', 'Sometimes']})
root=Node("", None, [],None,None,[])
numNodes=1
n=make_tree(root, trainDS, 0)
print(str(root))
acc=predict(root)
#openCSV(file)
"""
for i in range (0,20):
numNodes=1
generateTrain()
generateTest()
root=Node("", None, [],None,None) #path would be response of my parent before me
n=make_tree(root, trainDS, 0)
acc=predict(root)
print(acc)
#print(str(root))
nn.append(n)
tSize=tSize+10
"""
print(n)
#print(acc)
#print(totAccuracy)
#2 files, test train,
# === /p38a_input/L2U/2U-2Q_MD_NVT_rerun/set_1ns_equi_2.py | AnguseZhang/Input_TI | no_license ===
import os
dir = '/mnt/scratch/songlin3/run/p38a/L2U/MD_NVT_rerun/ti_one-step/2U_2Q/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_2.in'
temp_pbs = filesdir + 'temp_1ns_equi_2.pbs'
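# The lambda windows below are the abscissas of 12-point Gaussian quadrature,
# the spacing commonly used for thermodynamic-integration windows in AMBER.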
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_2.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
# === /IniReader.py | mbits-os/solver | no_license ===
from ConfigFile import \
PrintOut, PrintMessage, PrintError, PrintWarning, PrintExc, \
Location, Field, Section, FileContext, Macros
import sys, os, tempfile, shutil, getopt
from xml.parsers.expat import ParserCreate
from string import Formatter
kOpt = 0
kReq = 1
#list of well-known platforms, that will not deploy
#lower-case, please
vs_desktop_platforms = (
"win32", "x64", "ia64"
)
def PrintTemplate(out, templates, name, host, *args, **kwargs):
for t in Formatter().parse(str(templates[name])):
#print
#print t
#print
out.write(t[0])
if t[1] != None:
try:
if isinstance(t[1], (int, long)):
val = args[t[1]]
else:
val = kwargs[t[1]]
except:
if isinstance(t[1], (int, long)):
val = host[t[1]]
else:
val = getattr(host, t[1])
conv = t[3]
if conv == None: conv = 's'
if conv == 's': val = str(val)
elif conv == 'r': val = repr(val)
elif conv == 'c':
val(out, templates)
val = ''
out.write(val)
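# Example of the template syntax PrintTemplate implements (a sketch; "greet"
# and the names used here are illustrative): with
#     templates = {"greet": "Hello {name}!"}
# PrintTemplate(out, templates, "greet", host, name="world") writes
# "Hello world!". Fields not found in args/kwargs fall back to host[index] /
# getattr(host, name), and the custom "c" conversion ({field!c}) calls the
# value as val(out, templates) instead of writing it.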
def is_true(value):
prop = value.lower().strip()
return prop == "1" or prop == "yes" or prop == "true"
class IniProgram:
force = 0
skip = 0
input_filename = 0
output_filename = 0
inputs = [] #list of all include files to mtime check
default_section_name = None
def __init__(self):
pass
def other_updated(self): return False
def set_output(self, fname):
self.output_filename = fname
def set_default_section(self, name):
self.default_section_name = name
def read(self, vstudio, mask, fname, loc = None, ignoreIO = False):
self.input_filename = fname
self.inputs = [fname]
self.loc = Location(fname, 1)
self.mask = mask
self.vstudio = vstudio
self.translation = { "ver": vstudio, "_ver": '_'+vstudio, "ver0": vstudio, "_ver0": '_'+vstudio }
if len(vstudio) == 1:
self.translation["ver0"] = '0' + vstudio; self.translation["_ver0"] = '_0' + vstudio
macros = Macros()
for k in self.predefs.macros: macros.macros[k] = self.predefs.macros[k]
macros.add_macro("__configfile__", "3", Location("<predefined>", 0))
macros.add_macro("__vs__", vstudio, Location("<predefined>", 0))
_ctx = FileContext(fname, macros, self.default_section_name)
try:
_ctx.parse_file()
except IOError, err:
if ignoreIO:
if loc: loc.Warn("Could not open %s" % fname)
else: PrintWarning("<command line>", 0, "Could not open %s" % fname)
raise
elif loc: PrintExc(loc.fname, loc.lineno)
else: PrintExc("<command line>", 0)
except SystemExit, err:
raise
except:
PrintExc(_ctx.fname, 1)
for f in _ctx.imports: self.inputs.append(f.fname)
for f in _ctx.includes: self.inputs.append(f.fname)
_ctx.handle_imports()
self._macros = Macros()
self.append_defines(_ctx)
self.sections = {}
for sec in _ctx.sections:
sec = _ctx.sections[sec]
_sec = Section(sec, sec.name)
for idx in sec.index:
fld = sec.items[idx]
_fld = Field(fld, \
self._macros.resolve(fld.name),\
self._macros.resolve(fld.value))
sec.append(_fld)
self.sections[sec.name.lower()] = sec
del self._macros
self.read_config()
def append_defines(self, _ctx): #overridable
pass
def append_define(self, name, value):
self._macros.add_macro(name, value, Location(self.input_filename, 1))
def get_section(self, name, opt):
_name = str(name).lower()
if _name in self.sections: return self.sections[_name]
if opt == kOpt: return None
self.loc.Error("Could not find section '%s'" % name)
def get_item(self, sec, name, errmsg = None, opt = kReq):
_name = name
name = name.lower()
if name in sec.items: return sec.items[name]
if opt == kOpt: return None
if type(errmsg) not in (str, unicode):
errmsg = "%s:%s item missing" % (sec.name, _name)
self.loc.Error(errmsg)
def get_array(self, sec, name, errmsg = None):
if type(errmsg) not in (str, unicode):
errmsg = "%s:%s list missing" % (sec.name, name)
return self.get_item(sec, name, errmsg).value.split(";")
def get_optional_item(self, sec, name): return self.get_item(sec, name, opt=kOpt)
def write(self):
if self.output_filename == 0: self.update_filename()
if self.output_filename == 0: return []
orig_output_name = self.output_filename
updated = []
path, fname = os.path.split(orig_output_name)
name, ext = fname.rsplit(".", 1)
fname = os.path.join(path, self.mask.format(name=name, **self.translation) + "." + ext)
self.output_filename = fname
if self.write_one(): updated.append(fname)
self.output_filename = 0
return updated
def same_content(self, left, right):
try:
l = open(left, 'rb')
r = open(right, 'rb')
while 1:
lline = l.readline()
rline = r.readline()
if not lline:
if rline: return False
return True
if lline != rline: return False
except:
return False
def write_one(self):
if not self.force:
try:
to = os.path.getmtime(self.output_filename)
older = True
for i in self.inputs:
if os.path.getmtime(i) > to: older = False; break
if older: return False #no update needed
except:
#some access was broken - huge chance,
#it's because output desn't exsits yet
pass
_loc = Location(self.input_filename, 1)
_loc.Message("building %s" % os.path.basename(self.output_filename))
tmp = tempfile.mkstemp('',\
os.path.basename(self.output_filename)+'.tmp-',\
os.path.dirname(self.output_filename))
out = open(tmp[1], 'w')
self.write_out(out)
out.close()
if os.path.exists(self.output_filename):
if self.skip and self.same_content(self.output_filename, tmp[1]):
_loc.Message("output not changed, skipping")
os.close(tmp[0])
os.remove(tmp[1])
return False
bak = tempfile.mkstemp('',\
os.path.basename(self.output_filename)+'.back-',\
os.path.dirname(self.output_filename))
os.close(bak[0])
shutil.copyfile(self.output_filename, bak[1])
os.close(tmp[0])
shutil.move(tmp[1], self.output_filename)
return True
def usage(self, argv, reason=0):
if reason:
PrintOut("%s\n" % reason)
PrintOut(\
"usage:\n "+os.path.basename(argv[0])+""" [options] config
options:
(h)elp
(o)utput
(f)orce
(s)kip
(d)ef macro to be added to predefined list
vs [comma separated, allowed values: 8,9,10]
mask should contain {name} and one of {ver}, {_ver},
{ver0} and {_ver0} [def=${name}${_0ver}]
{ver} will be empty, if vs has only one version
{_ver} will also be empty, but if the version is to be used,
it will have underscore prepended to version
{ver0} and {_ver0} are the same as {ver} and {ver0},
but will create 08 and _08 instead of 8 and _8 (to be used,
when vs also contains 10)
""")
def main(self, kind, argv, loc = None, ignoreIO = False):
try:
opts, args = getopt.getopt(argv[1:], \
"ho:fsd:", \
["help", "output=", "force", "skip",
"D=", "def=", "mellow", "vs=", "mask="])
except getopt.GetoptError, err:
self.usage(argv, str(err))
return 2
mellow = 0
vs = ["9"]
vs2 = None
mask2 = None
self.predefs = Macros()
for o, a in opts:
if o in ("-h", "--help"):
self.usage(argv)
return 0
elif o in ("-o", "--output"): self.set_output(a)
elif o in ("-f", "--force"): self.force = 1
elif o in ("-s", "--skip"): self.skip = 1
elif o == "--mellow": mellow = 1
elif o == "--mask": mask2 = a
elif o == "--vs":
if vs2 == None: vs2 = []
vs2 += a.split(",")
elif o in ("-d", "--D", "--def"):
d = a.split("=", 1)
value=''
name = d[0].strip()
d = d[1:]
if len(d): value = d[0].strip()
self.predefs.add_macro(name, value, Location("<command-line>", 0))
if vs2 != None: vs = vs2
#print self.predefs
if len(args) == 0:
self.usage(argv, "input missing")
return 2
elif len(args) > 1 and self.output_filename:
            self.usage(argv, "too many inputs")
return 2
if mask2 != None:
mask = mask2
mask2 = None
else:
if len(vs) == 1: mask = "{name}"
else: mask = "{name}{_ver0}"
re = []
for arg in args:
for studio in vs:
try:
self.read(studio, mask, arg, loc, ignoreIO)
updated = self.write()
for u in updated: re.append(u)
except IOError, err:
if not ignoreIO: raise
if not mellow:
for r in re:
PrintOut(\
"%s: warning: %s '%s' has been updated and must be reloaded\n" %\
(self.input_filename, kind, r))
if len(re) or self.other_updated(): return 1
return 0
class VCProjInfo:
class BailExpat(Exception):
def __init__(self): pass
def __init__(self):
self.name = ""
self.guid = 0
def expat_se(self, name, attrs):
assert name == "VisualStudioProject"
self.guid = attrs["ProjectGUID"]
self.name = attrs["Name"]
raise VCProjInfo.BailExpat()
def get(self, vcproj):
try:
f = open(vcproj)
except:
return (None, 0)
p = ParserCreate()
p.StartElementHandler = self.expat_se
try:
p.ParseFile(f)
except VCProjInfo.BailExpat:
pass
except:
return (None, 0)
f.close()
return (self.name, self.guid)
class CSProjInfo:
class BailExpat(Exception):
def __init__(self): pass
def __init__(self):
self.name = ""
self.guid = 0
self.text = ""
def expat_se(self, name, attrs):
self.text = ""
def expat_ee(self, name):
if name == "PropertyGroup":
raise CSProjInfo.BailExpat()
elif name == "AssemblyName":
self.name = self.text
elif name == "ProjectGuid":
self.guid = self.text
def expat_cd(self, text):
self.text += text
def get(self, csproj):
try:
f = open(csproj)
except:
return (None, 0)
p = ParserCreate()
p.StartElementHandler = self.expat_se
p.EndElementHandler = self.expat_ee
p.CharacterDataHandler = self.expat_cd
try:
p.ParseFile(f)
except CSProjInfo.BailExpat:
pass
except:
return (None, 0)
f.close()
return (self.name, self.guid)
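# Hedged usage sketch (not in the original file): both readers share the
# same get() contract; "app.vcproj" is an assumed path for illustration.
def _proj_info_demo(path="app.vcproj"):
    name, guid = VCProjInfo().get(path)
    return name, guid  # (None, 0) when the file is absent or malformed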
| [
"[email protected]"
] | |
ac6e8a3c6bcc8819ec62e8ea30d218e242058332 | 0007122ec169c11037cf1007e822e02702317a5d | /ml/subprocess_wrapper.py | 466590be76abb5102d595ddb3cdab5ee4cbed26c | [] | no_license | themarzbar247/CCTV_Cam | aac442d60fd3fce2b947f81af9cef5097409ada6 | 099c1879a469e5355e9b63b147f7cbff737c6897 | refs/heads/main | 2023-06-01T05:22:47.192175 | 2021-06-23T13:58:50 | 2021-06-23T13:58:50 | 329,317,747 | 0 | 0 | null | 2021-06-23T13:58:51 | 2021-01-13T13:32:07 | Python | UTF-8 | Python | false | false | 3,669 | py | import subprocess
import sys
import pickle
from queue import Queue, Empty
from threading import Thread
from struct import Struct
ON_POSIX = 'posix' in sys.builtin_module_names
HEADER = Struct("!L")
def send(obj, file=sys.stdout.buffer):
"""Send a pickled message over the given channel."""
payload = pickle.dumps(obj, -1)
file.write(HEADER.pack(len(payload)))
file.write(payload)
file.flush()
def receive(file=sys.stdin.buffer):
"""Receive a pickled message over the given channel.
Returns:
object: A deserialised object from the file buffer.
"""
header = read_file(file, HEADER.size)
payload = read_file(file, *HEADER.unpack(header))
return pickle.loads(payload)
def read_file(file, size):
"""Read a fixed size buffer from the file.
Returns:
        bytes: exactly `size` bytes read from the file buffer.
"""
parts = []
while size > 0:
part = file.read(size)
if not part:
raise EOFError
parts.append(part)
size -= len(part)
return b''.join(parts)
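# Minimal sketch (not part of the original module) of the framing round
# trip through an in-memory buffer; io.BytesIO stands in for the child
# process pipe, purely for illustration.
def _framing_self_test():
    import io
    buf = io.BytesIO()
    send({"cmd": "ping", "seq": 1}, file=buf)  # length-prefixed pickle
    buf.seek(0)
    assert receive(file=buf) == {"cmd": "ping", "seq": 1}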
class SubprocessWrapper:
"""Encapsulates subproccess to allow for easier communication between parent and child processes.
Args:
module_file (String): A path like for a python script to run as a sub proccess.
"""
def __init__(self, module_file):
self.module_file_name = module_file
self.process = None
self.in_q = Queue()
self.out_q = Queue()
def start(self, *args):
"""Starts the threads and subproccess
Returns:
SubprocessWrapper: returns self
"""
print(f"Starting: {self.module_file_name}{args}")
self.process = subprocess.Popen([sys.executable, self.module_file_name, *args],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=ON_POSIX)
self.recever = Thread(target=self._enqueue_output, daemon=True)
self.sender = Thread(target=self._enqueue_input, daemon=True)
self.sender.start()
self.recever.start()
return self
def send(self,obj):
"""Sends an object to the child process.
Args:
obj (object): the object to send to the child process
"""
self.in_q.put(obj)
def read(self, wait=False):
"""Reads from the child process
Args:
wait (bool, optional): Should this block the thread. Defaults to False.
Raises:
e: pass through exceptions from child processes.
Returns:
obj (object): the object the child process has sent.
"""
try: obj = self.out_q.get(wait)
except Empty:
return None
else:
if isinstance(obj, Exception):
raise obj
return obj
def _enqueue_output(self):
"""while the child process is running, it will wait for a message and add it to the output enqueue for the main thread to pick up.
"""
try:
while self.process.poll() is None:
                self.out_q.put(receive(self.process.stdout))
except Exception as e:
self.out_q.put_nowait(e)
self.process.stdout.close()
def _enqueue_input(self):
"""while the child process is running, it will wait for a message in the input enqueue and send it to the child process.
"""
try:
while self.process.poll() is None:
send(self.in_q.get(), self.process.stdin)
        except Exception:
            pass
        self.process.stdin.close()  # close our end of the child's stdin, not stdout
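# Hedged usage sketch: "worker.py" is a hypothetical child script that
# reads objects with receive() and answers with send(); it is not part
# of this repository.
if __name__ == "__main__":
    wrapper = SubprocessWrapper("worker.py").start()
    wrapper.send({"task": "echo", "payload": 42})
    print(wrapper.read(wait=True))  # blocks until the child replies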
| [
"[email protected]"
] | |
65e56979950079da89c6f081f7033b2c4a160d7a | 72368c0515f47cbea81413bad6b525eb8170f3d1 | /osbuild/mounts.py | 7f46b89cb76393f998229b80a27a422c9514c66f | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | teg/osbuild | 187bbcab1505b76eb3a5349fd4c2bdc048aba886 | 8bc4bff80c5d615a4ec5435f537d77e827bebbfa | refs/heads/main | 2022-06-01T05:50:05.051190 | 2021-11-19T08:50:15 | 2022-01-25T11:23:36 | 452,252,955 | 0 | 0 | Apache-2.0 | 2022-01-26T11:41:44 | 2022-01-26T11:41:43 | null | UTF-8 | Python | false | false | 4,174 | py | """
Mount Handling for pipeline stages
Allows stages to access file systems provided by devices.
This makes mount handling transparent to the stages, i.e.
the individual stages do not need any code for different
file system types and the underlying devices.
"""
import abc
import hashlib
import json
import os
import subprocess
from typing import Dict
from osbuild import host
from osbuild.devices import DeviceManager
class Mount:
"""
A single mount with its corresponding options
"""
def __init__(self, name, info, device, target, options: Dict):
self.name = name
self.info = info
self.device = device
self.target = target
self.options = options
self.id = self.calc_id()
def calc_id(self):
m = hashlib.sha256()
m.update(json.dumps(self.info.name, sort_keys=True).encode())
if self.device:
m.update(json.dumps(self.device.id, sort_keys=True).encode())
if self.target:
m.update(json.dumps(self.target, sort_keys=True).encode())
m.update(json.dumps(self.options, sort_keys=True).encode())
return m.hexdigest()
class MountManager:
"""Manager for Mounts
Uses a `host.ServiceManager` to activate `Mount` instances.
Takes a `DeviceManager` to access devices and a directory
called `root`, which is the root of all the specified mount
points.
"""
def __init__(self, devices: DeviceManager, root: str) -> None:
self.devices = devices
self.root = root
self.mounts = {}
def mount(self, mount: Mount) -> Dict:
source = self.devices.device_abspath(mount.device)
args = {
"source": source,
"target": mount.target,
"root": self.root,
"tree": self.devices.tree,
"options": mount.options,
}
mgr = self.devices.service_manager
client = mgr.start(f"mount/{mount.name}", mount.info.path)
path = client.call("mount", args)
if not path:
res = {}
self.mounts[mount.name] = res
return res
if not path.startswith(self.root):
raise RuntimeError(f"returned path '{path}' has wrong prefix")
path = os.path.relpath(path, self.root)
self.mounts[mount.name] = path
return {"path": path}
class MountService(host.Service):
"""Mount host service"""
@abc.abstractmethod
def mount(self, args: Dict):
"""Mount a device"""
@abc.abstractmethod
def umount(self):
"""Unmount all mounted resources"""
def stop(self):
self.umount()
def dispatch(self, method: str, args, _fds):
if method == "mount":
r = self.mount(args)
return r, None
raise host.ProtocolError("Unknown method")
class FileSystemMountService(MountService):
"""Specialized mount host service for file system mounts"""
def __init__(self, args):
super().__init__(args)
self.mountpoint = None
self.check = False
@abc.abstractmethod
def translate_options(self, options: Dict):
return []
def mount(self, args: Dict):
source = args["source"]
target = args["target"]
root = args["root"]
options = args["options"]
mountpoint = os.path.join(root, target.lstrip("/"))
args = self.translate_options(options)
os.makedirs(mountpoint, exist_ok=True)
self.mountpoint = mountpoint
subprocess.run(
["mount"] +
args + [
"--source", source,
"--target", mountpoint
],
check=True)
self.check = True
return mountpoint
def umount(self):
if not self.mountpoint:
return
self.sync()
print("umounting")
# We ignore errors here on purpose
subprocess.run(["umount", self.mountpoint],
check=self.check)
self.mountpoint = None
def sync(self):
subprocess.run(["sync", "-f", self.mountpoint],
check=self.check)
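# A minimal sketch of a concrete service, assuming an ext4 mount whose
# only generic option is "readonly"; real osbuild mount services live in
# their own modules, so this subclass is illustrative only.
class Ext4MountService(FileSystemMountService):
    def translate_options(self, options: Dict):
        # map the generic options dict onto mount(8) arguments
        args = ["-t", "ext4"]
        if options.get("readonly"):
            args += ["-o", "ro"]
        return args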
| [
"[email protected]"
] | |
7bb8400b870908871347973d08509a7f99913fa8 | abb0b8c04bbb5a91fd50c6540a864de66e624ba9 | /Analysis/bec_rabi_flop_3state.py | 2088a04782010c315fb602c4fc77a26ef4b66cad | [] | no_license | anavaldesc/Chern | eb292ceda0ab99c11ac2c157766d863bd9b341f8 | b2985ca11a4309bc27b12e852aa1d77caaf5b650 | refs/heads/master | 2021-01-20T22:05:53.449553 | 2017-11-16T14:54:26 | 2017-11-16T14:54:26 | 101,799,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,147 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 17:39:08 2017
@author: banano
"""
import sys
sys.path.append('/Users/banano/databandit')
import numpy as np
import matplotlib.pyplot as plt
import h5py
import os
import pandas as pd
from fnmatch import fnmatch
import databandit as db
from scipy.sparse.linalg import expm
def matchfiles(dir):
for root, dirs, files in os.walk(dir):
for file in files:
if fnmatch(file, '*.h5'):
yield root, file
break # break after listing top level dir
def getfolder(date, sequence):
date = pd.to_datetime(str(date))
folder = 'data/' + date.strftime('%Y/%m/%d') + '/{:04d}'.format(sequence)
return folder
camera = 'XY_Flea3'
date = 20170823
sequence = 183# 151
redo_prepare = False
sequence_type = 'bec_rabi_flop_3state'
#x = [78, 329, 534]
#y = [281, 344, 247] #start in z
#x = [80, 331, 530]
#y = [178, 241, 151] #start in x
x = [124, 375, 579]
y = [231, 292, 202] #start in y
wx = 120
wy = 30
w = 30
ods = []
iod = []
p0 = []
p1 = []
p2 = []
psum = []
folder = getfolder(date, sequence)
outfile = '{}_{}_{:04d}.h5'.format(sequence_type, date, sequence)
Raman_pulse_time = []
fracs = []
try:
with h5py.File('results/' + outfile, 'r') as f:
f['data']
except KeyError:
redo_prepare = True
except IOError:
redo_prepare = True
if redo_prepare:
print('Preparing {} data...'.format(sequence_type))
for r, file in matchfiles(folder):
with h5py.File(os.path.join(r, file), 'r') as h5_file:
# print('banana')
try:
img = h5_file['data']['images' + camera]['Raw'][:]
attrs = h5_file['globals'].attrs
img = np.float64(img)
atoms = img[0] - img[2]
probe = img[1] - img[2]
od = -np.log(((atoms < 1) + atoms) / ((probe < 1) + probe))
iod.append(np.ma.masked_invalid(od).sum())
                # attrs = h5_file['results/rois_od'].attrs
Raman_pulse_time.append(attrs['Raman_pulse_time'])
# print(attrs[:])
except Exception as e:
print(e)
# print('There are no {} images in this file'.format(camera))
try:
attrs = h5_file['results/rois_od'].attrs
p0.append(attrs['roi_0'])
p1.append(attrs['roi_1'])
p2.append(attrs['roi_2'])
psum.append(attrs['roi_0'] + attrs['roi_1'] + attrs['roi_2'])
except:
print('There are no rois in this shot')
#
df = pd.DataFrame()
df['Raman_pulse_time'] = Raman_pulse_time
df['integratedOD'] = iod
df['p0'] = p0
df['p1'] = p1
df['p2'] = p2
df['psum'] = psum
# df['p{}'.format(i)] = p0
fracs = np.array(fracs)
ods = np.array(ods)
# for i in range(3):
# df['p{}'.format(2-i)] = ods[:,i]
# for i in range(50):
# df['frac'] = fraction[:,1:1+2].mean()
# print(fraction[:,1:1+4].mean())
#        print(fraction[:,i:i+4].mean())
# df['delta_xyz'] = delta_xyz
df = df.dropna()
df = df.sort_values(by='Raman_pulse_time')
# df = df[ df.integratedOD > df.integratedOD.mean() * 1 ]
# df.to_hdf('results/' + outfile, 'data', mode='w')
else:
df = pd.read_hdf('results/' + outfile, 'data')
#except Exception as e:
# print(e)
# print('Empty data file')
#%%
color = ['b', 'k', 'r']
for i in range(3):
plt.plot(df['Raman_pulse_time'], df['p{}'.format(i)]/ df['psum'], color[i] + 'o')
plt.xlabel('Raman pulse time')
plt.ylabel('Fraction')
#plt.xlim([0, 500])
#%%
def evolve(t, H, psi0, kwargs):
dt = np.diff(t)
Hlist = np.array([H(ti, *kwargs) for ti in t[:-1]+dt/2])
Ulist = []
psiList = [psi0]
Plist = [np.abs(psi0)**2]
for dti, Hi in zip(dt, Hlist):
Ui = expm(-1j*Hi*dti)
psi = np.dot(Ui, psiList[-1])
Ulist.append(Ui)
psiList.append(psi)
Plist.append(np.abs(psi)**2)
Ulist = np.array(Ulist)
psiList = np.array(psiList)
Plist = np.array(Plist)
return psiList, Plist, Ulist
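# Quick sanity check of evolve() on a driven two-level system (an
# illustrative addition, not part of the original analysis): on
# resonance the population Rabi-flops between the two bare states.
def _rabi_two_level_demo(Omega=1.0):
    H2 = lambda t, O: np.array([[0, O / 2], [O / 2, 0]], dtype='complex')
    t2 = np.linspace(0, 2 * np.pi / Omega, 200)
    psi0_2 = np.array([1, 0], dtype='complex')
    _, P2, _ = evolve(t2, H2, psi0_2, [Omega])
    return P2  # P2[:, 1] traces out sin^2(Omega * t / 2)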
def H_RashbaRF_full(t, ramp_rate, qx, qy, Omega1, Omega2, Omega3,
omega1, omega2, omega3, E1, E2, E3, ramp):
if ramp and t <= 1 / ramp_rate:
Omega1 = Omega1 * 2 * np.pi * t * ramp_rate
Omega2 = Omega2 * 2 * np.pi * t * ramp_rate
Omega3 = Omega3 * 2 * np.pi * t * ramp_rate
else:
Omega1 = Omega1 * 2 * np.pi
Omega2 = Omega2 * 2 * np.pi
Omega3 = Omega3 * 2 * np.pi
k1_x = np.cos(2 * np.pi / 3) * np.sqrt(2)
k1_y = -np.sin(2 * np.pi / 3) * np.sqrt(2)
k2_x = np.cos(2 * np.pi * 2/ 3) * np.sqrt(2)
k2_y = -np.sin(2 * np.pi * 2/ 3) * np.sqrt(2)
k3_x = np.cos(2 * np.pi)
k3_y = -np.sin(2 * np.pi)
k1_x = np.cos(2 * np.pi / (360./135))
k1_y = -np.sin(2 * np.pi / (360./135))
k2_x = np.cos(2 * np.pi / 1.6)
k2_y = -np.sin(2 * np.pi / 1.6)
k3_x = np.cos(2 * np.pi)
k3_y = -np.sin(2 * np.pi)
H = np.array([[E1 + (qx+k1_x)**2 + (qy+k1_y)**2, 1.0j*Omega1*np.cos(omega1*t), Omega3*np.cos(omega3*t)],
[-1.0j*Omega1*np.cos(omega1*t),E2 + (qx+k2_x)**2 + (qy+k2_y)**2, -1.0j*Omega2*np.cos(omega2*t)],
[Omega3*np.cos(omega3*t), 1.0j*Omega2*np.cos(omega2*t),E3 + (qx+k3_x)**2 + (qy+k3_y)**2]])
H = np.array(H, dtype='complex')
return H
def H_RashbaRF(t, qx, qy, Omega1, Omega2, Omega3, delta1=0, delta2=0, delta3=0):
Omega1 = Omega1 * 2 * np.pi
Omega2 = Omega2 * 2 * np.pi
Omega3 = Omega3 * 2 * np.pi
k1_x = np.cos(2 * np.pi / 3) * np.sqrt(2)
k1_y = -np.sin(2 * np.pi / 3) * np.sqrt(2)
k2_x = np.cos(2 * np.pi * 2/ 3) * np.sqrt(2)
k2_y = -np.sin(2 * np.pi * 2/ 3) * np.sqrt(2)
k3_x = np.cos(2 * np.pi)
k3_y = -np.sin(2 * np.pi)
#
k1_x = np.cos(2 * np.pi / (360./135))
k1_y = -np.sin(2 * np.pi / (360./135))
k2_x = np.cos(2 * np.pi / 1.6)
k2_y = -np.sin(2 * np.pi / 1.6)
k3_x = np.cos(2 * np.pi)
k3_y = -np.sin(2 * np.pi)
# H = np.array([[(qx+k1_x)**2 + (qy+k1_y)**2+delta1 + delta3, 1.0j*Omega1, Omega3],
# [-1.0j*Omega1, (qx+k2_x)**2 + (qy+k2_y)**2-delta1+delta2, -1.0j*Omega2],
# [Omega3, 1.0j*Omega2, (qx+k3_x)**2 + (qy+k3_y)**2-delta2-delta3]])
H = np.array([[(qx+k1_x)**2 + (qy+k1_y)**2+delta1 + delta3, Omega1, Omega3],
[Omega1, (qx+k2_x)**2 + (qy+k2_y)**2-delta1+delta2, Omega2],
[Omega3, Omega2, (qx+k3_x)**2 + (qy+k3_y)**2-delta2-delta3]])
H = np.array(H, dtype='complex')
return H
Omega = 3.2
Omega1 = Omega * 1.
Omega2 = Omega * 0.9
Omega3 = Omega * 1.1
E1 = 0
E2 = 224
E3 = 224 + 140
delta1 = 0
delta2 = 0
delta3 = 0
omega1 = E2 - E1
omega2 = E3 - E2
omega3 = E3 - E1
qx = 0
qy = 0
ramp_rate = 1
args_floquet = [qx, qy, Omega1, Omega2, Omega3, delta1, delta2, delta3]
args_full = [ramp_rate, qx, qy, 2* Omega1, 2* Omega2, 2*Omega3, omega1, omega2,
omega3, E1, E2, E3, False]
psi0 = np.array([1, 0, 0], dtype='complex')
t = df['Raman_pulse_time']*1e-3
#kwargs = [qx, qy, omega_zx, omega_xy, omega_yz, Omega_zx, Omega_xy, Omega_yz]
#kwargs_full = [qx, qy, 2* Omega1, 2* Omega, 2*Omega3, omega1, omega2,
# omega3, E1, E2, E3]
P_full = evolve(t, H_RashbaRF_full, psi0, args_full)[1]
P_Floquet = evolve(t, H_RashbaRF, psi0, args_floquet)[1]
#%%
label = ['z', 'x', 'y']
for i in range(3):
# plt.plot(t*1e3, P_full[:,i], color[i])
plt.plot(t*1e3, P_Floquet[:,i], color[i] + '--', label=label[i] + ' rwa')
plt.xlabel('Pulse time [hbar/E_R]')
plt.ylabel('Probability')
#plt.legend()
#sorted_fractions = []
#for i in range(wx):
# sorted_fractions.append(df['frac{}'.format(i)])
# plt.plot(df['Raman_pulse_time'], df['frac{}'.format(i)])
# plt.ylim([0,1])
# plt.xlabel('Raman pulse time [us]')
# plt.ylabel('Fraction')
#plt.show()
#
#sorted_fractions = np.array(sorted_fractions)
#psd_vec = []
#for i in range(len(sorted_fractions[:,0])):
# frac_ft = np.fft.fftshift( np.fft.fft(sorted_fractions[i]-sorted_fractions[i].mean()))
# N = len(frac_ft)
# psd = np.abs(frac_ft[N/2::])**2
## plt.plot(psd)
# psd_vec.append(psd/psd.max())
#
#psd = np.array(psd_vec)
#psd /= psd.max()
#N = len(sorted_fractions[:,0])
#fet = df.as_matrix().T[0]
#d = fet[1] - fet[0]
#d = d*1e-6
#freqs = np.fft.fftfreq(N, d)[0:N/2]*1e-3
#plt.pcolormesh(psd.T, vmin=0, vmax=1, cmap='YlGn')
#plt.xlabel('pixel')
#plt.ylabel('Frequency [kHz]')
#plt.yticks(range(0, N/2, N/10), ['%.1f'%freqs[::int(N/10)][i] for i in range(0,5)])
#plt.axis('Tight')
| [
"[email protected]"
] | |
edd79dabbe37979eddc9265b68061e477fbc0a8f | de41c4ee70f0ff41c198794cd204b7025d745ce9 | /tests/test_repository.py | 1c0fcb979a475b5a0d773ffdb3cf868d99ded20c | [] | no_license | liluo/linguist | 30b6957ba3bc8f116a7500f2a0a7bfd32cd368cf | d65d34be638e7314f6f63d024f1c6cb940121f53 | refs/heads/master | 2021-01-16T00:57:48.797495 | 2013-11-19T15:35:49 | 2013-11-19T15:35:49 | 11,943,348 | 3 | 5 | null | 2013-11-19T15:09:30 | 2013-08-07T06:46:35 | Python | UTF-8 | Python | false | false | 893 | py | # -*- coding: utf-8 -*-
from pygments.lexers import find_lexer_class
from framework import LinguistTestBase, main, ROOT_DIR
from libs.repository import Repository
from libs.language import Language
class TestRepository(LinguistTestBase):
def repo(self, base_path):
return Repository.from_directory(base_path)
def linguist_repo(self):
return self.repo(ROOT_DIR)
def test_linguist_language(self):
assert self.linguist_repo().language == Language.find_by_name('Python')
def test_linguist_languages(self):
assert self.linguist_repo().languages[Language.find_by_name('Python')] > 2000
def test_linguist_size(self):
assert self.linguist_repo().size > 3000
def test_binary_override(self):
assert self.repo(ROOT_DIR + '/samples/Nimrod').language == Language.find_by_name('Nimrod')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c8d12fe384351eefbc7005f84e44367ebb3c90b8 | 270537e8095d7e86ccf4c672d992d3c7c0dea339 | /build/20130906/UUBlog/UUBlog/core/ubasemodel.py | 78467082aa0c6640972cb7a95659c43f13140aa9 | [] | no_license | Missyliang1/UUBlog | 5d566045a28e4f03a97b473be106ecd9f2c4835d | db287da6de7acd970c431a5e417a89c6285d677a | refs/heads/master | 2021-01-18T07:40:42.032139 | 2013-09-18T05:54:06 | 2013-09-18T05:54:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | #-*- coding:utf-8 -*-
from django.db import models
class UBaseModel(models.Model):
class Meta:
abstract =True
def getModelResult(model,count=None,*orders,**wheres):
ret=model.objects.filter(**wheres)
for order in orders:
ret=ret.order_by(order)
if count:
return ret[:count]
return ret
def Test(self):
return self
| [
"[email protected]"
] | |
cc8fa5ed8c3057249d6ea406fc6e8a5ae4dc625d | abc2b1b09440de924792cb1b50594cc7fefd6d05 | /deployment_queue/views.py | 2a0157df00bf1107cb19edc8377df98de644efed | [] | no_license | SJSU272Spring2019/Project-Group-5 | 46e4642289d81ee5d46b7772d63d8f5a5a525375 | 48ee3457f8696524e7bd5e5ed5d68a52a7b2929c | refs/heads/master | 2020-04-25T19:34:16.965936 | 2019-05-12T04:42:15 | 2019-05-12T04:42:15 | 173,024,873 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | from django.shortcuts import render
from deployment_detail.models import DeploymentDetail
from django.shortcuts import redirect
from authenticate.models import UserProfile
def deployment_queue(request):
if not request.user.is_authenticated:
user_message = "Please login to access the Training Management System"
return render(request, 'login.html', {'user_message': user_message})
deployment_records = DeploymentDetail.objects.filter(deployment_status='Open').order_by('project_code', 'course_order')
return render(request, 'deployment_queue.html', {'deployment_records': deployment_records, 'profile_info':UserProfile.objects.get(username=request.user.username)})
def update_deployment_queue(request):
if not request.user.is_authenticated:
user_message = "Please login to access the Training Management System"
return render(request, 'login.html', {'user_message': user_message})
if request.POST.get('cancel_deployment_queue'):
return redirect('/queue')
if request.POST.get('save_deployment_queue'):
deployment_records = save_deployment_queue(request)
return render(request, 'deployment_queue.html', {'deployment_records': deployment_records, 'profile_info':UserProfile.objects.get(username=request.user.username)})
if request.POST.get('save_exit_deployment_queue'):
deployment_records = save_deployment_queue(request)
return redirect('/queue')
def save_deployment_queue(request):
if not request.user.is_authenticated:
user_message = "Please login to access the Training Management System"
return render(request, 'login.html', {'user_message': user_message})
my_checkboxes = request.POST.getlist('Completed')
for item in my_checkboxes:
temp_deployment_record = DeploymentDetail.objects.get(deliverable_id=item)
temp_deployment_record.deployment_status = 'Complete'
temp_deployment_record.save()
deployment_records = DeploymentDetail.objects.filter(deployment_status='Open').order_by('project_code','course_order')
return deployment_records | [
"[email protected]"
] | |
fb5c6da20a7f06bc3f7ae9e52b83c9776894e320 | 05bcba01ccadacdcb50930d8a14ca866d4860aa3 | /每日一码/1.4.py | d562765773e791296e3d3948d1f59bf072f06599 | [] | no_license | bai3/arithmetic | 1de9057a376cc9025a30e8eed069c1657b297fb3 | 0688e44ba2ec9cdd18661602f60ddd011e8e077d | refs/heads/master | 2021-09-14T12:37:05.125808 | 2018-05-14T02:27:48 | 2018-05-14T02:27:48 | 106,187,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # -*- coding:utf-8 -*-
class Solution:
def jumpFloor(self, number):
if number <= 0:
return 0
elif number <= 2:
return number
else:
b = 1
c = 2
for i in range(3, number+1):
c = c + b
b = c - b
            return c
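# Quick check (illustrative): f(1)=1, f(2)=2, f(n)=f(n-1)+f(n-2),
# so jumpFloor(5) should print 8.
if __name__ == '__main__':
    print(Solution().jumpFloor(5))
| [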
"[email protected]"
] | |
d4a02a75781865a75f8f07caad950b79ddaf844c | a283608134d59391f88d877d642bb23ba4015fcb | /core/window_controller/window_controller.py | 70985fdf8b05e03a2c81c02787129acf94d69bc5 | [
"MIT"
] | permissive | wo1fsea/TheOldChineseRoom | 6daf371e65c3626981c9eec70677a0372c6da16c | 19e52d86a9720bb8701149a47126f8e7975320b5 | refs/heads/master | 2021-10-08T19:04:10.497774 | 2018-12-16T15:37:06 | 2018-12-16T15:37:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | # -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
Author:
Huang Quanyong (wo1fSea)
[email protected]
Date:
2017/10/4
Description:
window_controller.py
----------------------------------------------------------------------------"""
import sys
class WindowController(object):
def __init__(self):
self.adapter = self._load_adapter()()
def locate_window(self, name):
return self.adapter.locate_window(name)
def move_window(self, window_id, x, y):
self.adapter.move_window(window_id, x, y)
def resize_window(self, window_id, width, height):
self.adapter.resize_window(window_id, width, height)
def focus_window(self, window_id):
self.adapter.focus_window(window_id)
def is_window_focused(self, window_id):
return self.adapter.is_window_focused(window_id)
def get_focused_window_name(self):
return self.adapter.get_focused_window_name()
def get_window_geometry(self, window_id):
return self.adapter.get_window_geometry(window_id)
def _load_adapter(self):
if sys.platform == "darwin":
from .window_controller_darwin import WindowControllerDarwin
return WindowControllerDarwin
elif sys.platform == "win32":
from .window_controller_win32 import WindowControllerWin32
return WindowControllerWin32
elif sys.platform in ["linux", "linux2"]:
raise NotImplementedError()
else:
raise NotImplementedError()
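# Hedged usage sketch: "Terminal" is a hypothetical window title; the
# platform adapter is chosen automatically in __init__.
if __name__ == '__main__':
    controller = WindowController()
    window_id = controller.locate_window('Terminal')
    controller.focus_window(window_id)
    print(controller.get_window_geometry(window_id))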
| [
"[email protected]"
] | |
27e6878286735e53449181168380113412444fa9 | c58b04695593fe579ff5681c215807a70fb02661 | /team7/pytorch_0.4_classification/example-ngraph_1.py | 9a7278d2cb08fc57818c280d95fbfd1bc1e13ea7 | [] | no_license | Mowd/nctu_dl_final | 2322d660834b33c544db7dd1d0607c77338774e6 | e2d9628e9f2a00c37fa18af69390a391d3a83b60 | refs/heads/master | 2020-04-15T06:46:58.568984 | 2019-01-10T16:03:29 | 2019-01-10T16:03:29 | 164,472,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,802 | py | from benchmark import benchmarking
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.models.resnet import resnet18
import os
import sys
#data_dir = os.environ['TESTDATADIR']
#assert data_dir is not None, "No data directory"
#from models import ResNeXt29_2x64d
from collections import OrderedDict
import time
import numpy as np
from models import resnet20_cifar
import torch.utils.data as data
from PIL import Image
import onnx
import ngraph as ng
from ngraph_onnx.onnx_importer.importer import import_onnx_model
BATCH_SIZE = 960
ONNX_MODEL = "./checkpoint/f1_model.onnx"
im_h = 32
im_w = 32
#ngraph
onnx_protobuf = onnx.load(ONNX_MODEL)
ng_model = import_onnx_model(onnx_protobuf)[0]
runtime = ng.runtime(backend_name='CPU')
resnet = runtime.computation(ng_model['output'], *ng_model['inputs'])
model = resnet20_cifar()
inputs = []
targets = []
@benchmarking(team=7, task=0, model=model, preprocess_fn=None)
def inference(model, test_loader,**kwargs):
total = 0
correct = 0
assert kwargs['device'] != None, 'Device error'
device = kwargs['device']
if device == "cpu":
print("device = ",device)
#for inputs, targets in testloader:
for idx in range(len(test_loader)):
inputs[idx], targets[idx] = inputs[idx].to(device).numpy(), targets[idx].to(device).numpy()
outputs = resnet(inputs[idx])
pred = np.argmax(outputs,axis=1)
total += len(targets[idx])
if(targets[idx].shape != pred.shape):
                    correct += np.equal(targets[idx],pred[0:len(targets[idx])]).sum()
else:
correct += np.equal(targets[idx],pred).sum()
#print("correct=" ,correct )
else:
print("device = ",device)
model.to(device)
checkpoint = torch.load('./checkpoint/ckpt_last-V4.t7')
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
model.load_state_dict(new_state_dict)
model.eval()
with torch.no_grad():
for batch_idx, (input_, target_) in enumerate(test_loader):
input_, target_ = input_.to(device), target_.to(device)
output_ = model(input_)
_, predicted = output_.max(1)
total += target_.size(0)
correct += predicted.eq(target_).sum().item()
#print("correct=",correct)
#total = len(test_loader) * BATCH_SIZE
acc = 100.*correct/total
print(acc)
return acc
class CINIC10(data.Dataset):
classes =['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def __init__(self, root, train=True,transform=None, target_transform=None):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.img = []
self.lab = []
file_name = self.root
# print(file_name)
# now load the numpy arrays
if os.path.exists(file_name):
data= np.load(file_name)
self.img = data['img']
self.lab = data['label']
else:
print("It can't find .np")
#self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, lab= self.img[index], self.lab[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
lab = self.target_transform(lab)
return img, lab
def __len__(self):
return len(self.img)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
if __name__=='__main__':
#cinic_directory = os.environ['TESTDATADIR']
#assert cinic_directory is not None, "No data directory"
#cinic_directory = './data/cinic-10'
test_dir = './test_img.npz'
cinic_mean = [0.47889522, 0.47227842, 0.43047404]
cinic_std = [0.24205776, 0.23828046, 0.25874835]
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=cinic_mean, std=cinic_std)])
#testset = torchvision.datasets.ImageFolder(root=(cinic_directory + '/test'), transform=transform)
#testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, num_workers=4,drop_last=True)
testset = CINIC10(root=test_dir,transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4,drop_last=True)
for ins, ts in testloader:
inputs.append(ins)
targets.append(ts)
inference(model, testloader) | [
"[email protected]"
] | |
95fef40a5dc2a68a14ca2b5b5e61ee8198383dcd | 952daeb7e254e863fc57459b87ca29b745c8ce94 | /Amazon/purchasing/serializer.py | 1d302f484744ec8824783db41c73fd9cbfe0f5b5 | [] | no_license | yjesefcu/AmazonSeller | 2fbce7e08cbd0e8dff996d28e94f789d56441ddb | ca155dd81c0583eecd5128433291dc2b3f16f80d | refs/heads/master | 2021-01-22T06:01:53.656798 | 2018-01-28T13:39:49 | 2018-01-28T13:39:49 | 92,515,839 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | __author__ = 'liucaiyun'
from rest_framework import serializers
from products.serializer import ProductSerializer
from models import *
class DateTimeFormat(serializers.DateTimeField):
def to_representation(self, value):
if not value:
return ''
return value.strftime('%Y-%m-%d %H:%M')
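# Illustrative check (not part of the app): the custom field renders
# datetimes at minute resolution and falsy values as an empty string.
def _datetimeformat_demo():
    from datetime import datetime
    field = DateTimeFormat(read_only=True)
    return field.to_representation(datetime(2018, 1, 1, 9, 30))  # '2018-01-01 09:30'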
class ContractSerializer(serializers.ModelSerializer):
class Meta:
model = Contract
fields = '__all__'
class OrderStatusSerializer(serializers.ModelSerializer):
class Meta:
model = OrderStatus
fields = '__all__'
class PurchasingOrderSerializer(serializers.ModelSerializer):
product = ProductSerializer(read_only=True)
create_time = DateTimeFormat(read_only=True)
contract = ContractSerializer(read_only=True)
status = OrderStatusSerializer()
status_id = serializers.IntegerField()
class Meta:
model = PurchasingOrder
fields = '__all__'
class InboundSerializer(serializers.ModelSerializer):
inbound_time = DateTimeFormat()
status = OrderStatusSerializer()
class Meta:
model = InboundProducts
fields = '__all__' | [
"[email protected]"
] | |
104286c34947124afae89f5f9066ce8775935aa8 | e4cb0a704203c54d8c7eeb4e9881b71b96976bea | /outsourceScraper/main.py | ab8d86195cb1c1964a798ff7d0155c8e352f6d42 | [] | no_license | artemutin/outsourceScraper | a170ee53e56e317e3872a2d66408a247fea9a146 | 54196af06f03dca8f8aa2e39aa1dada78a21d171 | refs/heads/master | 2020-04-14T05:23:39.516288 | 2016-06-23T14:58:52 | 2016-06-23T14:58:52 | 61,720,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | import sys
sys.path.append(".")
from outsourceScraper.Catalog import full_scrape
from outsourceScraper.utils import setupLog
from logging import exception, info
from sys import exc_info
from os.path import expanduser
import traceback
import datetime as dt
import argparse
import os
def main(path=expanduser('~'), max_threads=10, logTofile=True):
try:
if not path:
path = expanduser('~')
if not max_threads:
max_threads = 10
setupLog(logToFile=logTofile, path=path)
        info('Available options for main: path=<folder where the results file will be saved>')
current_time = dt.datetime.now().strftime('%d-%m-%H-%M')
full_scrape(os.path.join(path,'results-{}.csv'.format(current_time)), max_threads)
except BaseException as e:
exception('I caught an exception in main func! This should never ever been happen!:(. But Here it is: {}; callstask: {}'.
format(str(e), traceback.print_tb(exc_info()[2])))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Scrape some parts of vl.ru and 2gis catalogs')
parser.add_argument('-p', dest='path', help='Specify folder to store files in.')
parser.add_argument('-t', dest='max_threads', type=int, help="Specify number of threads to use.")
parser.add_argument('-l', dest='logTofile', action='store_false', help="Set this flag to disable logging into file.")
args = parser.parse_args()
args = vars(args)
if args.get('path', False):
args['path'] = str(args['path'])
main(**args)
| [
"[email protected]"
] | |
acee49631bfcc0689e388480765c6dbf8cf16025 | ee890ca4c28bb6e23cc96b69e83d87cdcafebf7c | /towerofhanoi.py | e59572da9bb254a42af276afaf5f1ce8ce185ec2 | [] | no_license | deepandas11/Python-ADS | 6778b5501967379006b2c4f1d4429a60651d816d | 352e54e550e78ac7e2c1cf62e29520b823dbf462 | refs/heads/master | 2020-04-26T00:42:28.791873 | 2019-03-06T07:08:42 | 2019-03-06T07:08:42 | 173,187,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py |
def moveDisk(fp, tp):
print("Moving disk from "+str(fp)+" to "+str(tp)+".")
def moveTower(height,fromP, withP, toP):
if height>=1:
moveTower(height-1, fromP,toP,withP)
moveDisk(fromP,toP)
moveTower(height-1, withP, fromP, toP)
moveTower(4, "A", "B", "C")
| [
"[email protected]"
] | |
9bc990a63d2e3801b4ea704ddc416f4fa47663e5 | 7cba457b2bb23b79cb17d46a81d50a5c4b6b1b09 | /backprop.py | 766f1445a85ed22d4e5d788e00ce1d50e0f45521 | [] | no_license | josh-dh/CNN-framework | 44e5cf7d8bac752fdf1f30f58e09762d179e7f16 | bbd1cc58b764ff46c784272538655fc2c47bcb12 | refs/heads/master | 2021-01-16T02:51:50.326886 | 2018-02-07T02:26:22 | 2018-02-07T02:26:22 | 99,988,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py | #Backprop Module
import numpy as np
def final_layer_error(predictions, labels, weighted_input):
"""
calculate the error of the final layer
"""
def d_loss_quadratic_d_activations(predictions, labels):
"""
derivative of quadratic loss with respect to activations
"""
return np.subtract(predictions, labels)
    def d_softmax_activations_d_weighted_input(weighted_input):
        """
        derivative of softmax activations with respect to weighted inputs;
        returns the diagonal of the softmax Jacobian, s_i * (1 - s_i),
        which matches the element-wise (Hadamard) use below
        """
        s = np.exp(weighted_input)
        s = s / np.sum(s)  # softmax activations
        return s * (1.0 - s)
#hadamard product of two functions:
return np.multiply(d_loss_quadratic_d_activations(predictions, labels), d_softmax_activations_d_weighted_input(weighted_input))
def lower_layer_error(currentweights,currenterror,lowerinput):
"""
calculate the error of the layer lower to the last calculated layer
"""
    def relu_prime(lowerinput):
        # derivative of ReLU: 1 where the input is positive, 0 elsewhere
        return (lowerinput > 0).astype(float)
return np.multiply(np.multiply(np.transpose(currentweights),currenterror),relu_prime(lowerinput))
def d_loss_d_weight():
"""
return an array of loss derivatives with respect to their weights
"""
pass #TEMP
def d_loss_d_bias(error_for_layer):
"""
return an array of loss derivatives with respect to their biases THIS FUNCTION ONLY SERVES THE PURPOSE OF DOCUMENTATION
"""
return error_for_layer
def loss_quadratic(predictions, labels):
"""
quadratic loss function for one-hot labels and softmax predictions
"""
    return np.sum(np.square(np.subtract(labels, predictions)))/(2 * labels.size)
def loss_cross_entropy(predictions, labels):
"""
cross entropy loss designed to work with one-hot labels and softmax predictions UNTESTED; UNUSED
"""
output = 0
for i in range(len(predictions)):
output -= labels[i] * np.log(predictions[i])
    return output / len(predictions)
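# Minimal finite-difference check (illustrative, not from the original
# module) that loss_quadratic and its analytic gradient agree.
def _grad_check(eps=1e-6):
    labels = np.array([0.0, 1.0, 0.0])
    preds = np.array([0.2, 0.5, 0.3])
    analytic = (preds - labels) / labels.size  # d loss_quadratic / d predictions
    numeric = np.zeros_like(preds)
    for i in range(preds.size):
        bump = np.zeros_like(preds)
        bump[i] = eps
        numeric[i] = (loss_quadratic(preds + bump, labels) - loss_quadratic(preds - bump, labels)) / (2 * eps)
    return np.allclose(analytic, numeric, atol=1e-5)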
def stochastic_gradient_descent(batch, parameters, parameter_derivatives, step_size):
"""
single stochastic gradient descent iteration for parameters performed over a batch
"""
    # minimal sketch (assumption): parameters and parameter_derivatives are
    # parallel lists of numpy arrays whose gradients are already averaged
    # over the batch, so `batch` itself is not consumed here
    for param, grad in zip(parameters, parameter_derivatives):
        param -= step_size * grad
    return parameters | [
"[email protected]"
] | |
927b0fc5f3b203bab957dfcd205dc23da901a874 | eb54a6dfa7bf958fdc145c011062657b6bd2b24c | /163music_spider.py | 39ab1e330da8571661b7b309f7c19ea5c75b4c39 | [] | no_license | youmulove/163musicpinlun_spider | e8b7e23afc76ffdccb94c56aa558e095598fb547 | c62dd0b647bc4302f69c87f75d7eddebd49107f7 | refs/heads/master | 2020-03-22T06:42:23.431354 | 2018-07-04T01:11:38 | 2018-07-04T01:11:38 | 139,652,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,428 | py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
'''
@Description:
    NetEase Cloud Music comment crawler; it can crawl a song's complete comment thread.
    Partly based on the Zhihu answer by @平胸小仙女 (https://www.zhihu.com/question/36081767).
    The encrypted POST parameters are also given; see the original answer:
    Author: 平胸小仙女
    Link: https://www.zhihu.com/question/36081767/answer/140287795
    Source: Zhihu
'''
from Crypto.Cipher import AES
import base64
import requests
import json
import codecs
import time
# request headers
headers = {
'Host':"music.163.com",
'Accept-Language':"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
'Accept-Encoding':"gzip, deflate",
'Content-Type':"application/x-www-form-urlencoded",
'Cookie':"_ntes_nnid=754361b04b121e078dee797cdb30e0fd,1486026808627; _ntes_nuid=754361b04b121e078dee797cdb30e0fd; JSESSIONID-WYYY=yfqt9ofhY%5CIYNkXW71TqY5OtSZyjE%2FoswGgtl4dMv3Oa7%5CQ50T%2FVaee%2FMSsCifHE0TGtRMYhSPpr20i%5CRO%2BO%2B9pbbJnrUvGzkibhNqw3Tlgn%5Coil%2FrW7zFZZWSA3K9gD77MPSVH6fnv5hIT8ms70MNB3CxK5r3ecj3tFMlWFbFOZmGw%5C%3A1490677541180; _iuqxldmzr_=32; vjuids=c8ca7976.15a029d006a.0.51373751e63af8; vjlast=1486102528.1490172479.21; __gads=ID=a9eed5e3cae4d252:T=1486102537:S=ALNI_Mb5XX2vlkjsiU5cIy91-ToUDoFxIw; vinfo_n_f_l_n3=411a2def7f75a62e.1.1.1486349441669.1486349607905.1490173828142; [email protected]|1489375076|1|study|00&99|null&null&null#hub&420100#10#0#0|155439&1|study_client|[email protected]; NTES_CMT_USER_INFO=84794134%7Cm155****4439%7Chttps%3A%2F%2Fsimg.ws.126.net%2Fe%2Fimg5.cache.netease.com%2Ftie%2Fimages%2Fyun%2Fphoto_default_62.png.39x39.100.jpg%7Cfalse%7CbTE1NTI3NTk0NDM5QDE2My5jb20%3D; usertrack=c+5+hljHgU0T1FDmA66MAg==; Province=027; City=027; _ga=GA1.2.1549851014.1489469781; __utma=94650624.1549851014.1489469781.1490664577.1490672820.8; __utmc=94650624; __utmz=94650624.1490661822.6.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; playerid=81568911; __utmb=94650624.23.10.1490672820",
'Connection':"keep-alive",
'Referer':'http://music.163.com/'
}
# proxy servers
proxies= {
'http:':'http://121.232.146.184',
'https:':'https://144.255.48.197'
}
# offset = (page number - 1) * 20; "total" is "true" on the first page and "false" on later pages
# first_param = '{rid:"", offset:"0", total:"true", limit:"20", csrf_token:""}' # first parameter
second_param = "010001" # second parameter
# third parameter
third_param = "00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7"
# fourth parameter
forth_param = "0CoJUm6Qyw8W8jud"
# build the encrypted "params" value
def get_params(page): # page: 1-based page number
    iv = "0102030405060708"
    first_key = forth_param
    second_key = 16 * 'F'
    if(page == 1): # first page
        first_param = '{rid:"", offset:"0", total:"true", limit:"20", csrf_token:""}'
        h_encText = AES_encrypt(first_param, first_key, iv)
    else:
        offset = str((page-1)*20)
        first_param = '{rid:"", offset:"%s", total:"%s", limit:"20", csrf_token:""}' %(offset,'false')
        h_encText = AES_encrypt(first_param, first_key, iv)
    h_encText = AES_encrypt(h_encText, second_key, iv)
    return h_encText
# get the (fixed) encSecKey value
def get_encSecKey():
encSecKey = "257348aecb5e556c066de214e531faadd1c55d814f9be95fd06d6bff9f4c7a41f831f6394d5a3fd2e3881736d94a02ca919d952872e7d0a50ebfa1769a7a62d512f5f1ca21aec60bc3819a9c3ffca5eca9a0dba6d6f7249b06f5965ecfff3695b54e1c28f3f624750ed39e7de08fc8493242e26dbc4484a01c76f739e135637c"
return encSecKey
# AES-CBC encryption with manual PKCS#7-style padding
def AES_encrypt(text, key, iv):
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
encryptor = AES.new(key, AES.MODE_CBC, iv)
encrypt_text = encryptor.encrypt(text)
encrypt_text = base64.b64encode(encrypt_text)
return encrypt_text
# POST the encrypted form and return the raw JSON response
def get_json(url, params, encSecKey):
data = {
"params": params,
"encSecKey": encSecKey
}
response = requests.post(url, headers=headers, data=data,proxies = proxies)
return response.content
# fetch the hot comments; returns a list of formatted lines
def get_hot_comments(url):
    hot_comments_list = []
    hot_comments_list.append(u"userID nickname avatarUrl time likedCount comment\n")
    params = get_params(1) # first page
    encSecKey = get_encSecKey()
    json_text = get_json(url,params,encSecKey)
    json_dict = json.loads(json_text)
    hot_comments = json_dict['hotComments'] # hot comments
    print("There are %d hot comments." % len(hot_comments))
    for item in hot_comments:
        comment = item['content'] # comment body
        likedCount = item['likedCount'] # number of likes
        comment_time = item['time'] # comment time (timestamp)
        userID = item['user']['userId'] # commenter id (the API field is 'userId')
        nickname = item['user']['nickname'] # nickname
        avatarUrl = item['user']['avatarUrl'] # avatar URL
        comment_info = unicode(userID) + u" " + nickname + u" " + avatarUrl + u" " + unicode(comment_time) + u" " + unicode(likedCount) + u" " + comment + u"\n"
        hot_comments_list.append(comment_info)
    return hot_comments_list
# fetch all the comments of a song
def get_all_comments(url):
    all_comments_list = [] # holds every comment
    all_comments_list.append(u"userID nickname avatarUrl time likedCount comment\n") # header line
    params = get_params(1)
    encSecKey = get_encSecKey()
    json_text = get_json(url,params,encSecKey)
    json_dict = json.loads(json_text)
    comments_num = int(json_dict['total'])
    if(comments_num % 20 == 0):
        page = comments_num / 20
    else:
        page = int(comments_num / 20) + 1
    print("There are %d pages of comments." % page)
    for i in range(page): # fetch page by page
        params = get_params(i+1)
        encSecKey = get_encSecKey()
        json_text = get_json(url,params,encSecKey)
        json_dict = json.loads(json_text)
        if i == 0:
            print("There are %d comments in total." % comments_num) # total number of comments
        for item in json_dict['comments']:
            comment = item['content'] # comment body
            likedCount = item['likedCount'] # number of likes
            comment_time = item['time'] # comment time (timestamp)
            userID = item['user']['userId'] # commenter id
            nickname = item['user']['nickname'] # nickname
            avatarUrl = item['user']['avatarUrl'] # avatar URL
            comment_info = unicode(userID) + u" " + nickname + u" " + avatarUrl + u" " + unicode(comment_time) + u" " + unicode(likedCount) + u" " + comment + u"\n"
            all_comments_list.append(comment_info)
        print("Page %d fetched." % (i+1))
    return all_comments_list
# write the comments to a text file
def save_to_file(list,filename):
    with codecs.open(filename,'a',encoding='utf-8') as f:
        f.writelines(list)
    print("Written to file successfully.")
if __name__ == "__main__":
    start_time = time.time() # start time
    url = "http://music.163.com/weapi/v1/resource/comments/R_SO_4_186016/?csrf_token="
    filename = u"晴天.txt"
    all_comments_list = get_all_comments(url)
    save_to_file(all_comments_list,filename)
    end_time = time.time() # end time
    print("The program took %f seconds." % (end_time - start_time)) | [
"[email protected]"
] | |
60b0ba41b564ea65de3d21e92281b00dbe37aebc | d27091244f97969d5084c568f0e2c4040f18baa6 | /{{cookiecutter.project_slug}}/tasks.py | 0d881d7e4c671cbea56d5aa2b2d26b144b44a0a4 | [
"MIT"
] | permissive | i2biz/cookiecutter-python-pylint | 26d997d6f1928045e8568f9e722b0dffe1d4bb11 | ba07cd620d982cb11161fee808025257176c62c3 | refs/heads/master | 2021-07-07T13:00:30.758374 | 2019-06-17T14:13:42 | 2019-06-17T14:13:42 | 98,869,317 | 0 | 0 | MIT | 2021-04-20T18:14:25 | 2017-07-31T08:59:58 | Python | UTF-8 | Python | false | false | 537 | py | # coding=utf-8
from invoke import task
@task
def style(ctx):
ctx.run(
"black --check {{cookiecutter.project_slug}} {{cookiecutter.project_slug}}_test"
)
@task
def lint(ctx):
ctx.run(
"pylint {{cookiecutter.project_slug}} {{cookiecutter.project_slug}}_test -r n"
)
@task
def test(ctx):
ctx.run(
"py.test -v --cov {{cookiecutter.project_slug}} --cov-report=html --cov-report=term-missing {{cookiecutter.project_slug}}_test"
)
@task(pre=[test, style, lint])
def check(ctx):
pass
| [
"[email protected]"
] | |
c77f81ced232aaf35321ab772ccc4cc15336acb6 | 71bfc12150b66a3dea788368176eea11179bff0c | /main.py | 46e2788fa7abf2daa476d7dee4d4bcc61e6a243f | [] | no_license | qc5111/RA | d5526c5d07bb9c90c5ae0ece0e6c1b3ff1bb1979 | c5d95a1eb04218b6eb488730685f0cdcb9134577 | refs/heads/master | 2020-03-22T18:28:08.492812 | 2018-07-11T07:59:31 | 2018-07-11T07:59:31 | 140,462,261 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | import os,aes256,rsa_lib,rsa,hashlib
import time  # used below but missing from the original import line
def ra_encrypt(r_file_name,block_size,pub_key):
(path,filename) = os.path.split(r_file_name)
if path:
path=path+"\\"
md5=hashlib.md5()
w_file_name=path+str(int(time.time()*1000))+".ra.tmp"
w=open(w_file_name, 'wb')
r=open(r_file_name, 'rb')
    #build the header
pwd1=aes256.get_random_key()
pwd2=aes256.get_random_key()
rsa_crypted=rsa.encrypt(pwd1+pwd2,pub_key)
rsa_size=len(rsa_crypted)
    file_name_size=len(filename.encode('utf-8'))  # byte length, so non-ASCII names round-trip correctly
head=bytes("RA",encoding='utf-8')+block_size.to_bytes(4, byteorder='big')+rsa_size.to_bytes(2, byteorder='big')+rsa_crypted
md5.update(head)
w.write(head)
    #header written
    #first encrypted section: 512 bytes in total (1 length byte + filename + data)
data=r.read(511-file_name_size)
first_block=file_name_size.to_bytes(1, byteorder='big')+bytes(filename,encoding='utf-8')+data
first_block=aes256.aes_cbc_encrypt(first_block,pwd1)
md5.update(first_block)
w.write(first_block)
    #first section encrypted & written
    #second (main) section: encrypt and write block by block
data=r.read(block_size)
while data:
data=aes256.aes_cbc_encrypt(data,pwd2)
md5.update(data)
w.write(data)
data=r.read(block_size)
r.close()
w.close()
md5_value=md5.hexdigest()
os.rename(w_file_name,path+md5_value)
return md5_value
def ra_decrypt(r_file_name,prv_key):
(path,filename) = os.path.split(r_file_name)
if path:
path=path+"\\"
md5=hashlib.md5()
r=open(r_file_name, 'rb')
    #read the header
data=r.read(8)
if data[0:2].decode()!="RA":
return("该文件不是RSA-AES加密文件")
block_size=int.from_bytes(data[2:6],byteorder='big', signed=False)
rsa_size=int.from_bytes(data[6:8],byteorder='big', signed=False)
    #read the RSA-encrypted key material
data=r.read(rsa_size)
passwd=rsa.decrypt(data,prv_key)
pwd1=passwd[0:32]
pwd2=passwd[32:64]
data=r.read(544)
data=aes256.aes_cbc_decrypt(data,pwd1)
file_name_size=int.from_bytes(data[0:1],byteorder='big', signed=False)
filename=data[1:1+file_name_size].decode("utf-8")
w_file_name=path+filename
if os.path.exists(w_file_name):
return("文件:"+w_file_name+"已存在,请删除或重命名原文件")
w=open(w_file_name, 'wb')
w.write(data[1+file_name_size:])
data=r.read(block_size+32)
while data:
data=aes256.aes_cbc_decrypt(data,pwd2)
w.write(data)
data=r.read(block_size+32)
r.close()
w.close()
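# Hedged usage sketch: rsa.newkeys() is the standard python-rsa call;
# "demo.bin" and the 4096-byte block size are assumptions here, and
# aes256 is this repository's local helper module.
if __name__ == '__main__':
    pub_key, prv_key = rsa.newkeys(1024)
    encrypted_name = ra_encrypt('demo.bin', 4096, pub_key)
    ra_decrypt(encrypted_name, prv_key)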
| [
"[email protected]"
] | |
6dd2cfb8ffa27592008f6df6cc0c027517ba095b | 64b26ce32c9f1b3ce7fe5c57a45b06aca7b9c511 | /实验一/171860537_实验一/stackoverflow/doc2vec.py | bcdbd3b4f07b11a577d7a4d47dc48409bd514e15 | [] | no_license | wego1236/SREE | 584b155b523e2f93a01fa9eaaccadd12f7d074ce | edd1c4f6f357c81dd1aa4765ea509cc59701088b | refs/heads/main | 2023-03-21T18:21:37.249559 | 2021-03-17T14:45:22 | 2021-03-17T14:45:22 | 307,331,706 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import sys
import logging
import os
import gensim
# import Doc2Vec
from gensim.models import Doc2Vec
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
# configure logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# load the data
documents = []
# use count as each sentence's "tag"; tags and sentences correspond one-to-one
count = 0
with open('title.txt', 'r', encoding='utf-8') as f:
for line in f:
title = line
        # tokenize; split() returns a list
        words = title.split()
        # each element of documents is a (words, tags) pair; see the TaggedDocument docs
documents.append(gensim.models.doc2vec.TaggedDocument(words, [str(count)]))
count += 1
if count % 10000 == 0:
logging.info('{} has loaded...'.format(count))
# train the model
model = Doc2Vec(documents, dm=1, size=100, window=8, min_count=5, workers=4)
# save the model
model.save('title.model')
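# Hedged usage sketch: load the trained model back and embed an unseen
# title; infer_vector() and docvecs.most_similar() are standard gensim
# Doc2Vec calls, while the sample tokens are an assumption.
model = Doc2Vec.load('title.model')
vector = model.infer_vector(['how', 'to', 'sort', 'a', 'list'])
print(model.docvecs.most_similar([vector], topn=3))  # nearest training titles
| [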
"[email protected]"
] | |
35af1d18981e1a0ce05538dca938add529273b43 | 3ce1ebbbf3e69e7c3bafd2cd359c63e85c960e5b | /src/models/RCNN_elmo.py | 5a2a87876dc8f6b03d0a3641fe671196e6965907 | [] | no_license | IreneZihuiLi/TopicAttentionMedicalAD | 2afb75b2be0965d9601c5d459224eb5fc2535a44 | 8d5ccdabbae6ad0c7abef9765ae3441b565615da | refs/heads/master | 2020-08-20T18:35:13.410327 | 2019-12-03T17:23:58 | 2019-12-03T17:23:58 | 216,054,583 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,982 | py | # _*_ coding: utf-8 _*_
'''
This is the one using ELMO!
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from allennlp.modules.elmo import Elmo, batch_to_ids
options_file = "/home/lily/zl379/Playing/bilm-tf/mmc_new/options.json"
weight_file = "/home/lily/zl379/Playing/bilm-tf/dump/weights.hdf5"
class RCNN_elmo(nn.Module):
def __init__(self, config):
super(RCNN_elmo, self).__init__()
"""
Arguments
---------
batch_size : Size of the batch which is same as the batch_size of the data returned by the TorchText BucketIterator
output_size : 2 = (pos, neg)
hidden_sie : Size of the hidden_state of the LSTM
vocab_size : Size of the vocabulary containing unique words
embedding_length : Embedding dimension of GloVe word embeddings
weights : Pre-trained GloVe word_embeddings which we will use to create our word_embedding look-up table
"""
self.batch_size = config.batch_size
self.output_size = config.num_classes
self.hidden_size = config.embedding_dim
self.vocab_size = config.vocab_size
self.embedding_length = config.embedding_dim
self.elmo = config.elmo
if self.elmo is True:
# elmo
print(self.elmo,'++++' * 30)
self.elmo = Elmo(options_file, weight_file, config.elmo_level, dropout=0, requires_grad=False) # default is False
else:
print (self.elmo,'---'* 30)
self.embedding = nn.Embedding(self.vocab_size, self.embedding_length) # Initializing the look-up table.
# self.word_embeddings.weight = nn.Parameter(self.weights, requires_grad=True) # Assigning the look-up table to the pre-trained GloVe word embedding.
self.dropout = 0.8
self.lstm = nn.LSTM(self.embedding_length, self.hidden_size, dropout=self.dropout, bidirectional=True)
self.W2 = nn.Linear(2 * self.hidden_size + self.embedding_length, self.hidden_size)
self.label = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input_sentences, batch_size=None):
"""
Parameters
----------
input_sentence: input_sentence of shape = (batch_size, num_sequences)
batch_size : default = None. Used only for prediction on a single sentence after training (batch_size = 1)
Returns
-------
Output of the linear layer containing logits for positive & negative class which receives its input as the final_hidden_state of the LSTM
final_output.shape = (batch_size, output_size)
"""
"""
The idea of the paper "Recurrent Convolutional Neural Networks for Text Classification" is that we pass the embedding vector
of the text sequences through a bidirectional LSTM and then for each sequence, our final embedding vector is the concatenation of
its own GloVe embedding and the left and right contextual embedding which in bidirectional LSTM is same as the corresponding hidden
state. This final embedding is passed through a linear layer which maps this long concatenated encoding vector back to the hidden_size
vector. After this step, we use a max pooling layer across all sequences of texts. This converts any varying length text into a fixed
dimension tensor of size (batch_size, hidden_size) and finally we map this to the output layer.
"""
if self.elmo is True:
elmo_embedding = self.elmo(input_sentences)
sents = elmo_embedding['elmo_representations'][-1]
input = sents.permute(1, 0, 2)
else:
input = self.embedding(input_sentences).permute(1, 0, 2)
# input = self.word_embeddings(
# input_sentence) # embedded input of shape = (batch_size, num_sequences, embedding_length)
# input = input.permute(1, 0, 2) # input.size() = (num_sequences, batch_size, embedding_length)
if batch_size is None:
h_0 = Variable(torch.zeros(2, self.batch_size, self.hidden_size).cuda()) # Initial hidden state of the LSTM
c_0 = Variable(torch.zeros(2, self.batch_size, self.hidden_size).cuda()) # Initial cell state of the LSTM
else:
h_0 = Variable(torch.zeros(2, batch_size, self.hidden_size).cuda())
c_0 = Variable(torch.zeros(2, batch_size, self.hidden_size).cuda())
output, (final_hidden_state, final_cell_state) = self.lstm(input, (h_0, c_0))
final_encoding = torch.cat((output, input), 2).permute(1, 0, 2)
y = self.W2(final_encoding) # y.size() = (batch_size, num_sequences, hidden_size)
y = y.permute(0, 2, 1) # y.size() = (batch_size, hidden_size, num_sequences)
y = F.max_pool1d(y, y.size()[2]) # y.size() = (batch_size, hidden_size, 1)
y = y.squeeze(2)
logits = self.label(y)
return logits
| [
"[email protected]"
] | |
336b50fad3e7231e3b5b30f884847f23185931fe | 8557f0621d75848afddd65b1a5cce12449772e0a | /app/routes.py | b7cba988184e473476a98744d335ccd7a30f5ed9 | [] | no_license | halversondm/microblog | 5fd2f835db83d90b46bc136c5815a94e7e38cd74 | c621844acef4549dca547b81c6f7ca5570020751 | refs/heads/master | 2023-08-08T06:13:55.578623 | 2019-11-02T14:12:25 | 2019-11-02T14:12:25 | 219,163,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,256 | py | from datetime import datetime
from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.forms import LoginForm, RegistrationForm, EditProfileForm
from app.models import User
@app.route('/')
@app.route('/index')
@login_required
def index():
posts = [
{
'author': {'username': 'John'},
'body': 'Beautiful day in Portland!'
},
{
'author': {'username': 'Susan'},
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html', title='Home', posts=posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('index')
return redirect(next_page)
return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash('Congratulations, you are now a registered user!')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
posts = [
{'author': user, 'body': 'Test post #1'},
{'author': user, 'body': 'Test post #2'}
]
return render_template('user.html', user=user, posts=posts)
@app.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('edit_profile'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', title='Edit Profile',
form=form)
@app.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
| [
"[email protected]"
] | |
e61797bb2a81da08a2b58f8a43a84c3f54913716 | 0556889bc09ccebacb6091a0e6ef7dcc07598ad7 | /python/step5_tco.py | cbb92c975d71c2fb4c7a71ffb22419a372b17de2 | [] | no_license | mkhoeini/mal | 0b475dc45f9ad256cca1b8be5a0049ece81c1a13 | 0de08030ebf15d58f626d822692159f2ef1a0649 | refs/heads/master | 2020-12-11T01:46:38.956456 | 2015-01-08T17:53:00 | 2015-01-08T17:53:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,775 | py | import sys, traceback
import mal_readline
import mal_types as types
import reader, printer
from env import Env
import core
# read
def READ(str):
return reader.read_str(str)
# eval
def eval_ast(ast, env):
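    # symbols resolve through the environment; lists, vectors and hash-maps are evaluated element-wise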
if types._symbol_Q(ast):
return env.get(ast)
elif types._list_Q(ast):
return types._list(*map(lambda a: EVAL(a, env), ast))
elif types._vector_Q(ast):
return types._vector(*map(lambda a: EVAL(a, env), ast))
elif types._hash_map_Q(ast):
keyvals = []
for k in ast.keys():
keyvals.append(EVAL(k, env))
keyvals.append(EVAL(ast[k], env))
return types._hash_map(*keyvals)
else:
return ast # primitive value, return unchanged
def EVAL(ast, env):
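    # trampoline: tail positions (let*, do, if, function application) rebind ast/env and loop instead of recursing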
while True:
#print("EVAL %s" % printer._pr_str(ast))
if not types._list_Q(ast):
return eval_ast(ast, env)
# apply list
if len(ast) == 0: return ast
a0 = ast[0]
if "def!" == a0:
a1, a2 = ast[1], ast[2]
res = EVAL(a2, env)
return env.set(a1, res)
elif "let*" == a0:
a1, a2 = ast[1], ast[2]
let_env = Env(env)
for i in range(0, len(a1), 2):
let_env.set(a1[i], EVAL(a1[i+1], let_env))
ast = a2
env = let_env
# Continue loop (TCO)
elif "do" == a0:
eval_ast(ast[1:-1], env)
ast = ast[-1]
# Continue loop (TCO)
elif "if" == a0:
a1, a2 = ast[1], ast[2]
cond = EVAL(a1, env)
if cond is None or cond is False:
if len(ast) > 3: ast = ast[3]
else: ast = None
else:
ast = a2
# Continue loop (TCO)
elif "fn*" == a0:
a1, a2 = ast[1], ast[2]
return types._function(EVAL, Env, a2, env, a1)
else:
el = eval_ast(ast, env)
f = el[0]
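            # mal-defined functions carry their body and closure; step into them in this loop rather than recursing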
if hasattr(f, '__ast__'):
ast = f.__ast__
env = f.__gen_env__(el[1:])
else:
return f(*el[1:])
# print
def PRINT(exp):
return printer._pr_str(exp)
# repl
repl_env = Env()
def REP(str):
return PRINT(EVAL(READ(str), repl_env))
# core.py: defined using python
for k, v in core.ns.items(): repl_env.set(k, v)
# core.mal: defined using the language itself
REP("(def! not (fn* (a) (if a false true)))")
# repl loop
while True:
try:
line = mal_readline.readline("user> ")
if line == None: break
if line == "": continue
print(REP(line))
except reader.Blank: continue
except Exception as e:
print("".join(traceback.format_exception(*sys.exc_info())))
| [
"[email protected]"
] | |
af8dafba7bb90bef6fb05a2448c570252ae0ea10 | c7a4193e2f2835307a288dff24305d0702af6873 | /misc/comprehension.py | 1a188ef9e3456510ca886e50443282b2a5a2107b | [] | no_license | n3k0fi5t/pythonLearn | 15ffa6269d417c500b5b01806537832e45972916 | 5b6807c8fb4e353a3722df18852ac44625298056 | refs/heads/master | 2020-05-25T02:35:26.196590 | 2019-01-07T06:49:01 | 2019-01-07T06:49:01 | 84,902,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
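# NOTE: Python 2 syntax (xrange and the print statement); use range/print() under Python 3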
#list comprehension
a = [i for i in xrange(10)]
print a
#dict comprehension
b = {key:val for key,val in [(i,i**2) for i in xrange(10)]}
print b
| [
"[email protected]"
] | |
a6dbe76b742a764e70d0d2ab068d6cbd11481fb5 | a8a97b1a9f4b136c39522e1a5e2ef6fbe3d33f9f | /analysis/flux_selection/beta.py | 588a63df2b5778c352ba74cf3c5ba66ca24d155b | [] | no_license | stephenmwilkins/optimum-selection | 99900aa721215ad068c13ffe227e0c37b77bdc95 | 2c07421eec42d36f9f9ec5c5e0118773191d1ce5 | refs/heads/master | 2022-11-05T00:55:38.228129 | 2020-06-25T15:26:28 | 2020-06-25T15:26:28 | 272,703,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,407 | py |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import FLARE
cosmo = FLARE.default_cosmo()
import FLARE.SED.models as models
import FLARE.plt as fplt
import FLARE.photom as photo
import FLARE.surveys
import FLARE.filters
import h5py
cmap = mpl.cm.magma
# norm = mpl.colors.Normalize(vmin=0, vmax=10)
#
# --- define filters for colour-colour diagram
selection_filters = [['Webb.NIRCam.F090W', 'Webb.NIRCam.F115W','Webb.NIRCam.F150W'], ['Webb.NIRCam.F115W','Webb.NIRCam.F150W','Webb.NIRCam.F200W']]
# selection_filters = [['Webb.NIRCam.F090W', 'Webb.NIRCam.F115W','Webb.NIRCam.F150W']]
all_filters = []
for s in selection_filters: all_filters += s
all_filters = list(set(all_filters))
# --- define selection regions
selections = [(0.7, 0.0, 0.8, 1.2),(0.7, 0.0, 0.8, 1.2)]
# flat f_nu noise
noise = {f: 1. for f in all_filters} # nJy
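# override the flat placeholder above with the per-filter survey depths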
noise = FLARE.surveys.surveys['Webb10k'].fields['base'].depths # nJy
print(noise)
noise = {key:value for key,value in noise.items()}
SNR_detection = 100.
cat = h5py.File('../../data/beta_lum.hdf5', 'r') # contains only \beta, z, and fluxes
# fig = plt.figure()
# left = 0.15
# bottom = 0.15
# width = 0.8
# height = 0.8
#
# ax = fig.add_axes((left, bottom, width, height))
# ax.set_facecolor('0.0')
fig, axes = plt.subplots(1, 2, sharey=True, figsize = (4, 2))
fig.subplots_adjust(left=0.1, bottom=0.2, right=0.95, top=0.95, wspace=0, hspace=0)
for i, ax in zip(range(len(selection_filters)), axes):
filters = selection_filters[i]
flux_limit = SNR_detection * noise[filters[-1]] # nJy in final band
print(f'magnitude limit: {photo.flux_to_m(flux_limit)}')
sel = selections[i]
N = len(cat['z'][:])
fluxes = {f: cat[f][:] for f in filters} # f_nu/nJy
for f in filters:
fluxes[f] += noise[f]*np.random.randn(N)
c1 = fluxes[filters[0]]/fluxes[filters[1]] # break colour (mag) - usually on y-axis
c2 = fluxes[filters[-2]]/fluxes[filters[-1]] # slope colour (mag) - usually on x-axis
s_all = (fluxes[filters[-1]]>flux_limit)
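    # selection wedge: c2 > sel[0] with c1 below the line of slope sel[2] through (sel[0], sel[1]), flattening beyond c2 = sel[3]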
s = (c2>sel[0])&(c1<sel[2]*(c2-sel[0])+sel[1])&(c1<sel[2]*(sel[3]-sel[0])+sel[1])&s_all
ranges = [[6, 11.99], [-3, 1]]
bins = [20,20]
H_detected, xedges, yedges = np.histogram2d(cat['z'][s_all], cat['beta'][s_all], bins=bins, range=ranges)
H_selected, xedges, yedges = np.histogram2d(cat['z'][s], cat['beta'][s], bins=bins, range=ranges)
H_all, xedges, yedges = np.histogram2d(cat['z'][:], cat['beta'][:], bins=bins, range=ranges)
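    # completeness per (z, beta) bin: fraction of all galaxies that pass the selection (empty bins give 0/0 = NaN)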
R = H_selected.T/H_all.T
ax.imshow(R, extent = [*ranges[0], *ranges[1]], origin='lower', cmap = cmap, aspect = 'auto', alpha = 1.0)
ax.set_xlabel(r'$\rm z$')
axes[0].set_ylabel(r'$\rm \beta$')
fig.savefig(f'figures/beta.pdf')
fig.clf()
fig = plt.figure(figsize = (3, 3))
left = 0.2
bottom = 0.2
width = 0.75
height = 0.75
ax = fig.add_axes((left, bottom, width, height))
H_detected = {}
H_selected = {}
for i in range(len(selection_filters)):
filters = selection_filters[i]
flux_limit = SNR_detection * noise[filters[-1]] # nJy in final band
print(f'magnitude limit: {photo.flux_to_m(flux_limit)}')
sel = selections[i]
N = len(cat['z'][:])
fluxes = {f: cat[f][:] for f in filters} # f_nu/nJy
for f in filters:
fluxes[f] += noise[f]*np.random.randn(N)
c1 = fluxes[filters[0]]/fluxes[filters[1]] # break colour (mag) - usually on y-axis
c2 = fluxes[filters[-2]]/fluxes[filters[-1]] # slope colour (mag) - usually on x-axis
s_all = (fluxes[filters[-1]]>flux_limit)
s = (c2>sel[0])&(c1<sel[2]*(c2-sel[0])+sel[1])&(c1<sel[2]*(sel[3]-sel[0])+sel[1])&s_all
ranges = [[6, 11.99], [-3, 1]]
bins = [20,20]
H_detected[i], xedges, yedges = np.histogram2d(cat['z'][s_all], cat['beta'][s_all], bins=bins, range=ranges)
H_selected[i], xedges, yedges = np.histogram2d(cat['z'][s], cat['beta'][s], bins=bins, range=ranges)
H_all, xedges, yedges = np.histogram2d(cat['z'][:], cat['beta'][:], bins=bins, range=ranges)
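# combined map for the two dropout selections (a source passing both cuts contributes to both histograms)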
R = (H_selected[0]+H_selected[1])/H_all
# R = (H_detected[0]+H_detected[1])/H_all
ax.imshow(R.T, extent = [*ranges[0], *ranges[1]], origin='lower', cmap = cmap, aspect = 'auto', alpha = 1.0)
ax.set_xlabel(r'$\rm z$')
ax.set_ylabel(r'$\rm \beta$')
fig.savefig(f'figures/beta_joint.pdf')
fig.clf()
| [
"[email protected]"
] | |
5e3658bca27393e2631a0f8f2f2c6ee58daf9337 | 407cb37f5c826840263a9755589c23a40b07ebed | /phones/migrations/0001_initial.py | 9858f175087205af37baf43159347430e8fcdc68 | [] | no_license | r1zon/dj_dip | b53b9f4b2c59aa3e952b12106ab22421c89aec17 | 60a8caa238c4aaf5badb8376e1510473c1d4961d | refs/heads/master | 2022-12-10T22:35:30.883903 | 2020-09-14T14:21:01 | 2020-09-14T14:21:01 | 293,579,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,829 | py | # Generated by Django 2.2.10 on 2020-09-11 13:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_ordered', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, null=True)),
('product_type', models.CharField(blank=True, max_length=50, null=True)),
('img', models.ImageField(upload_to='products/')),
('slug', models.SlugField(blank=True, null=True, unique=True, verbose_name='slug')),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, null=True)),
('text', models.TextField()),
('rate', models.IntegerField(blank=True, default=0, null=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='phones.Product')),
],
),
migrations.CreateModel(
name='ProductCounts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product_counts', to='phones.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product_counts', to='phones.Product')),
],
),
migrations.AddField(
model_name='order',
name='products',
field=models.ManyToManyField(related_name='orders', through='phones.ProductCounts', to='phones.Product'),
),
migrations.AddField(
model_name='order',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
18070692b9d88bc2d548cfc41d82881611e741e0 | 6afc1214961b6461ed9f73111f6aa43bf9142339 | /myhod_wrapper/code/funcdef.py | 0bc800cfac0cd48c6c4106da8396d908744d5428 | [] | no_license | sp-shah/H0_pipeline | bb60c6c01323d75fb2abd98badcf39840f9fc920 | 99c6f26b5a708b756b503e23003f207230dfe46e | refs/heads/main | 2023-07-18T12:51:53.470467 | 2021-09-02T03:29:50 | 2021-09-02T03:29:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,506 | py | import numpy as np
import h5py
import matplotlib.pyplot as plt
import hod_wrapper as hodWrap
import sys
from matplotlib import rc
rc('text', usetex=True)
rc("text.latex", unicode = True)
rc("font", size =16., family = 'serif')
def read_cat(path):
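    # load the HOD mock galaxy properties and the bookkeeping tags from the HDF5 catalogue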
f = h5py.File(path, "r")
s = f["sim1"]
pos = s["pos"][:]
vel = s["vel"][:]
abs_mag = s["abs_mag"][:]
halo_ind = s["halo_ind"][:]
is_cen = s["is_cen"][:]
halo_mass = s["halo_mass"][:]
remove_gal = s["remove_gal"][:]
remove_subhalo = s["remove_subhalo"][:]
gal_ind_change = s["gal_ind_change"][:]
sub_ind_change = s["sub_ind_change"][:]
hod_dict = dict(pos = pos, vel = vel, abs_mag=abs_mag,
halo_ind = halo_ind, is_cen = is_cen,
halo_mass = halo_mass)
tag = dict(remove_gal = remove_gal, remove_subhalo = remove_subhalo,
gal_ind_change = gal_ind_change,
sub_ind_change = sub_ind_change)
return hod_dict, tag
def read_sim(path_to_sim):
sim_cat = hodWrap.halo_cat(path_to_sim)
plt.figure()
plt.hist(sim_cat.SubhaloMass, log = True, bins = 50)
plt.ylabel(r"Number of Subhalos")
plt.xlabel(r"Mass")
plt.savefig("../plots/massTestSubhalos.png", bbox_inches = "tight")
plt.show()
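    # each group's first subhalo is taken as its central (only for groups that actually contain subhaloes)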
centralInd = sim_cat.GroupFirstSub[sim_cat.GroupNsubs > 0]
centralMass = sim_cat.SubhaloMass[centralInd]
centralHaloInd = sim_cat.SubhaloGrNr[centralInd]
sim_dict = hodWrap.remove_firstsub(sim_cat)
sim_dict["centralMass"] = centralMass
sim_dict["centralHaloInd"] = centralHaloInd
#-----------------------------------------
loneHaloInd = np.where(sim_cat.GroupNsubs == 0)[0]
print(sim_cat.GroupMass[loneHaloInd])
#--------------------------------------------
sim_dict["loneHaloInd"] = loneHaloInd
#-------------------------------------------
#creating a dictionary for the group that can be used maybe
#writing this dictionary into the mock_cat file
groupDict = dict(groupMass = sim_cat.GroupMass,
groupPos = sim_cat.GroupPos, groupVel = sim_cat.GroupVel)
return sim_dict, groupDict
def edit_cat(hod_dict, sim_dict, tag):
is_cen = np.copy(hod_dict["is_cen"])
isSat = np.where(is_cen == 0)[0]
isCen = np.where(is_cen == 1)[0]
#-------------------------------------------------------------
#assign the mass of central satellites to central galaxies
hod_dict["SubhaloMass"] = np.full(len(hod_dict["abs_mag"]), -1.)
hod_HaloInd_centrals = hod_dict["halo_ind"][isCen]
loneHaloInd = sim_dict["loneHaloInd"]
#--the arrays below serve as indices to match the masses of the central galaxies
hod_HaloInd_centrals = np.delete(hod_HaloInd_centrals, loneHaloInd)
sim_HaloInd_centrals = sim_dict["centralHaloInd"]
print(len(hod_HaloInd_centrals))
print(len(sim_HaloInd_centrals))
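    # NOTE: the sys.exit() below looks like a debugging stop; everything after it in edit_cat is unreachable while it remains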
sys.exit()
hod_dict["SubhaloMass"][hod_HaloInd_centrals] = sim_dict["centralMass"]
print("Number of centrals in simulation with 0 mass")
print(len(np.where(sim_dict["centralMass"] == 0.)[0]))
hod_dict["SubhaloMass"][loneHaloInd] = hod_dict["halo_mass"][loneHaloInd]
print("galaxies in hod with halo mass and equal to 0")
print(len(np.where(hod_dict["halo_mass"][loneHaloInd] == 0.)[0]))
#-------------------------------------------------------------------
gal_ind_change = tag["gal_ind_change"].astype("int")
sub_ind_change = tag["sub_ind_change"].astype("int")
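    # index the parent arrays directly: chained fancy indexing (arr[isSat][idx] = ...) would assign into a temporary copy and be lost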
    hod_dict["pos"][isSat[gal_ind_change]] = sim_dict["pos"][sub_ind_change]
    hod_dict["vel"][isSat[gal_ind_change]] = sim_dict["vel"][sub_ind_change]
    hod_dict["SubhaloMass"][isSat[gal_ind_change]] = sim_dict["mass"][sub_ind_change]
print("Number of satellites in simulation with 0 mass")
print(len(np.where(sim_dict["mass"][sub_ind_change] == 0.)[0]))
print("Total number of galaxies in mock")
print(len(hod_dict["abs_mag"]))
print("Total number of galaxies that are getting prop changes")
print(len(gal_ind_change))
print("Total mass assignment")
a = len(sim_dict["mass"][sub_ind_change])
print("a")
print(a)
b = len(sim_dict["centralMass"])
print("b")
print(b)
print("Centrals in hOd + lone halos")
print(len(hod_dict["SubhaloMass"][hod_HaloInd_centrals]) + len(loneHaloInd))
c = len(hod_dict["halo_mass"][loneHaloInd])
print(a+b+c)
plt.figure()
plt.hist(hod_dict["SubhaloMass"], bins = 50, log = True)
plt.ylabel(r"Number of Galaxies")
plt.xlabel(r"Mass")
plt.savefig("../plots/massTestGalaxies_beforeRemoval.png", bbox_inches = "tight")
plt.show()
remove_gal = tag["remove_gal"].astype("int") + len(isCen) #collecting only sat gal
#print(len(hod_dict["abs_mag"]))
#print(len(remove_gal))
abs_mag = hod_dict["abs_mag"]
removed_abs_mag = abs_mag[remove_gal]
#print(removed_abs_mag)
'''
plt.figure()
plt.hist(removed_abs_mag, bins = 50)
plt.yscale("log")
plt.ylabel(r"Number\ of\ galaxies\ removed")
plt.xlabel(r"$\mathrm{M_r}$")
#plt.show()
plt.savefig("../plots/galRemoved_dist.png", bbox_inches = "tight")
'''
for key in hod_dict:
hod_dict[key] = np.delete(hod_dict[key], remove_gal, axis = 0)
print("Number of galaxies being removed")
print(len(remove_gal))
print("Total number of galaxies remaining")
print(len(hod_dict["abs_mag"]))
print("Total number of galaxies with 0 mass")
print(len(np.where(hod_dict["SubhaloMass"] == 0.)[0]))
print("Total number of halos with 0 mass")
print(len(np.where(hod_dict["halo_mass"] == 0.)[0]))
plt.figure()
plt.hist(hod_dict["SubhaloMass"], bins = 50, log = True)
plt.ylabel(r"Number of galaxies")
plt.xlabel(r"Mass")
plt.savefig("../plots/massTestGalaxies_afterRemoval.png", bbox_inches = "tight")
plt.show()
remove_subhalo = tag["remove_subhalo"].astype("int")
print(len(remove_subhalo))
print(len(sim_dict["mass"]))
for key in sim_dict:
sim_dict[key] = np.delete(sim_dict[key], remove_subhalo, axis = 0)
print("Total number of galaxies in sim dic")
print(len(sim_dict["mass"]))
sys.exit()
return hod_dict
def write_newCat(mockEdited, path_to_mockEdited):
f = h5py.File(path_to_mockEdited, "a")
s = f.create_group("sim1")
for key in mockEdited:
        s.create_dataset(key, data = mockEdited[key])
| [
"[email protected]"
] | |
15ec9b50a88b09460a9b8d7b4366e8046bcc58af | 067bc0691f8db2754607cc6a2e3ba3d230151d90 | /run_s2s_image_digit.py | 4362a7ce0cd235551b7e28e4b457ff60b63b6cfe | [] | no_license | tristanguigue/vib_s2s | bf9579c5a57c731df54979390f9f654003de5a0d | 46149200a7571dee67954d58dfbb180dd5f14327 | refs/heads/master | 2021-06-27T01:54:31.895545 | 2017-09-08T10:08:38 | 2017-09-08T10:08:38 | 94,911,512 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,616 | py | """Apply the variational information bottleneck to predict the labels of a sequence of MNIST
images."""
from tensorflow.examples.tutorials.mnist import input_data
from networks import Seq2Labels
from learners import DiscreteLossLearner
from tools import Batcher
import argparse
import time
import os
import numpy as np
DATA_DIR = '/tmp/tensorflow/mnist/input_data'
CHECKPOINT_PATH = 'checkpoints/'
DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
SAMPLE_EVERY = 100
NB_PRED_SAMPLES = 4
def main(beta, learning_rate, seq_length, layers, train_samples, test_samples,
epochs, hidden1_units, hidden2_units, bottleneck_size, label_selected, batch_size, test_batch,
save_checkpoints, nb_samples, update_marginal, dropout):
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
run_name = 's2s_imdigit_' + str(int(time.time()))
input_size = mnist.train.images.shape[1]
output_size = mnist.train.labels.shape[1]
train_data = mnist.train.images
test_data = mnist.test.images
train_labels = mnist.train.labels
test_labels = mnist.test.labels
if not train_samples:
train_samples = int(train_data.shape[0] / seq_length)
if not test_samples:
test_samples = int(test_data.shape[0] / seq_length)
train_data = train_data[:train_samples * seq_length, :]
test_data = test_data[:test_samples * seq_length, :]
train_labels = train_labels[:train_samples * seq_length, :]
test_labels = test_labels[:test_samples * seq_length, :]
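    # regroup consecutive images into sequences: arrays become (num_sequences, seq_length, input_size)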
train_data = np.array(np.split(train_data, train_samples))
test_data = np.array(np.split(test_data, test_samples))
train_labels = np.array(np.split(train_labels, train_samples))
test_labels = np.array(np.split(test_labels, test_samples))
train_loader = Batcher(train_data, train_labels, batch_size)
test_loader = Batcher(test_data, test_labels, test_batch)
seq2seq = Seq2Labels(seq_length, hidden1_units, hidden2_units, bottleneck_size, input_size,
output_size, layers, nb_samples, update_marginal=update_marginal,
dropout=dropout)
learner = DiscreteLossLearner(seq2seq, beta, learning_rate, batch_size, run_name,
reduce_seq=True)
best_loss = None
best_accuracy = 0
for epoch in range(epochs):
print('\nEpoch:', epoch)
start = time.time()
train_loader.reset_batch_pointer()
total_loss = 0
for i in range(train_loader.num_batches):
batch_xs, batch_ys = train_loader.next_batch()
current_loss, loss_summary = learner.train_network(batch_xs, batch_ys, learning_rate)
total_loss += current_loss
learner.writer.add_summary(loss_summary, epoch * train_loader.num_batches + i)
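        # checkpoint the freshly trained weights, then reload them into the separate evaluation session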
learner.training_saver.save(learner.sess, DIR + CHECKPOINT_PATH + run_name)
learner.saver.restore(learner.test_sess, DIR + CHECKPOINT_PATH + run_name)
train_loss, train_accuracy = learner.test_network(train_loader, epoch=None)
test_loss, test_accuracy = learner.test_network(test_loader, epoch)
if SAMPLE_EVERY is not None and not epoch % SAMPLE_EVERY:
            train_preds = learner.predict_sequence(
                train_data[:NB_PRED_SAMPLES], train_labels[:NB_PRED_SAMPLES])
            test_preds = learner.predict_sequence(
                test_data[:NB_PRED_SAMPLES], test_labels[:NB_PRED_SAMPLES])
            print(train_preds)
            print(test_preds)
print('Time: ', time.time() - start)
print('Loss: ', total_loss / train_loader.num_batches)
print('Train accuracy: ', train_accuracy, ', test accuracy: ', test_accuracy)
print('Train loss: ', train_loss, ', test loss: ', test_loss)
if test_accuracy > best_accuracy:
best_accuracy = test_accuracy
print('-----')
print('### Best accuracy ###')
print('-----')
if best_loss is None or test_loss < best_loss:
if save_checkpoints:
learner.saver.save(learner.sess, DIR + CHECKPOINT_PATH + run_name)
best_loss = test_loss
print('-----')
print('### Best loss ###')
print('-----')
learner.sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--beta', type=float, default=0.001,
help='the value of beta, mutual information regulariser')
parser.add_argument('--rate', type=float, default=0.0001,
help='the learning rate for the Adam optimiser')
parser.add_argument('--length', type=int,
help='length of sequence')
parser.add_argument('--layers', type=int, default=1,
help='number of rnn layers')
parser.add_argument('--train', type=int,
help='train samples')
parser.add_argument('--test', type=int,
help='test samples')
parser.add_argument('--epochs', type=int, default=5000,
help='number of epochs to run')
parser.add_argument('--hidden1', type=int, default=128,
help='hidden units of encoder')
parser.add_argument('--hidden2', type=int, default=16,
help='hidden units of decoder')
parser.add_argument('--bottleneck', type=int, default=32,
help='bottleneck size')
parser.add_argument('--label', type=int,
help='label of images selected')
parser.add_argument('--batch', type=int, default=100,
help='batch size')
parser.add_argument('--test_batch', type=int, default=500,
help='batch size')
parser.add_argument('--checkpoint', type=int, default=0,
help='save checkpoints')
parser.add_argument('--samples', type=int, default=12,
help='number of samples to get posterior expectation')
parser.add_argument('--update_marginal', type=int, default=0,
help='marginal has learnable variable mean and variance')
parser.add_argument('--dropout', type=int, default=0,
help='dropout regulariser')
args = parser.parse_args()
main(args.beta, args.rate, args.length, args.layers, args.train, args.test, args.epochs,
args.hidden1, args.hidden2, args.bottleneck, args.label, args.batch, args.test_batch,
bool(args.checkpoint), args.samples, bool(args.update_marginal), bool(args.dropout))
| [
"[email protected]"
] | |
c38778bbf98df1850395ea59fc8b6ea5eb4ad7e1 | 310b4ea73685ae061d8531030e2bcc6e023019c1 | /app/migrations/0003_auto_20190910_2158.py | 67586ce38e6300642968ecbb33839eb2c268c8a5 | [] | no_license | Kirill882/news_blog | a8b6f0e284658f5ad404977d54ffc771103cb50a | 691ff92a5bc8c622d87f09949198331a3923159a | refs/heads/master | 2023-04-26T20:07:52.307715 | 2019-11-01T10:51:47 | 2019-11-01T10:51:47 | 218,967,092 | 0 | 0 | null | 2023-04-21T20:39:41 | 2019-11-01T10:47:00 | Python | UTF-8 | Python | false | false | 387 | py | # Generated by Django 2.2.5 on 2019-09-10 18:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0002_comment_approved_comment'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='approved_comment',
new_name='approved_comments',
),
]
| [
"[email protected]"
] | |
0e195bf69220bc5cfcdf17a5a47617fb4f551f3b | fa19df33728779d9a662542a674be6554af1feba | /src/main/java/nl/Ipsen5Server/Service/kik-bot-api-unofficial/examples/kik_unofficial/protobuf/groups/v2/group_service_pb2.py | ba231b404a8416c8469d6f44b7819a72f55c37bd | [
"Apache-2.0",
"MIT"
] | permissive | anthonyscheeres/Ipen5BackendGroep11 | 3abfe0e07ec1d240a8e0b2db79a4f8d2f26bd59f | e2675c2ac6580f0a6f1d9e5f755f19405d17e514 | refs/heads/master | 2022-11-15T07:08:22.619268 | 2020-06-23T09:10:33 | 2020-06-23T09:10:33 | 263,292,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 51,098 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: groups/v2/group_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
from kik_unofficial.protobuf.common.v2 import model_pb2 as common_dot_v2_dot_model__pb2
from kik_unofficial.protobuf.messaging.v2 import model_pb2 as messaging_dot_v2_dot_model__pb2
from kik_unofficial.protobuf.groups.v2 import groups_common_pb2 as groups_dot_v2_dot_groups__common__pb2
from kik_unofficial.protobuf.chats.v2 import chat_info_pb2 as chats_dot_v2_dot_chat__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='groups/v2/group_service.proto',
package='mobile.groups.v2',
syntax='proto3',
  serialized_pb=_b('\n\x1dgroups/v2/group_service.proto\x12\x10mobile.groups.v2\x1a\x19protobuf_validation.proto\x1a\x15\x63ommon/v2/model.proto\x1a\x18messaging/v2/model.proto\x1a\x1dgroups/v2/groups_common.proto\x1a\x18\x63hats/v2/chat_info.proto\"\x81\x01\n\x17GetGroupInfoFullRequest\x12*\n\x07\x63hat_id\x18\x01 \x01(\x0b\x32\x11.common.v2.ChatIdB\x06\xca\x9d%\x02\x08\x01\x12:\n\x14requestor_persona_id\x18\x02 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x01\"\xac\x01\n\x18GetGroupInfoFullResponse\x12\x41\n\x06result\x18\x01 \x01(\x0e\x32\x31.mobile.groups.v2.GetGroupInfoFullResponse.Result\x12,\n\x04info\x18\x02 \x01(\x0b\x32\x1e.common.chats.v2.GroupInfoFull\"\x1f\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\"\x8c\x01\n\"GetBannedGroupMembersStreamRequest\x12*\n\x07\x63hat_id\x18\x01 \x01(\x0b\x32\x11.common.v2.ChatIdB\x06\xca\x9d%\x02\x08\x01\x12:\n\x14requestor_persona_id\x18\x02 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x01\"\xe1\x01\n#GetBannedGroupMembersStreamResponse\x12L\n\x06result\x18\x01 \x01(\x0e\x32<.mobile.groups.v2.GetBannedGroupMembersStreamResponse.Result\x12K\n\x14\x62\x61nned_group_members\x18\x02 \x03(\x0b\x32#.mobile.groups.v2.BannedGroupMemberB\x08\xca\x9d%\x04\x80\x01\x80\x08\"\x1f\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\"E\n\x11\x42\x61nnedGroupMember\x12\x30\n\npersona_id\x18\x01 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x01\"\x9f\x02\n\x12\x43reateGroupRequest\x12\x30\n\ncreator_id\x18\x01 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x01\x12\x37\n\x0finitial_members\x18\x02 \x03(\x0b\x32\x14.common.v2.PersonaIdB\x08\xca\x9d%\x04\x80\x01\x80\x08\x12/\n\ngroup_name\x18\x03 \x01(\x0b\x32\x1b.mobile.groups.v2.GroupName\x12@\n\x13public_group_fields\x18\x04 \x01(\x0b\x32#.common.groups.v2.PublicGroupFields\x12+\n\nrequest_id\x18\x05 \x01(\x0b\x32\x0f.common.v2.UuidB\x06\xca\x9d%\x02\x08\x01\"\x82\x03\n\x13\x43reateGroupResponse\x12<\n\x06result\x18\x01 \x01(\x0e\x32,.mobile.groups.v2.CreateGroupResponse.Result\x12\'\n\x04\x63hat\x18\x02 \x01(\x0b\x32\x19.common.messaging.v2.Chat\x12P\n\x11restricted_fields\x18\x03 \x03(\x0e\x32\x35.mobile.groups.v2.CreateGroupResponse.RestrictedField\"k\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x16\n\x12\x43REATION_ID_EXISTS\x10\x01\x12\x16\n\x12POLICY_RESTRICTION\x10\x02\x12\x16\n\x12HASHTAG_NOT_UNIQUE\x10\x03\x12\x11\n\rNOT_PERMITTED\x10\x04\"E\n\x0fRestrictedField\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0e\n\nGROUP_NAME\x10\x01\x12\x11\n\rGROUP_HASHTAG\x10\x02\"*\n\tGroupName\x12\x1d\n\ngroup_name\x18\x01 \x01(\tB\t\xca\x9d%\x05\x08\x01 \xfa\x01\"p\n\x11LeaveGroupRequest\x12*\n\x07\x63hat_id\x18\x01 \x01(\x0b\x32\x11.common.v2.ChatIdB\x06\xca\x9d%\x02\x08\x01\x12/\n\tleaver_id\x18\x02 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x01\"c\n\x12LeaveGroupResponse\x12;\n\x06result\x18\x01 \x01(\x0e\x32+.mobile.groups.v2.LeaveGroupResponse.Result\"\x10\n\x06Result\x12\x06\n\x02OK\x10\x00\"\xaa\x01\n\x11\x41\x64\x64ToGroupRequest\x12*\n\x07\x63hat_id\x18\x01 \x01(\x0b\x32\x11.common.v2.ChatIdB\x06\xca\x9d%\x02\x08\x01\x12.\n\x08\x61\x64\x64\x65r_id\x18\x02 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x01\x12\x39\n\x0enew_member_ids\x18\x03 \x03(\x0b\x32\x14.common.v2.PersonaIdB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x64\"\x93\x03\n\x12\x41\x64\x64ToGroupResponse\x12;\n\x06result\x18\x01 \x01(\x0e\x32+.mobile.groups.v2.AddToGroupResponse.Result\x12L\n\x0f\x66\x61ilure_details\x18\x02 \x03(\x0b\x32\x33.mobile.groups.v2.AddToGroupResponse.FailureDetails\x1a\xcc\x01\n\x0e\x46\x61ilureDetails\x12(\n\npersona_id\x18\x01 \x01(\x0b\x32\x14.common.v2.PersonaId\x12J\n\x06reason\x18\x02 \x01(\x0e\x32:.mobile.groups.v2.AddToGroupResponse.FailureDetails.Reason\"D\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06\x42\x41NNED\x10\x01\x12\x0e\n\nGROUP_FULL\x10\x02\x12\x11\n\rNOT_PERMITTED\x10\x03\"#\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x11\n\rNOT_PERMITTED\x10\x01\"\xae\x01\n\x16RemoveFromGroupRequest\x12*\n\x07\x63hat_id\x18\x01 \x01(\x0b\x32\x11.common.v2.ChatIdB\x06\xca\x9d%\x02\x08\x01\x12\x30\n\nremover_id\x18\x02 \x01(\x0b\x32\x14.common.v2.PersonaIdB\x06\xca\x9d%\x02\x08\x01\x12\x36\n\x0bremovee_ids\x18\x03 \x03(\x0b\x32\x14.common.v2.PersonaIdB\x0b\xca\x9d%\x07\x08\x01x\x01\x80\x01\x64\"\x8b\x03\n\x17RemoveFromGroupResponse\x12@\n\x06result\x18\x01 \x01(\x0e\x32\x30.mobile.groups.v2.RemoveFromGroupResponse.Result\x12Q\n\x0f\x66\x61ilure_details\x18\x02 \x03(\x0b\x32\x38.mobile.groups.v2.RemoveFromGroupResponse.FailureDetails\x1a\xb5\x01\n\x0e\x46\x61ilureDetails\x12(\n\npersona_id\x18\x01 \x01(\x0b\x32\x14.common.v2.PersonaId\x12O\n\x06reason\x18\x02 \x01(\x0e\x32?.mobile.groups.v2.RemoveFromGroupResponse.FailureDetails.Reason\"(\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x11\n\rNOT_PERMITTED\x10\x01\"#\n\x06Result\x12\x06\n\x02OK\x10\x00\x12\x11\n\rNOT_PERMITTED\x10\x01\x32\x83\x05\n\x05Group\x12k\n\x10GetGroupInfoFull\x12).mobile.groups.v2.GetGroupInfoFullRequest\x1a*.mobile.groups.v2.GetGroupInfoFullResponse\"\x00\x12\x8e\x01\n\x1bGetBannedGroupMembersStream\x12\x34.mobile.groups.v2.GetBannedGroupMembersStreamRequest\x1a\x35.mobile.groups.v2.GetBannedGroupMembersStreamResponse\"\x00\x30\x01\x12\\\n\x0b\x43reateGroup\x12$.mobile.groups.v2.CreateGroupRequest\x1a%.mobile.groups.v2.CreateGroupResponse\"\x00\x12Y\n\nLeaveGroup\x12#.mobile.groups.v2.LeaveGroupRequest\x1a$.mobile.groups.v2.LeaveGroupResponse\"\x00\x12Y\n\nAddToGroup\x12#.mobile.groups.v2.AddToGroupRequest\x1a$.mobile.groups.v2.AddToGroupResponse\"\x00\x12h\n\x0fRemoveFromGroup\x12(.mobile.groups.v2.RemoveFromGroupRequest\x1a).mobile.groups.v2.RemoveFromGroupResponse\"\x00\x42w\n\x15\x63om.kik.gen.groups.v2ZJgithub.com/kikinteractive/xiphias-api-mobile/generated/go/groups/v2;groups\xa2\x02\x11KPBMobileGroupsV2b\x06proto3')
,
dependencies=[protobuf__validation__pb2.DESCRIPTOR,common_dot_v2_dot_model__pb2.DESCRIPTOR,messaging_dot_v2_dot_model__pb2.DESCRIPTOR,groups_dot_v2_dot_groups__common__pb2.DESCRIPTOR,chats_dot_v2_dot_chat__info__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETGROUPINFOFULLRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='mobile.groups.v2.GetGroupInfoFullResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_FOUND', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=458,
serialized_end=489,
)
_sym_db.RegisterEnumDescriptor(_GETGROUPINFOFULLRESPONSE_RESULT)
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='mobile.groups.v2.GetBannedGroupMembersStreamResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_FOUND', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=458,
serialized_end=489,
)
_sym_db.RegisterEnumDescriptor(_GETBANNEDGROUPMEMBERSSTREAMRESPONSE_RESULT)
_CREATEGROUPRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='mobile.groups.v2.CreateGroupResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATION_ID_EXISTS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POLICY_RESTRICTION', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HASHTAG_NOT_UNIQUE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_PERMITTED', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1432,
serialized_end=1539,
)
_sym_db.RegisterEnumDescriptor(_CREATEGROUPRESPONSE_RESULT)
_CREATEGROUPRESPONSE_RESTRICTEDFIELD = _descriptor.EnumDescriptor(
name='RestrictedField',
full_name='mobile.groups.v2.CreateGroupResponse.RestrictedField',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GROUP_NAME', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GROUP_HASHTAG', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1541,
serialized_end=1610,
)
_sym_db.RegisterEnumDescriptor(_CREATEGROUPRESPONSE_RESTRICTEDFIELD)
_LEAVEGROUPRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='mobile.groups.v2.LeaveGroupResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=458,
serialized_end=474,
)
_sym_db.RegisterEnumDescriptor(_LEAVEGROUPRESPONSE_RESULT)
_ADDTOGROUPRESPONSE_FAILUREDETAILS_REASON = _descriptor.EnumDescriptor(
name='Reason',
full_name='mobile.groups.v2.AddToGroupResponse.FailureDetails.Reason',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BANNED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GROUP_FULL', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_PERMITTED', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2343,
serialized_end=2411,
)
_sym_db.RegisterEnumDescriptor(_ADDTOGROUPRESPONSE_FAILUREDETAILS_REASON)
_ADDTOGROUPRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='mobile.groups.v2.AddToGroupResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_PERMITTED', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2413,
serialized_end=2448,
)
_sym_db.RegisterEnumDescriptor(_ADDTOGROUPRESPONSE_RESULT)
_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS_REASON = _descriptor.EnumDescriptor(
name='Reason',
full_name='mobile.groups.v2.RemoveFromGroupResponse.FailureDetails.Reason',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_PERMITTED', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2946,
serialized_end=2986,
)
_sym_db.RegisterEnumDescriptor(_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS_REASON)
_REMOVEFROMGROUPRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='mobile.groups.v2.RemoveFromGroupResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_PERMITTED', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2413,
serialized_end=2448,
)
_sym_db.RegisterEnumDescriptor(_REMOVEFROMGROUPRESPONSE_RESULT)
_GETGROUPINFOFULLREQUEST = _descriptor.Descriptor(
name='GetGroupInfoFullRequest',
full_name='mobile.groups.v2.GetGroupInfoFullRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chat_id', full_name='mobile.groups.v2.GetGroupInfoFullRequest.chat_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='requestor_persona_id', full_name='mobile.groups.v2.GetGroupInfoFullRequest.requestor_persona_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=185,
serialized_end=314,
)
_GETGROUPINFOFULLRESPONSE = _descriptor.Descriptor(
name='GetGroupInfoFullResponse',
full_name='mobile.groups.v2.GetGroupInfoFullResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='mobile.groups.v2.GetGroupInfoFullResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='info', full_name='mobile.groups.v2.GetGroupInfoFullResponse.info', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETGROUPINFOFULLRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=317,
serialized_end=489,
)
_GETBANNEDGROUPMEMBERSSTREAMREQUEST = _descriptor.Descriptor(
name='GetBannedGroupMembersStreamRequest',
full_name='mobile.groups.v2.GetBannedGroupMembersStreamRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chat_id', full_name='mobile.groups.v2.GetBannedGroupMembersStreamRequest.chat_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='requestor_persona_id', full_name='mobile.groups.v2.GetBannedGroupMembersStreamRequest.requestor_persona_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=492,
serialized_end=632,
)
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE = _descriptor.Descriptor(
name='GetBannedGroupMembersStreamResponse',
full_name='mobile.groups.v2.GetBannedGroupMembersStreamResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='mobile.groups.v2.GetBannedGroupMembersStreamResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='banned_group_members', full_name='mobile.groups.v2.GetBannedGroupMembersStreamResponse.banned_group_members', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004\200\001\200\010'))),
],
extensions=[
],
nested_types=[],
enum_types=[
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=635,
serialized_end=860,
)
_BANNEDGROUPMEMBER = _descriptor.Descriptor(
name='BannedGroupMember',
full_name='mobile.groups.v2.BannedGroupMember',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='persona_id', full_name='mobile.groups.v2.BannedGroupMember.persona_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=862,
serialized_end=931,
)
_CREATEGROUPREQUEST = _descriptor.Descriptor(
name='CreateGroupRequest',
full_name='mobile.groups.v2.CreateGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='creator_id', full_name='mobile.groups.v2.CreateGroupRequest.creator_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='initial_members', full_name='mobile.groups.v2.CreateGroupRequest.initial_members', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004\200\001\200\010'))),
_descriptor.FieldDescriptor(
name='group_name', full_name='mobile.groups.v2.CreateGroupRequest.group_name', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='public_group_fields', full_name='mobile.groups.v2.CreateGroupRequest.public_group_fields', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='request_id', full_name='mobile.groups.v2.CreateGroupRequest.request_id', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=934,
serialized_end=1221,
)
_CREATEGROUPRESPONSE = _descriptor.Descriptor(
name='CreateGroupResponse',
full_name='mobile.groups.v2.CreateGroupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='mobile.groups.v2.CreateGroupResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chat', full_name='mobile.groups.v2.CreateGroupResponse.chat', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='restricted_fields', full_name='mobile.groups.v2.CreateGroupResponse.restricted_fields', index=2,
number=3, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CREATEGROUPRESPONSE_RESULT,
_CREATEGROUPRESPONSE_RESTRICTEDFIELD,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1224,
serialized_end=1610,
)
_GROUPNAME = _descriptor.Descriptor(
name='GroupName',
full_name='mobile.groups.v2.GroupName',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='group_name', full_name='mobile.groups.v2.GroupName.group_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005\010\001 \372\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1612,
serialized_end=1654,
)
_LEAVEGROUPREQUEST = _descriptor.Descriptor(
name='LeaveGroupRequest',
full_name='mobile.groups.v2.LeaveGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chat_id', full_name='mobile.groups.v2.LeaveGroupRequest.chat_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='leaver_id', full_name='mobile.groups.v2.LeaveGroupRequest.leaver_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1656,
serialized_end=1768,
)
_LEAVEGROUPRESPONSE = _descriptor.Descriptor(
name='LeaveGroupResponse',
full_name='mobile.groups.v2.LeaveGroupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='mobile.groups.v2.LeaveGroupResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_LEAVEGROUPRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1770,
serialized_end=1869,
)
_ADDTOGROUPREQUEST = _descriptor.Descriptor(
name='AddToGroupRequest',
full_name='mobile.groups.v2.AddToGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chat_id', full_name='mobile.groups.v2.AddToGroupRequest.chat_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='adder_id', full_name='mobile.groups.v2.AddToGroupRequest.adder_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='new_member_ids', full_name='mobile.groups.v2.AddToGroupRequest.new_member_ids', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001d'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1872,
serialized_end=2042,
)
_ADDTOGROUPRESPONSE_FAILUREDETAILS = _descriptor.Descriptor(
name='FailureDetails',
full_name='mobile.groups.v2.AddToGroupResponse.FailureDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='persona_id', full_name='mobile.groups.v2.AddToGroupResponse.FailureDetails.persona_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reason', full_name='mobile.groups.v2.AddToGroupResponse.FailureDetails.reason', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ADDTOGROUPRESPONSE_FAILUREDETAILS_REASON,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2207,
serialized_end=2411,
)
_ADDTOGROUPRESPONSE = _descriptor.Descriptor(
name='AddToGroupResponse',
full_name='mobile.groups.v2.AddToGroupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='mobile.groups.v2.AddToGroupResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='failure_details', full_name='mobile.groups.v2.AddToGroupResponse.failure_details', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ADDTOGROUPRESPONSE_FAILUREDETAILS, ],
enum_types=[
_ADDTOGROUPRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2045,
serialized_end=2448,
)
_REMOVEFROMGROUPREQUEST = _descriptor.Descriptor(
name='RemoveFromGroupRequest',
full_name='mobile.groups.v2.RemoveFromGroupRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chat_id', full_name='mobile.groups.v2.RemoveFromGroupRequest.chat_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='remover_id', full_name='mobile.groups.v2.RemoveFromGroupRequest.remover_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))),
_descriptor.FieldDescriptor(
name='removee_ids', full_name='mobile.groups.v2.RemoveFromGroupRequest.removee_ids', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001d'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2451,
serialized_end=2625,
)
_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS = _descriptor.Descriptor(
name='FailureDetails',
full_name='mobile.groups.v2.RemoveFromGroupResponse.FailureDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='persona_id', full_name='mobile.groups.v2.RemoveFromGroupResponse.FailureDetails.persona_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reason', full_name='mobile.groups.v2.RemoveFromGroupResponse.FailureDetails.reason', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS_REASON,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2805,
serialized_end=2986,
)
_REMOVEFROMGROUPRESPONSE = _descriptor.Descriptor(
name='RemoveFromGroupResponse',
full_name='mobile.groups.v2.RemoveFromGroupResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='mobile.groups.v2.RemoveFromGroupResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='failure_details', full_name='mobile.groups.v2.RemoveFromGroupResponse.failure_details', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS, ],
enum_types=[
_REMOVEFROMGROUPRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2628,
serialized_end=3023,
)
_GETGROUPINFOFULLREQUEST.fields_by_name['chat_id'].message_type = common_dot_v2_dot_model__pb2._CHATID
_GETGROUPINFOFULLREQUEST.fields_by_name['requestor_persona_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_GETGROUPINFOFULLRESPONSE.fields_by_name['result'].enum_type = _GETGROUPINFOFULLRESPONSE_RESULT
_GETGROUPINFOFULLRESPONSE.fields_by_name['info'].message_type = chats_dot_v2_dot_chat__info__pb2._GROUPINFOFULL
_GETGROUPINFOFULLRESPONSE_RESULT.containing_type = _GETGROUPINFOFULLRESPONSE
_GETBANNEDGROUPMEMBERSSTREAMREQUEST.fields_by_name['chat_id'].message_type = common_dot_v2_dot_model__pb2._CHATID
_GETBANNEDGROUPMEMBERSSTREAMREQUEST.fields_by_name['requestor_persona_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE.fields_by_name['result'].enum_type = _GETBANNEDGROUPMEMBERSSTREAMRESPONSE_RESULT
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE.fields_by_name['banned_group_members'].message_type = _BANNEDGROUPMEMBER
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE_RESULT.containing_type = _GETBANNEDGROUPMEMBERSSTREAMRESPONSE
_BANNEDGROUPMEMBER.fields_by_name['persona_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_CREATEGROUPREQUEST.fields_by_name['creator_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_CREATEGROUPREQUEST.fields_by_name['initial_members'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_CREATEGROUPREQUEST.fields_by_name['group_name'].message_type = _GROUPNAME
_CREATEGROUPREQUEST.fields_by_name['public_group_fields'].message_type = groups_dot_v2_dot_groups__common__pb2._PUBLICGROUPFIELDS
_CREATEGROUPREQUEST.fields_by_name['request_id'].message_type = common_dot_v2_dot_model__pb2._UUID
_CREATEGROUPRESPONSE.fields_by_name['result'].enum_type = _CREATEGROUPRESPONSE_RESULT
_CREATEGROUPRESPONSE.fields_by_name['chat'].message_type = messaging_dot_v2_dot_model__pb2._CHAT
_CREATEGROUPRESPONSE.fields_by_name['restricted_fields'].enum_type = _CREATEGROUPRESPONSE_RESTRICTEDFIELD
_CREATEGROUPRESPONSE_RESULT.containing_type = _CREATEGROUPRESPONSE
_CREATEGROUPRESPONSE_RESTRICTEDFIELD.containing_type = _CREATEGROUPRESPONSE
_LEAVEGROUPREQUEST.fields_by_name['chat_id'].message_type = common_dot_v2_dot_model__pb2._CHATID
_LEAVEGROUPREQUEST.fields_by_name['leaver_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_LEAVEGROUPRESPONSE.fields_by_name['result'].enum_type = _LEAVEGROUPRESPONSE_RESULT
_LEAVEGROUPRESPONSE_RESULT.containing_type = _LEAVEGROUPRESPONSE
_ADDTOGROUPREQUEST.fields_by_name['chat_id'].message_type = common_dot_v2_dot_model__pb2._CHATID
_ADDTOGROUPREQUEST.fields_by_name['adder_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_ADDTOGROUPREQUEST.fields_by_name['new_member_ids'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_ADDTOGROUPRESPONSE_FAILUREDETAILS.fields_by_name['persona_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_ADDTOGROUPRESPONSE_FAILUREDETAILS.fields_by_name['reason'].enum_type = _ADDTOGROUPRESPONSE_FAILUREDETAILS_REASON
_ADDTOGROUPRESPONSE_FAILUREDETAILS.containing_type = _ADDTOGROUPRESPONSE
_ADDTOGROUPRESPONSE_FAILUREDETAILS_REASON.containing_type = _ADDTOGROUPRESPONSE_FAILUREDETAILS
_ADDTOGROUPRESPONSE.fields_by_name['result'].enum_type = _ADDTOGROUPRESPONSE_RESULT
_ADDTOGROUPRESPONSE.fields_by_name['failure_details'].message_type = _ADDTOGROUPRESPONSE_FAILUREDETAILS
_ADDTOGROUPRESPONSE_RESULT.containing_type = _ADDTOGROUPRESPONSE
_REMOVEFROMGROUPREQUEST.fields_by_name['chat_id'].message_type = common_dot_v2_dot_model__pb2._CHATID
_REMOVEFROMGROUPREQUEST.fields_by_name['remover_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_REMOVEFROMGROUPREQUEST.fields_by_name['removee_ids'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS.fields_by_name['persona_id'].message_type = common_dot_v2_dot_model__pb2._PERSONAID
_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS.fields_by_name['reason'].enum_type = _REMOVEFROMGROUPRESPONSE_FAILUREDETAILS_REASON
_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS.containing_type = _REMOVEFROMGROUPRESPONSE
_REMOVEFROMGROUPRESPONSE_FAILUREDETAILS_REASON.containing_type = _REMOVEFROMGROUPRESPONSE_FAILUREDETAILS
_REMOVEFROMGROUPRESPONSE.fields_by_name['result'].enum_type = _REMOVEFROMGROUPRESPONSE_RESULT
_REMOVEFROMGROUPRESPONSE.fields_by_name['failure_details'].message_type = _REMOVEFROMGROUPRESPONSE_FAILUREDETAILS
_REMOVEFROMGROUPRESPONSE_RESULT.containing_type = _REMOVEFROMGROUPRESPONSE
DESCRIPTOR.message_types_by_name['GetGroupInfoFullRequest'] = _GETGROUPINFOFULLREQUEST
DESCRIPTOR.message_types_by_name['GetGroupInfoFullResponse'] = _GETGROUPINFOFULLRESPONSE
DESCRIPTOR.message_types_by_name['GetBannedGroupMembersStreamRequest'] = _GETBANNEDGROUPMEMBERSSTREAMREQUEST
DESCRIPTOR.message_types_by_name['GetBannedGroupMembersStreamResponse'] = _GETBANNEDGROUPMEMBERSSTREAMRESPONSE
DESCRIPTOR.message_types_by_name['BannedGroupMember'] = _BANNEDGROUPMEMBER
DESCRIPTOR.message_types_by_name['CreateGroupRequest'] = _CREATEGROUPREQUEST
DESCRIPTOR.message_types_by_name['CreateGroupResponse'] = _CREATEGROUPRESPONSE
DESCRIPTOR.message_types_by_name['GroupName'] = _GROUPNAME
DESCRIPTOR.message_types_by_name['LeaveGroupRequest'] = _LEAVEGROUPREQUEST
DESCRIPTOR.message_types_by_name['LeaveGroupResponse'] = _LEAVEGROUPRESPONSE
DESCRIPTOR.message_types_by_name['AddToGroupRequest'] = _ADDTOGROUPREQUEST
DESCRIPTOR.message_types_by_name['AddToGroupResponse'] = _ADDTOGROUPRESPONSE
DESCRIPTOR.message_types_by_name['RemoveFromGroupRequest'] = _REMOVEFROMGROUPREQUEST
DESCRIPTOR.message_types_by_name['RemoveFromGroupResponse'] = _REMOVEFROMGROUPRESPONSE
GetGroupInfoFullRequest = _reflection.GeneratedProtocolMessageType('GetGroupInfoFullRequest', (_message.Message,), dict(
DESCRIPTOR = _GETGROUPINFOFULLREQUEST,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.GetGroupInfoFullRequest)
))
_sym_db.RegisterMessage(GetGroupInfoFullRequest)
GetGroupInfoFullResponse = _reflection.GeneratedProtocolMessageType('GetGroupInfoFullResponse', (_message.Message,), dict(
DESCRIPTOR = _GETGROUPINFOFULLRESPONSE,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.GetGroupInfoFullResponse)
))
_sym_db.RegisterMessage(GetGroupInfoFullResponse)
GetBannedGroupMembersStreamRequest = _reflection.GeneratedProtocolMessageType('GetBannedGroupMembersStreamRequest', (_message.Message,), dict(
DESCRIPTOR = _GETBANNEDGROUPMEMBERSSTREAMREQUEST,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.GetBannedGroupMembersStreamRequest)
))
_sym_db.RegisterMessage(GetBannedGroupMembersStreamRequest)
GetBannedGroupMembersStreamResponse = _reflection.GeneratedProtocolMessageType('GetBannedGroupMembersStreamResponse', (_message.Message,), dict(
DESCRIPTOR = _GETBANNEDGROUPMEMBERSSTREAMRESPONSE,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.GetBannedGroupMembersStreamResponse)
))
_sym_db.RegisterMessage(GetBannedGroupMembersStreamResponse)
BannedGroupMember = _reflection.GeneratedProtocolMessageType('BannedGroupMember', (_message.Message,), dict(
DESCRIPTOR = _BANNEDGROUPMEMBER,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.BannedGroupMember)
))
_sym_db.RegisterMessage(BannedGroupMember)
CreateGroupRequest = _reflection.GeneratedProtocolMessageType('CreateGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATEGROUPREQUEST,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.CreateGroupRequest)
))
_sym_db.RegisterMessage(CreateGroupRequest)
CreateGroupResponse = _reflection.GeneratedProtocolMessageType('CreateGroupResponse', (_message.Message,), dict(
DESCRIPTOR = _CREATEGROUPRESPONSE,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.CreateGroupResponse)
))
_sym_db.RegisterMessage(CreateGroupResponse)
GroupName = _reflection.GeneratedProtocolMessageType('GroupName', (_message.Message,), dict(
DESCRIPTOR = _GROUPNAME,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.GroupName)
))
_sym_db.RegisterMessage(GroupName)
LeaveGroupRequest = _reflection.GeneratedProtocolMessageType('LeaveGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _LEAVEGROUPREQUEST,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.LeaveGroupRequest)
))
_sym_db.RegisterMessage(LeaveGroupRequest)
LeaveGroupResponse = _reflection.GeneratedProtocolMessageType('LeaveGroupResponse', (_message.Message,), dict(
DESCRIPTOR = _LEAVEGROUPRESPONSE,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.LeaveGroupResponse)
))
_sym_db.RegisterMessage(LeaveGroupResponse)
AddToGroupRequest = _reflection.GeneratedProtocolMessageType('AddToGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _ADDTOGROUPREQUEST,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.AddToGroupRequest)
))
_sym_db.RegisterMessage(AddToGroupRequest)
AddToGroupResponse = _reflection.GeneratedProtocolMessageType('AddToGroupResponse', (_message.Message,), dict(
FailureDetails = _reflection.GeneratedProtocolMessageType('FailureDetails', (_message.Message,), dict(
DESCRIPTOR = _ADDTOGROUPRESPONSE_FAILUREDETAILS,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.AddToGroupResponse.FailureDetails)
))
,
DESCRIPTOR = _ADDTOGROUPRESPONSE,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.AddToGroupResponse)
))
_sym_db.RegisterMessage(AddToGroupResponse)
_sym_db.RegisterMessage(AddToGroupResponse.FailureDetails)
RemoveFromGroupRequest = _reflection.GeneratedProtocolMessageType('RemoveFromGroupRequest', (_message.Message,), dict(
DESCRIPTOR = _REMOVEFROMGROUPREQUEST,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.RemoveFromGroupRequest)
))
_sym_db.RegisterMessage(RemoveFromGroupRequest)
RemoveFromGroupResponse = _reflection.GeneratedProtocolMessageType('RemoveFromGroupResponse', (_message.Message,), dict(
FailureDetails = _reflection.GeneratedProtocolMessageType('FailureDetails', (_message.Message,), dict(
DESCRIPTOR = _REMOVEFROMGROUPRESPONSE_FAILUREDETAILS,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.RemoveFromGroupResponse.FailureDetails)
))
,
DESCRIPTOR = _REMOVEFROMGROUPRESPONSE,
__module__ = 'groups.v2.group_service_pb2'
# @@protoc_insertion_point(class_scope:mobile.groups.v2.RemoveFromGroupResponse)
))
_sym_db.RegisterMessage(RemoveFromGroupResponse)
_sym_db.RegisterMessage(RemoveFromGroupResponse.FailureDetails)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\025com.kik.gen.groups.v2ZJgithub.com/kikinteractive/xiphias-api-mobile/generated/go/groups/v2;groups\242\002\021KPBMobileGroupsV2'))
_GETGROUPINFOFULLREQUEST.fields_by_name['chat_id'].has_options = True
_GETGROUPINFOFULLREQUEST.fields_by_name['chat_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_GETGROUPINFOFULLREQUEST.fields_by_name['requestor_persona_id'].has_options = True
_GETGROUPINFOFULLREQUEST.fields_by_name['requestor_persona_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_GETBANNEDGROUPMEMBERSSTREAMREQUEST.fields_by_name['chat_id'].has_options = True
_GETBANNEDGROUPMEMBERSSTREAMREQUEST.fields_by_name['chat_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_GETBANNEDGROUPMEMBERSSTREAMREQUEST.fields_by_name['requestor_persona_id'].has_options = True
_GETBANNEDGROUPMEMBERSSTREAMREQUEST.fields_by_name['requestor_persona_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE.fields_by_name['banned_group_members'].has_options = True
_GETBANNEDGROUPMEMBERSSTREAMRESPONSE.fields_by_name['banned_group_members']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004\200\001\200\010'))
_BANNEDGROUPMEMBER.fields_by_name['persona_id'].has_options = True
_BANNEDGROUPMEMBER.fields_by_name['persona_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_CREATEGROUPREQUEST.fields_by_name['creator_id'].has_options = True
_CREATEGROUPREQUEST.fields_by_name['creator_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_CREATEGROUPREQUEST.fields_by_name['initial_members'].has_options = True
_CREATEGROUPREQUEST.fields_by_name['initial_members']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\004\200\001\200\010'))
_CREATEGROUPREQUEST.fields_by_name['request_id'].has_options = True
_CREATEGROUPREQUEST.fields_by_name['request_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_GROUPNAME.fields_by_name['group_name'].has_options = True
_GROUPNAME.fields_by_name['group_name']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\005\010\001 \372\001'))
_LEAVEGROUPREQUEST.fields_by_name['chat_id'].has_options = True
_LEAVEGROUPREQUEST.fields_by_name['chat_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_LEAVEGROUPREQUEST.fields_by_name['leaver_id'].has_options = True
_LEAVEGROUPREQUEST.fields_by_name['leaver_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_ADDTOGROUPREQUEST.fields_by_name['chat_id'].has_options = True
_ADDTOGROUPREQUEST.fields_by_name['chat_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_ADDTOGROUPREQUEST.fields_by_name['adder_id'].has_options = True
_ADDTOGROUPREQUEST.fields_by_name['adder_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_ADDTOGROUPREQUEST.fields_by_name['new_member_ids'].has_options = True
_ADDTOGROUPREQUEST.fields_by_name['new_member_ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001d'))
_REMOVEFROMGROUPREQUEST.fields_by_name['chat_id'].has_options = True
_REMOVEFROMGROUPREQUEST.fields_by_name['chat_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_REMOVEFROMGROUPREQUEST.fields_by_name['remover_id'].has_options = True
_REMOVEFROMGROUPREQUEST.fields_by_name['remover_id']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\002\010\001'))
_REMOVEFROMGROUPREQUEST.fields_by_name['removee_ids'].has_options = True
_REMOVEFROMGROUPREQUEST.fields_by_name['removee_ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\312\235%\007\010\001x\001\200\001d'))
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
c1480def64eb483f4e4fc918b25fb7381d35c616 | 19973144ec11438c8bc4c973473033ee877e1ad3 | /tuples/named_tuples_modifying_and_extending.py | 9ac0a122c05ed86ec52cc8c8849be7adcf3bdeaa | [] | no_license | kuzmich321/python-core | b6d1720c1a0202e5d40032ed9f4b96aec1d284df | cd137980c8c5677a72f3e9c0703a1f3f06ef37e5 | refs/heads/main | 2023-04-16T19:10:42.450055 | 2021-04-24T06:52:43 | 2021-04-24T06:52:43 | 332,165,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | from collections import namedtuple
# Named Tuples are Immutable
# So how can we "change" one or more values inside the tuple?
# Just like with strings, we have to create a new tuple, with the modified values
Point2D = namedtuple('Point2D', 'x y')
pt = Point2D(0, 0)
pt = Point2D(100, pt.y)
print(pt)
# Maybe slicing or unpacking?
Stock = namedtuple('Stock', 'symbol year month day open high low close')
stock = Stock('DJIA', 2021, 1, 25, 26_313, 26_322, 26_410, 26_393)
current = stock[:7]   # option 1: slice off the last field (a tuple)
*current, _ = stock   # option 2: extended unpacking (rebinds current to a list)
stock = Stock(*current, 26_555)
print(stock)
# We can also use the <_make> class method - but we need to create an iterable that
# contains all the values first
current = stock[:7]
new_values = current + (26_444,)
stock = Stock._make(new_values)
print(stock)
# This still has drawbacks
# What if we wanted to change a value in the middle, say day?
pre = stock[:3]
post = stock[4:]
new_values = pre + (26,) + post
stock = Stock(*new_values)
print(stock)
# But even this still has drawbacks!
# How about modifying both the day and the high values?
# The <_replace> instance method
stock = stock._replace(day=27, high=27_000, close=26_777)
print(stock)
# Extending a Named Tuple
# Sometimes we want to create a named tuple that extends another named tuple, appending one or more fields
Stock = namedtuple('Stock', 'symbol year month day open high low close')
# We want to create a new named tuple class, StockExt that adds a single field, prev_close
# When dealing with classes, this is sometimes done by using subclassing.
# But this is not easy to do with named tuples
# And there is a cleaner way of doing it anyway
new_fields = Stock._fields + ('prev_close',)
StockExt = namedtuple('StockExt', new_fields)
stock = Stock('DJIA', 2021, 1, 25, 26_313, 26_322, 26_410, 26_393)
stock_ext = StockExt(*stock, 26_000)
print(stock_ext)
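# Quick check (illustrative only): the extended class keeps the original
# fields plus the appended one, and still supports the namedtuple helpers.
print(StockExt._fields)
print(stock_ext._replace(prev_close=26_100))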
| [
"[email protected]"
] | |
af4c49328bbfed02acdb365d357d20e4a58a6be9 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_03_31/operations/_operations.py | 84e5a71475974e9361d744abcd9b2b494f011e56 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 6,149 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-31")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Devices/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.iothub.v2021_03_31.IotHubClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available IoT Hub REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2021_03_31.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-31")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.OperationListResult]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Devices/operations"} # type: ignore
| [
"[email protected]"
] | |
1ad3c1b95cd561d50dbb80b504710344c5e194ba | 3c529285a0029436ec63185c5c3bd2484ab18a15 | /jwst/regtest/test_fgs_image2.py | 3eabd0340ed05c28372673a4ecdf25744df29eb4 | [
"BSD-2-Clause"
] | permissive | ARKA1112/jwst-1 | 7d2c30aaf1e58c26731f0c64b6b94f734b8989a9 | 63b5caa69dbe102245d703feb635a33f72e47e75 | refs/heads/master | 2023-04-10T17:58:54.212100 | 2021-04-21T23:34:27 | 2021-04-21T23:34:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import pytest
from astropy.io.fits.diff import FITSDiff
from jwst.stpipe import Step
@pytest.fixture(scope="module")
def run_fgs_image2(rtdata_module):
    """Run the calwebb_image2 pipeline on FGS rate data, saving intermediate products."""
    rtdata = rtdata_module
rtdata.get_data("fgs/image2/jw86500007001_02101_00001_GUIDER2_rate.fits")
args = ["calwebb_image2", rtdata.input,
"--steps.flat_field.save_results=True",
"--steps.resample.save_results=True"]
Step.from_cmdline(args)
@pytest.mark.bigdata
@pytest.mark.parametrize("suffix", ['flat_field', 'cal', 'i2d'])
def test_fgs_image2(run_fgs_image2, rtdata_module, fitsdiff_default_kwargs, suffix):
"""Regression test for FGS data in the image2 pipeline"""
rtdata = rtdata_module
output = f"jw86500007001_02101_00001_GUIDER2_{suffix}.fits"
rtdata.output = output
rtdata.get_truth(f"truth/test_fgs_image2/{output}")
diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)
assert diff.identical, diff.report()
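# Sketch (not part of the test suite): bigdata-marked regression tests like
# this one can be selected with the standard pytest marker flag, e.g.
#   pytest jwst/regtest/test_fgs_image2.py -m bigdata
# though access to the regression-test data depends on the local setup.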
| [
"[email protected]"
] | |
e9746ca8aa1b22d1059d49bd4430f3ce70b24f96 | 26d33a53ee00b5c10134da72529caa945c20ef8d | /23_group_of_statement_with_UDF.py | 9294e6eda6e6c300257f9be3e6192e293d5bace5 | [] | no_license | Shinakshi09/Python | 560cc802eeb6ebb850707fd0ed9f18493b89522e | 033e35622d0263d5f8aaa5c9aabff33c01137455 | refs/heads/main | 2023-06-18T20:55:54.941772 | 2021-07-19T15:00:55 | 2021-07-19T15:00:55 | 387,363,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py |
"""
Printing a group of statements by combining them into a user-defined function
"""
def print_group_of_statement():
    """Print a group of statements."""
    print("__________________")
    print(" I am bloody superman once again")
    print(" I am Q ")
    print(" Super bond girls")

for _ in range(10):
    print_group_of_statement()
| [
"[email protected]"
] | |
4dae29235c8e5a38aad40c087fb29bfbb50c2257 | c005fd84d7153fb26c42ba205270869033a7f339 | /sinfonier-backend-api/clustering/storm/StormUI.py | 1c515bca149c7dd8f5f3479396c63f9fe726d898 | [
"Apache-2.0"
] | permissive | aspgems/fiware-sinfonier | 7a5df99f33f9e99cbfaf7b2b5f596a01d63589b2 | 8dac57bef88f19fc79bdde5edfbb2dd37f1d1fba | refs/heads/master | 2021-01-11T03:08:48.874844 | 2016-10-18T11:10:46 | 2016-10-18T11:10:46 | 70,148,771 | 0 | 0 | null | 2016-10-18T15:40:54 | 2016-10-06T11:45:35 | Java | UTF-8 | Python | false | false | 11,531 | py | import json
from utils.HTTPHandler import HTTPHandler
from config.config import conf
from error.ErrorHandler import TopologyNotInCluster
class StormUI:
@staticmethod
def baseurl():
return "http://{}:{}".format(conf.STORM_UI_HOST,conf.STORM_UI_PORT )
# GET Operations
######################################
# /api/v1/cluster/configuration (GET)
# Returns the cluster configuration.
######################################
@staticmethod
def getClusterConfiguration():
url = StormUI.baseurl() + "/api/v1/cluster/configuration"
return HTTPHandler.getJson(url)
######################################
# /api/v1/cluster/summary (GET)
# Returns cluster summary information such as nimbus uptime or number of supervisors.
######################################
@staticmethod
def getClusterSummary():
url = StormUI.baseurl() + "/api/v1/cluster/summary"
return HTTPHandler.getJson(url)
######################################
# /api/v1/supervisor/summary (GET)
# Returns summary information for all supervisors.
######################################
@staticmethod
def getSupervisorSummary():
url = StormUI.baseurl() + "/api/v1/supervisor/summary"
return HTTPHandler.getJson(url)
######################################
# /api/v1/topology/summary (GET)
# Returns summary information for all topologies.
######################################
@staticmethod
def getTopologySummary():
url = StormUI.baseurl() + "/api/v1/topology/summary"
return HTTPHandler.getJson(url)
######################################
# /api/v1/topology/:id (GET)
# Returns topology information and statistics. Substitute id with topology id.
######################################
@staticmethod
def getTopology(topologyid):
url = StormUI.baseurl() + "/api/v1/topology/" + topologyid
return HTTPHandler.getJson(url)
######################################
# /api/v1/topology/:id/component/:component (GET)
# Returns detailed metrics and executor information
######################################
@staticmethod
def getTopologyComponent(topologyid, componentid):
url = StormUI.baseurl() + "/api/v1/topology/" + topologyid + "/component/" + componentid
return HTTPHandler.getJson(url)
# POST Operations
######################################
# /api/v1/uploadTopology (POST)
# uploads a topology.
######################################
@staticmethod
def uploadTopology(topologyConfig, topologyJar):
'''
>>> StormUI.uploadTopology("config","my.jar")
'Not implemented yet in this version'
'''
return "Not implemented yet in this version"
# url = StormUI.baseurl+"/api/v1/uploadTopology"
# return HTTPHandler.get(url).json()
######################################
# /api/v1/topology/:id/activate (POST)
# Activates a topology.
######################################
@staticmethod
def activateTopology(topologyid):
url = StormUI.baseurl() + "/api/v1/topology/" + topologyid + "/activate"
return HTTPHandler.postJson(url)
######################################
# /api/v1/topology/:id/activate (POST)
# Activates a topology.
######################################
@staticmethod
def activateTopologyByName(topology_name):
        # getTopologyIdByName raises TopologyNotInCluster for unknown names,
        # so no None check is needed here.
        topology_id = StormUI.getTopologyIdByName(topology_name)
        return StormUI.activateTopology(topology_id)
######################################
# /api/v1/topology/:id/deactivate (POST)
# Deactivates a topology.
######################################
@staticmethod
def deactivateTopology(topologyid):
url = StormUI.baseurl() + "/api/v1/topology/" + topologyid + "/deactivate"
return HTTPHandler.postJson(url)
######################################
# /api/v1/topology/:id/deactivate (POST)
# Deactivates a topology.
######################################
@staticmethod
def deactivateTopologyByName(topology_name):
        topology_id = StormUI.getTopologyIdByName(topology_name)
        return StormUI.deactivateTopology(topology_id)
######################################
# /api/v1/topology/:id/rebalance/:wait-time (POST)
# Rebalances a topology.
# rebalanceOptions = {"rebalanceOptions": {"numWorkers": 2, "executors": { "spout" : "5", "split": 7, "count": 5 }}, "callback":"foo"}
######################################
@staticmethod
    def rebalanceTopology(topologyid, wait_time, rebalanceOptions=None):
        # str() guards against a non-string wait_time; the mutable default
        # argument is replaced by None to avoid shared state between calls.
        url = StormUI.baseurl() + "/api/v1/topology/" + topologyid + "/rebalance/" + str(wait_time)
        headers = {"Content-Type": "application/json"}
        return HTTPHandler.postJson(url, data=json.dumps(rebalanceOptions or {}), headers=headers)
######################################
# /api/v1/topology/:id/kill/:wait-time (POST)
# Kills a topology.
######################################
@staticmethod
def killTopology(topologyid, wait_time):
url = StormUI.baseurl() + "/api/v1/topology/" + topologyid + "/kill/" + str(wait_time)
return HTTPHandler.postJson(url)
######################################
    # /api/v1/topology/:id/kill/:wait-time (POST)
    # Kills a topology, looked up by name.
######################################
@staticmethod
    def killTopologyByName(topology_name, wait_time=0):
        topology_id = StormUI.getTopologyIdByName(topology_name)
        return StormUI.killTopology(topology_id, wait_time)
######################################
# /api/v1/topology/:id/visualization (GET)
# Get topology visualization data.
######################################
@staticmethod
def getTopologyVisualization(topologyid):
url = StormUI.baseurl() + "/api/v1/topology/" + topologyid + "/visualization"
return HTTPHandler.getJson(url)
######################################
######################################
# Get topology summary by name (GET)
# This function makes 1 StormUI API query
######################################
@staticmethod
def getTopologySummaryByName(topologyname):
response = StormUI.getTopologySummary()
topologies = response["topologies"]
for topo in topologies:
if topo["name"] == topologyname:
return topo
return {}
######################################
# Get topology detail by name (GET)
# This function makes 2 StormUI API queries
######################################
@staticmethod
def getTopologyByName(topologyname):
response = StormUI.getTopologySummary()
topologies = response["topologies"]
for topo in topologies:
if topo["name"] == topologyname:
response = StormUI.getTopology(topo["id"])
return response
raise TopologyNotInCluster
######################################
# Get topology detail by name (GET)
# This function makes 2 StormUI API queries
######################################
@staticmethod
def getTopologyIdByName(topology_name):
response = StormUI.getTopologySummary()
topologies = response["topologies"]
for topo in topologies:
if topo["name"] == topology_name:
return topo["id"]
raise TopologyNotInCluster
######################################
# Get worker by ID (GET)
# This function makes 2 StormUI API queries
######################################
## Return workers list from all spouts and all executors of the topology. Without duplicates.
@staticmethod
def getWorkersByTopologyID(topologyid):
topo = StormUI.getTopology(topologyid)
return StormUI.getWorkersByTopology(topo)
######################################
# Get worker by Name (GET)
# This function makes 3 StormUI API queries
######################################
## Return workers list from all spouts and all executors of the topology. Without duplicates.
@staticmethod
def getWorkersByTopologyName(topologyname):
topo = StormUI.getTopologyByName(topologyname)
return StormUI.getWorkersByTopology(topo)
######################################
# Get workers by Topology
#
######################################
## Return workers list from all spouts and all executors of the topology. Without duplicates.
@staticmethod
def getWorkersByTopology(topo):
spoutids = [spout["spoutId"] for spout in topo["spouts"]]
workersLinks = list()
for spoutid in spoutids:
component = StormUI.getTopologyComponent(topo["id"], spoutid)
for executor in component["executorStats"]:
workersLinks.append(executor["workerLogLink"])
return list(set(workersLinks))
######################################
    # Get workers by topology ID (GET)
    # This function makes 1 + (number of spouts) StormUI API queries
######################################
## Return workers list from all spouts and all executors of the topology. Without duplicates.
@staticmethod
def getWorkersByTopologyId(topology_id):
        topo = StormUI.getTopology(topology_id)
        return StormUI.getWorkersByTopology(topo)
######################################
# Get error in topology by topology Name (GET)
# This function makes 2 StormUI API queries
######################################
@staticmethod
def getErrorInTopologyByName(topologyname):
topo = StormUI.getTopologyByName(topologyname)
if topo:
# Return True if there is an error in any module of the topology and False if not
return any(module["lastError"] for module in (topo["spouts"] + topo["bolts"]))
######################################
# Get error details in topology by topology Name (GET)
# This function makes 2 StormUI API queries
######################################
@staticmethod
def getErrorDetailsInTopologyByName(topologyname):
topo = StormUI.getTopologyByName(topologyname)
return [{module["spoutId"]: module["lastError"]} for module in topo["spouts"]] + [
{module["boltId"]: module["lastError"]} for module in topo["bolts"]] if topo else None
######################################
# Get topology visualization by name (GET)
# This function makes 1 StormUI API query
######################################
@staticmethod
def getTopologyVisualizationByName(topologyname):
response = StormUI.getTopologySummary()
topologies = response["topologies"]
for topo in topologies:
if topo["name"] == topologyname:
response = StormUI.getTopologyVisualization(topo["id"])
return response
return {}
@staticmethod
    def getFile(file_name, num_lines):
        url = file_name + "&tail=" + str(num_lines)
return HTTPHandler.get(url)
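# Minimal usage sketch (illustrative): assumes a reachable Storm UI at
# conf.STORM_UI_HOST:conf.STORM_UI_PORT; 'my-topology' is a placeholder
# name, not something defined by this module.
if __name__ == '__main__':
    summary = StormUI.getTopologySummary()
    print([topo["name"] for topo in summary.get("topologies", [])])
    # Deactivate, then kill with a 30-second grace period:
    # StormUI.deactivateTopologyByName('my-topology')
    # StormUI.killTopologyByName('my-topology', wait_time=30)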
| [
"[email protected]"
] | |
b8b6a6d2a24f72e6e26a34f9dabbad7a00dcbd5d | f2f9c70d0e333286ff56c65217575ceeada4ddb6 | /reinforcement learning/zad.py | d109e7ea4e94314f81dc113803b231fd5dfd2bae | [] | no_license | maciejsz95/WNO-2 | 47532ebcc8095f7ad778fc5cf7dfe3bda102e52f | 24a327efc841a4d3a34efe4ce8cfba2aa1272e4a | refs/heads/master | 2020-03-18T20:25:48.524877 | 2018-05-28T22:37:42 | 2018-05-28T22:37:42 | 135,215,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,624 | py | import numpy as np
import gym
from gym import wrappers
import tensorflow as tf
import json, sys, os
from os import path
import random
from collections import deque
env_to_use = 'Pendulum-v0'
# hyperparameters
gamma = 0.99 # reward discount factor
h1_actor = 8 # hidden layer 1 size for the actor
h2_actor = 8 # hidden layer 2 size for the actor
h3_actor = 8 # hidden layer 3 size for the actor
h1_critic = 8 # hidden layer 1 size for the critic
h2_critic = 8 # hidden layer 2 size for the critic
h3_critic = 8 # hidden layer 3 size for the critic
lr_actor = 1e-3 # learning rate for the actor
lr_critic = 1e-3 # learning rate for the critic
lr_decay = 1 # learning rate decay (per episode)
l2_reg_actor = 1e-6 # L2 regularization factor for the actor
l2_reg_critic = 1e-6 # L2 regularization factor for the critic
dropout_actor = 0 # dropout rate for actor (0 = no dropout)
dropout_critic = 0 # dropout rate for critic (0 = no dropout)
num_episodes = 15000 # number of episodes
max_steps_ep = 10000 # default max number of steps per episode (unless env has a lower hardcoded limit)
tau = 1e-2 # soft target update rate
train_every = 1 # number of steps to run the policy (and collect experience) before updating network weights
replay_memory_capacity = int(1e5) # capacity of experience replay memory
minibatch_size = 1024 # size of minibatch from experience replay memory for updates
initial_noise_scale = 0.1 # scale of the exploration noise process (1.0 is the range of each action dimension)
noise_decay = 0.99 # decay rate (per episode) of the scale of the exploration noise process
exploration_mu = 0.0 # mu parameter for the exploration noise process: dXt = theta*(mu-Xt)*dt + sigma*dWt
exploration_theta = 0.15 # theta parameter for the exploration noise process: dXt = theta*(mu-Xt)*dt + sigma*dWt
exploration_sigma = 0.2 # sigma parameter for the exploration noise process: dXt = theta*(mu-Xt)*dt + sigma*dWt
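# Illustrative sketch (not used by the training loop below): the discretized
# Ornstein-Uhlenbeck update applied during exploration is
#   noise <- noise + theta*(mu - noise) + sigma*N(0, 1)
# which yields temporally correlated noise that decays back toward mu.
def ou_noise_demo(steps=5):
    noise, samples = 0.0, []
    for _ in range(steps):
        noise += exploration_theta * (exploration_mu - noise) \
            + exploration_sigma * np.random.randn()
        samples.append(noise)
    return samples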
# game parameters
env = gym.make(env_to_use)
state_dim = np.prod(np.array(env.observation_space.shape)) # Get total number of dimensions in state
action_dim = np.prod(np.array(env.action_space.shape)) # Assuming continuous action space
# set seeds to 0
env.seed(0)
np.random.seed(0)
# prepare monitorings
outdir = '/tmp/ddpg-agent-results'
#env = wrappers.Monitor(env, outdir, force=True)
def writefile(fname, s):
    if not path.exists(outdir):
        os.makedirs(outdir)  # Monitor wrapper is disabled above, so create outdir ourselves
    with open(path.join(outdir, fname), 'w') as fh: fh.write(s)
info = {}
info['env_id'] = env.spec.id
info['params'] = dict(
gamma=gamma,
h1_actor=h1_actor,
h2_actor=h2_actor,
h3_actor=h3_actor,
h1_critic=h1_critic,
h2_critic=h2_critic,
h3_critic=h3_critic,
lr_actor=lr_actor,
lr_critic=lr_critic,
lr_decay=lr_decay,
l2_reg_actor=l2_reg_actor,
l2_reg_critic=l2_reg_critic,
dropout_actor=dropout_actor,
dropout_critic=dropout_critic,
num_episodes=num_episodes,
max_steps_ep=max_steps_ep,
tau=tau,
train_every=train_every,
replay_memory_capacity=replay_memory_capacity,
minibatch_size=minibatch_size,
initial_noise_scale=initial_noise_scale,
noise_decay=noise_decay,
exploration_mu=exploration_mu,
exploration_theta=exploration_theta,
exploration_sigma=exploration_sigma
)
np.set_printoptions(threshold=sys.maxsize)  # np.nan is rejected as a threshold by newer numpy releases
replay_memory = deque(maxlen=replay_memory_capacity) # used for O(1) popleft() operation
def add_to_memory(experience):
replay_memory.append(experience)
def sample_from_memory(minibatch_size):
return random.sample(replay_memory, minibatch_size)
#####################################################################################################
## Tensorflow
tf.reset_default_graph()
# placeholders
state_ph = tf.placeholder(dtype=tf.float32, shape=[None, state_dim])
action_ph = tf.placeholder(dtype=tf.float32, shape=[None, action_dim])
reward_ph = tf.placeholder(dtype=tf.float32, shape=[None])
next_state_ph = tf.placeholder(dtype=tf.float32, shape=[None, state_dim])
is_not_terminal_ph = tf.placeholder(dtype=tf.float32, shape=[None]) # indicators (go into target computation)
is_training_ph = tf.placeholder(dtype=tf.bool, shape=()) # for dropout
# episode counter
episodes = tf.Variable(0.0, trainable=False, name='episodes')
episode_inc_op = episodes.assign_add(1)
# will use this to initialize both the actor network its slowly-changing target network with same structure
def generate_actor_network(s, trainable, reuse):
hidden = tf.layers.dense(s, h1_actor, activation=tf.nn.relu, trainable=trainable, name='dense', reuse=reuse)
hidden_drop = tf.layers.dropout(hidden, rate=dropout_actor, training=trainable & is_training_ph)
hidden_2 = tf.layers.dense(hidden_drop, h2_actor, activation=tf.nn.relu, trainable=trainable, name='dense_1',
reuse=reuse)
hidden_drop_2 = tf.layers.dropout(hidden_2, rate=dropout_actor, training=trainable & is_training_ph)
hidden_3 = tf.layers.dense(hidden_drop_2, h3_actor, activation=tf.nn.relu, trainable=trainable, name='dense_2',
reuse=reuse)
hidden_drop_3 = tf.layers.dropout(hidden_3, rate=dropout_actor, training=trainable & is_training_ph)
actions_unscaled = tf.layers.dense(hidden_drop_3, action_dim, trainable=trainable, name='dense_3', reuse=reuse)
actions = env.action_space.low + tf.nn.sigmoid(actions_unscaled) * (
env.action_space.high - env.action_space.low) # bound the actions to the valid range
return actions
# actor network
with tf.variable_scope('actor'):
# Policy's outputted action for each state_ph (for generating actions and training the critic)
actions = generate_actor_network(state_ph, trainable=True, reuse=False)
# slow target actor network
with tf.variable_scope('slow_target_actor', reuse=False):
# Slow target policy's outputted action for each next_state_ph (for training the critic)
# use stop_gradient to treat the output values as constant targets when doing backprop
slow_target_next_actions = tf.stop_gradient(generate_actor_network(next_state_ph, trainable=False, reuse=False))
# will use this to initialize both the critic network its slowly-changing target network with same structure
def generate_critic_network(s, a, trainable, reuse):
state_action = tf.concat([s, a], axis=1)
hidden = tf.layers.dense(state_action, h1_critic, activation=tf.nn.relu, trainable=trainable, name='dense',
reuse=reuse)
hidden_drop = tf.layers.dropout(hidden, rate=dropout_critic, training=trainable & is_training_ph)
hidden_2 = tf.layers.dense(hidden_drop, h2_critic, activation=tf.nn.relu, trainable=trainable, name='dense_1',
reuse=reuse)
hidden_drop_2 = tf.layers.dropout(hidden_2, rate=dropout_critic, training=trainable & is_training_ph)
hidden_3 = tf.layers.dense(hidden_drop_2, h3_critic, activation=tf.nn.relu, trainable=trainable, name='dense_2',
reuse=reuse)
hidden_drop_3 = tf.layers.dropout(hidden_3, rate=dropout_critic, training=trainable & is_training_ph)
q_values = tf.layers.dense(hidden_drop_3, 1, trainable=trainable, name='dense_3', reuse=reuse)
return q_values
with tf.variable_scope('critic') as scope:
# Critic applied to state_ph and a given action (for training critic)
q_values_of_given_actions = generate_critic_network(state_ph, action_ph, trainable=True, reuse=False)
# Critic applied to state_ph and the current policy's outputted actions for state_ph (for training actor via deterministic policy gradient)
q_values_of_suggested_actions = generate_critic_network(state_ph, actions, trainable=True, reuse=True)
# slow target critic network
with tf.variable_scope('slow_target_critic', reuse=False):
# Slow target critic applied to slow target actor's outputted actions for next_state_ph (for training critic)
slow_q_values_next = tf.stop_gradient(
generate_critic_network(next_state_ph, slow_target_next_actions, trainable=False, reuse=False))
# isolate vars for each network
actor_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor')
slow_target_actor_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='slow_target_actor')
critic_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
slow_target_critic_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='slow_target_critic')
# update values for slowly-changing targets towards current actor and critic
update_slow_target_ops = []
for i, slow_target_actor_var in enumerate(slow_target_actor_vars):
update_slow_target_actor_op = slow_target_actor_var.assign(tau * actor_vars[i] + (1 - tau) * slow_target_actor_var)
update_slow_target_ops.append(update_slow_target_actor_op)
for i, slow_target_var in enumerate(slow_target_critic_vars):
update_slow_target_critic_op = slow_target_var.assign(tau * critic_vars[i] + (1 - tau) * slow_target_var)
update_slow_target_ops.append(update_slow_target_critic_op)
update_slow_targets_op = tf.group(*update_slow_target_ops, name='update_slow_targets')
# One step TD targets y_i for (s,a) from experience replay
# = r_i + gamma*Q_slow(s',mu_slow(s')) if s' is not terminal
# = r_i if s' terminal
targets = tf.expand_dims(reward_ph, 1) + tf.expand_dims(is_not_terminal_ph, 1) * gamma * slow_q_values_next
# 1-step temporal difference errors
td_errors = targets - q_values_of_given_actions
# critic loss function (mean-square value error with regularization)
critic_loss = tf.reduce_mean(tf.square(td_errors))
for var in critic_vars:
if not 'bias' in var.name:
critic_loss += l2_reg_critic * 0.5 * tf.nn.l2_loss(var)
# critic optimizer
critic_train_op = tf.train.AdamOptimizer(lr_critic * lr_decay ** episodes).minimize(critic_loss)
# actor loss function (mean Q-values under current policy with regularization)
actor_loss = -1 * tf.reduce_mean(q_values_of_suggested_actions)
for var in actor_vars:
if not 'bias' in var.name:
actor_loss += l2_reg_actor * 0.5 * tf.nn.l2_loss(var)
# actor optimizer
# the gradient of the mean Q-values wrt actor params is the deterministic policy gradient (keeping critic params fixed)
actor_train_op = tf.train.AdamOptimizer(lr_actor * lr_decay ** episodes).minimize(actor_loss, var_list=actor_vars)
# initialize session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#####################################################################################################
## Training
total_steps = 0
for ep in range(num_episodes):
total_reward = 0
steps_in_ep = 0
# Initialize exploration noise process
noise_process = np.zeros(action_dim)
noise_scale = (initial_noise_scale * noise_decay ** ep) * (env.action_space.high - env.action_space.low)
# Initial state
observation = env.reset()
#if ep % 10 == 0: env.render()
if ep % 5 == 0:
env.render()
for t in range(max_steps_ep):
# choose action based on deterministic policy
action_for_state, = sess.run(actions,
feed_dict={state_ph: observation[None], is_training_ph: False})
# add temporally-correlated exploration noise to action (using an Ornstein-Uhlenbeck process)
# print(action_for_state)
        # Incremental OU update matching the dXt comment in the hyperparameters
        # above (the previous value of the process is retained).
        noise_process += exploration_theta * (exploration_mu - noise_process) \
            + exploration_sigma * np.random.randn(action_dim)
# print(noise_scale*noise_process)
action_for_state += noise_scale * noise_process
# take step
next_observation, reward, done, _info = env.step(action_for_state)
#if ep % 40 == 0: env.render()
if ep % 5 == 0: env.render()
total_reward += reward
add_to_memory((observation, action_for_state, reward, next_observation,
# is next_observation a terminal state?
# 0.0 if done and not env.env._past_limit() else 1.0))
0.0 if done else 1.0))
# update network weights to fit a minibatch of experience
if total_steps % train_every == 0 and len(replay_memory) >= minibatch_size:
# grab N (s,a,r,s') tuples from replay memory
minibatch = sample_from_memory(minibatch_size)
# update the critic and actor params using mean-square value error and deterministic policy gradient, respectively
_, _ = sess.run([critic_train_op, actor_train_op],
feed_dict={
state_ph: np.asarray([elem[0] for elem in minibatch]),
action_ph: np.asarray([elem[1] for elem in minibatch]),
reward_ph: np.asarray([elem[2] for elem in minibatch]),
next_state_ph: np.asarray([elem[3] for elem in minibatch]),
is_not_terminal_ph: np.asarray([elem[4] for elem in minibatch]),
is_training_ph: True})
# update slow actor and critic targets towards current actor and critic
_ = sess.run(update_slow_targets_op)
observation = next_observation
total_steps += 1
steps_in_ep += 1
if done:
# Increment episode counter
_ = sess.run(episode_inc_op)
break
#print('Episode %2i, Reward: %7.3f, Steps: %i, Final noise scale: %7.3f' % (
# ep, total_reward, steps_in_ep, noise_scale))
print('Episode %2i' % (ep))
# Finalize and upload results
writefile('info.json', json.dumps(info))
env.close() | [
"[email protected]"
] | |
e425346780022da8a5f60b47f83874eabb99ecd7 | e3900b74d7ac56ed215f35612b61b1b465fdae1f | /deepzen/api/base/layer/norm/dropout.py | 0fefc6c4b541fe1c2e97bc8b65d23d913ddb92f6 | [] | no_license | knighton/deepzen | ae3bf9405344a917e4be480ac9d2887a7397ce5b | e60e8441d05789979e87ef87c8d2844c7d70ba72 | refs/heads/master | 2020-03-10T17:49:19.224002 | 2018-08-16T07:08:25 | 2018-08-16T07:08:25 | 126,268,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | class BaseDropoutAPI(object):
    def _dropout_mask_shape(self, x_shape, keep_spatial_axis=None):
        """Builds a broadcastable mask shape: batch dim and requested axes are kept."""
if keep_spatial_axis is None:
keep_spatial_axes = []
elif isinstance(keep_spatial_axis, int):
keep_spatial_axes = [keep_spatial_axis]
elif isinstance(keep_spatial_axis, (list, tuple)):
keep_spatial_axes = keep_spatial_axis
else:
assert False
mask_shape = [1] * len(x_shape)
mask_shape[0] = x_shape[0]
for axis in keep_spatial_axes:
mask_shape[1 + axis] = x_shape[1 + axis]
return tuple(mask_shape)
    def dropout(self, x, is_training, rate=0.5, keep_spatial_axis=None,
                xsnd=None):
        """Applies inverted dropout: surviving units are rescaled by 1 / (1 - rate)."""
        if not is_training:
            return x
        if xsnd is not None:
            assert self.spatial_ndim(x) == xsnd
        mask_shape = self._dropout_mask_shape(self.shape(x), keep_spatial_axis)
        # Keep each mask entry with probability 1 - rate, broadcast it against
        # x, and rescale so expected activations match the no-dropout case.
        mask = self.random_binomial(
            mask_shape, 1 - rate, self.dtype(x), self.device(x))
        mask = self.constant(mask)
        return x * mask / (1 - rate)
def dropout0d(self, x, is_training, rate=0.5, keep_spatial_axis=None):
return self.dropout(x, is_training, rate, keep_spatial_axis, 0)
def dropout1d(self, x, is_training, rate=0.5, keep_spatial_axis=None):
return self.dropout(x, is_training, rate, keep_spatial_axis, 1)
def dropout2d(self, x, is_training, rate=0.5, keep_spatial_axis=None):
return self.dropout(x, is_training, rate, keep_spatial_axis, 2)
def dropout3d(self, x, is_training, rate=0.5, keep_spatial_axis=None):
return self.dropout(x, is_training, rate, keep_spatial_axis, 3)
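# Illustrative numpy sketch (backend-independent, not used by the API above):
# the same inverted-dropout rule, where surviving activations are rescaled by
# 1 / (1 - rate) so that no rescaling is needed at inference time.
def _numpy_dropout_sketch(x, rate=0.5, rng=None):
    import numpy as np
    rng = rng or np.random.RandomState(0)
    mask = rng.binomial(1, 1 - rate, size=x.shape).astype(x.dtype)
    return x * mask / (1 - rate)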
| [
"[email protected]"
] | |
716e3562ae637f4edc50528a02bd8174c192ab40 | 7d71acdf6e8954452ce0910c229ce866ad373efd | /Web_Server/led.py | 35e6d34308f839c21094705938aef6baa8f54978 | [] | no_license | cverbitzki/SeniorProject | 1a432074634b2695dec4c366853dca6a874fa774 | b6896ad9798d25e56267abe36152e82850f4f3d0 | refs/heads/master | 2020-12-24T07:45:17.606666 | 2017-03-06T21:58:13 | 2017-03-06T21:58:13 | 58,071,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | #!/usr/python
import RPi.GPIO as GPIO
import time
import sqlite3
# blinking function
def blink(pin):
GPIO.output(pin,GPIO.HIGH)
time.sleep(1)
GPIO.output(pin,GPIO.LOW)
time.sleep(1)
return
GPIO.setwarnings(False)
# to use Raspberry Pi board pin numbers
GPIO.setmode(GPIO.BOARD)
# set up GPIO output channel
GPIO.setup(11, GPIO.OUT)
# blink GPIO17 50 times
GPIO.setup(13, GPIO.OUT)
try:
    while True:
        # Poll the state file written by the web server:
        # 'A' lights the "locked" LED, 'D' lights the "unlocked" LED.
        with open('state', 'r') as state_file:
            text = state_file.readline()
        if text == 'A':
            GPIO.output(11, True)
            GPIO.output(13, False)
        elif text == 'D':
            GPIO.output(11, False)
            GPIO.output(13, True)
        time.sleep(0.5)  # brief pause so the polling loop does not peg the CPU
except KeyboardInterrupt:
    pass  # fall through to GPIO.cleanup() below
'''
while True:
db = sqlite3.connect('activity.db')
cursor = db.execute(" select status from activity ORDER BY rowid DESC LIMIT 1")
for row in cursor:
if row[0] == "locked":
GPIO.output(11,True)
GPIO.output(13,False)
elif row[0] == "unlocked":
GPIO.output(11,False)
GPIO.output(13,True)
db.close()
time.sleep(2)
'''
'''
for i in range(0,1000):
blink(11)
blink(13)
'''
GPIO.cleanup()
| [
"[email protected]"
] | |
c022c602e2a4697d0a16386b80833b7a667fed92 | 2969ee2b6c579348870c346eb3edcaec7eac4fd3 | /mcnets/tempering.py | c8bcf852aa6b740a6537c02e4080196176aa012a | [] | no_license | stuhlmueller/mcnets | 500143476509af1dfc137ba278189480648983d4 | f564db53c7d85dce1fb365e8e704bd261e3ec5f1 | refs/heads/master | 2020-04-04T13:35:17.394491 | 2014-02-06T01:55:24 | 2014-02-06T01:55:24 | 15,242,129 | 1 | 0 | null | 2014-02-06T01:55:26 | 2013-12-17T01:34:19 | Python | UTF-8 | Python | false | false | 14,131 | py | ## Copyright 2013 Google Inc. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Tempered Markov chains, with fixed and adaptively set temperatures.
TODO(stuhlmueller): Refactor TemperedChain and NetworkChain to share
more code (e.g., by building a ReplicaStatsMixin class).
"""
from __future__ import division
import numpy as np
from scipy import interpolate
from mcnets import mcmc
from mcnets import utils
class TemperedChain(mcmc.MetropolisHastingsChain):
"""A compound chain that interleaves elementary transitions with swaps."""
def __init__(self, chains, nsteps_per_sweep, nswaps_per_sweep, rng=None):
"""Initializes tempered chain based on component chains.
Args:
chains: list of component chains, in increasing order of temperature
nsteps_per_sweep: the number of transitions to apply to each
component chain before we propose to swap neighboring pairs of
chains.
nswaps_per_sweep: the number of swaps to execute each time the
network chain transition method is called.
rng: a random stream (utils.RandomState).
"""
assert len(chains) > 1, len(chains)
self.CheckTemperatureOrder(chains)
self.chains = chains
self.nsteps_per_sweep = nsteps_per_sweep
self.nswaps_per_sweep = nswaps_per_sweep
super(TemperedChain, self).__init__(rng=rng)
def InitializeState(self):
"""The chain doesn't keep state beyond the state of its component chains."""
for chain in self.chains:
chain.InitializeState()
def InitializeStatistics(self):
"""Keeps track of statistics for each rank in the temperature ladder.
Statistics:
transitions: the number of transitions; each transition consists
of len(self.chains)*nsteps_per_sweep base moves and
len(self.chains)-1 swap moves.
swaps_proposed: map chain index to number of swap proposals with
neighbor at next-higher temperature
swaps_accepted: map chain index to number of accepted swaps with
neighbor at next-higher temperature
"""
n = len(self.chains)
self.statistics = utils.SimpleNamespace(
transitions=0,
swaps_proposed=np.zeros(n-1),
swaps_accepted=np.zeros(n-1)
)
for chain in self.chains:
chain.InitializeStatistics()
def CheckTemperatureOrder(self, chains):
"""Verifies that chains are given in ascending order of temperatures."""
for chain1, chain2 in utils.Pairwise(chains):
assert hasattr(chain1, "temp")
assert hasattr(chain2, "temp")
assert chain1.temp < chain2.temp, (chain1.temp, chain2.temp)
def SwapTransition(self, chain1, chain2, level):
"""Proposes to swap the states of two chains, accepts according to MH rule.
Args:
chain1: first chain to take part in swap
chain2: second chain to take part in swap
level: index of the lower-temperature chain
Returns:
swap_accepted: a Boolean indicating whether the swap was accepted
"""
assert chain1 != chain2
self.statistics.swaps_proposed[level] += 1
logp_old = (chain1.LogProbability(chain1.state) +
chain2.LogProbability(chain2.state))
logp_new = (chain1.LogProbability(chain2.state) +
chain2.LogProbability(chain1.state))
log_acceptance_ratio = logp_new - logp_old
swap_accepted = (log_acceptance_ratio >= utils.LOG_PROB_1 or
self.rng.log_flip(log_acceptance_ratio))
if swap_accepted:
self.statistics.swaps_accepted[level] += 1
chain1.state, chain2.state = chain2.state, chain1.state
return swap_accepted
def BaseTransition(self):
"""Applies transition operator to component chains."""
for chain in self.chains:
chain.TransitionN(self.nsteps_per_sweep)
def Transition(self):
"""Executes nsteps_per_sweep local transitions for each chain, then swaps.
Swap strategy: Repeat nswaps_per_sweep times: pick an edge in the
temperature ladder at random, propose to swap the chains connected
by this edge.
"""
self.BaseTransition()
for _ in xrange(self.nswaps_per_sweep):
level = self.rng.randint(len(self.chains)-1)
self.SwapTransition(self.chains[level], self.chains[level+1], level)
self.statistics.transitions += 1
@property
def state(self):
"""Only the state of the base chain is visible externally."""
return self.chains[0].state
@property
def temps(self):
"""Returns the list of temperatures of the component chains (ascending)."""
return [chain.temp for chain in self.chains]
def ComputeAdaptedTemperatures(temps, hist, epsilon=.001):
"""Given statistics about replica behavior, computes new temperatures.
This function is based on the presentation of the Feedback-Optimized
Parallel Tempering algorithm in [1]. The algorithm requires that the
histogram describing the fraction of replicas flowing upwards (for
each temperature) is monotonically decreasing in temperature. To
ensure this, we mix in epsilon of a linearly decreasing histogram.
[1] Robust Parameter Selection for Parallel Tempering
Firas Hamze, Neil Dickson, Kamran Karimi (2010)
http://arxiv.org/abs/1004.2840
Args:
temps: a list of temperatures (positive floating-point
numbers, strictly increasing)
hist: for each temperature, the fraction of replicas that are
on the way up (decreasing, in [0, 1])
epsilon: a number in [0, 1] that determines how strongly we mix a
linearly decreasing function into hist (to ensure that it
is strictly monotonically decreasing, and hence invertible)
Returns:
new_temps: an adapted list of temperatures
"""
n = len(temps)
assert n > 1
assert len(hist) == n, (len(hist), n)
for temp1, temp2 in utils.Pairwise(temps):
assert temp1 > 0, temp1
assert temp1 < temp2, (temp1, temp2)
for p1, p2 in utils.Pairwise(hist):
assert 0 <= p1 <= 1, p1
assert p1 >= p2, (p1, p2)
# Create a linearly spaced list of numbers in [0, 1] with n elements.
# For example, for n=5, linear_hist = [1.0, 0.75, 0.5, 0.25, 0.0].
    linear_hist = [float(x)/(n-1) for x in reversed(range(n))]
    strictly_monotonic_hist = utils.Mix(hist, linear_hist, epsilon)
    fraction_to_temp = interpolate.interp1d(
        list(reversed(strictly_monotonic_hist)),
        list(reversed(temps)))
new_temps = [temps[0]]
for i in reversed(range(2, n)):
        new_temps.append(float(fraction_to_temp(float(i-1)/(n-1))))
new_temps.append(temps[-1])
return new_temps
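# Hedged usage sketch for ComputeAdaptedTemperatures (all values
# illustrative, not from the original sources):
#
#   temps = [1.0, 2.0, 4.0, 8.0]
#   hist = [1.0, 0.8, 0.1, 0.0]  # fraction of upward-moving replicas per rung
#   new_temps = ComputeAdaptedTemperatures(temps, hist)
#
# The endpoints are preserved (new_temps[0] == temps[0], new_temps[-1] ==
# temps[-1]); interior rungs shift toward the region where hist falls
# fastest, i.e. where replica flow is most constricted.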
class AdaptiveTemperedChain(TemperedChain):
"""Adaptive tempering for Markov Chains based on [1] and [2].
We want to maximize the rate of round trips that each replica
(state) performs between the two extremal temperatures. We collect
for each temperature statistics about how many replicas we have
observed that have visited the lowest temperature most recently
(replicas that are "on the way up"), and how many have visited the
highest temperature more recently ("on the way down").
These statistics allow us to calculate for each temperature the
fraction of replicas which have visited one of the extremal
temperatures most recently. Ideally, this fraction decreases
linearly over the range of temperatures. Therefore, adaptation
rearranges the temperatures based on the observed fractions such
that we would have observed a linear decrease if we had observed the
actual fractions together with the rearranged temperatures.
To ensure that the empirical histogram of fractions is strictly
monotonically decreasing as temperatures grow (a condition necessary
for the algorithm), we mix in epsilon of a list of linearly
decreasing fractions.
[1] Feedback-optimized parallel tempering Monte Carlo
Helmut G. Katzgraber, Simon Trebst, David A. Huse, Matthias Troyer (2006)
http://arxiv.org/abs/cond-mat/0602085
[2] Robust Parameter Selection for Parallel Tempering
Firas Hamze, Neil Dickson, Kamran Karimi (2010)
http://arxiv.org/abs/1004.2840
"""
def __init__(self, chains, nsteps_per_sweep, nswaps_per_sweep,
burn_roundtrips=0, rng=None):
"""Initializes tempered chain based on component chains.
Args:
chains: list of component chains, in increasing order of temperature
nsteps_per_sweep: the number of transitions to apply to each
component chain before we propose to swap neighboring pairs of
chains.
nswaps_per_sweep: the number of swaps to execute each time the
network chain transition method is called.
burn_roundtrips: the number of replica roundtrips required
before we start updating statistics about replica direction
averages
rng: a random stream (utils.RandomState)
"""
super(AdaptiveTemperedChain, self).__init__(
chains, nsteps_per_sweep, nswaps_per_sweep, rng=rng)
self.burn_roundtrips = burn_roundtrips
def InitializeStatistics(self):
"""Initializes acceptance and replica behavior acceptance statisics.
Statistics:
transitions: the number of transitions; each transition consists
of len(self.chains)*self.nsteps_per_sweep base moves and
len(self.chains)-1 swap moves.
swaps_proposed: maps chain index to number of swap proposals
with next-higher neighbor
swaps_accepted: maps chain index to number of accepted swaps with
next-higher neighbor
replica: a record with statistics that describe the behavior of
replica, i.e., the movement of states across different
temperatures (see InitializeReplicaStatistics).
"""
n = len(self.chains)
self.statistics = utils.SimpleNamespace(
transitions=0,
swaps_proposed=np.zeros(n-1),
swaps_accepted=np.zeros(n-1),
replica=None)
self.InitializeReplicaStatistics()
for chain in self.chains:
chain.InitializeStatistics()
def InitializeReplicaStatistics(self):
"""Initializes replica behavior statistics.
Statistics:
ids: maps chain index to id of replica currently located at
chain
directions: maps replica id to {-1, 0, 1}, depending on whether
replica last visited bottom-most chain (1, "upwards"), top
chain (-1, "downwards"), or neither (0).
tracker: maps chain index to a list that stores for every
replica the direction it has been observed moving in most
recently by chain
hist: for each chain, cumulative moving average of fraction of
replica observed moving upwards
hist_n: for each chain, the number of observations that made it
into the moving average
roundtrips: the total number of replica that made it all the way
from the top to the bottom
"""
n = len(self.chains)
replica_stats = utils.SimpleNamespace(
ids=range(n),
directions=np.zeros(n),
tracker=[[0]*n for _ in range(n)],
hist=np.zeros(n),
hist_n=np.zeros(n),
roundtrips=0)
replica_stats.directions[0] = 1
replica_stats.directions[-1] = -1
replica_stats.hist[0] = 1
self.statistics.replica = replica_stats
def ResetReplicaStatistics(self):
"""Resets replica statistics to initial state."""
self.InitializeReplicaStatistics()
def UpdateReplicaStatistics(self, swap_accepted, level):
"""Updates information about replica behavior after a swap.
Args:
swap_accepted: a Boolean indicating whether the swap was accepted.
level: the index of the swap chain with lower temperature
"""
if not swap_accepted:
return
r = self.statistics.replica
n = len(self.chains)
# Swap the replica ids.
r.ids[level], r.ids[level+1] = r.ids[level+1], r.ids[level]
# Update roundtrips and directions of the replicas at top/bottom chains.
if r.directions[r.ids[0]] == -1:
r.roundtrips += 1
r.directions[r.ids[0]] = 1
r.directions[r.ids[-1]] = -1
# Update tracker: store direction of new replicas at level, level+1.
for k in (level, level+1):
r.tracker[k][r.ids[k]] = r.directions[r.ids[k]]
# Update moving average of replica directions (fraction moving up).
for i in range(n):
if self.statistics.replica.roundtrips > self.burn_roundtrips:
n_up = r.tracker[i].count(1)
n_down = r.tracker[i].count(-1)
assert 0 < n_up + n_down <= n
                p = float(n_up) / (n_up + n_down)
r.hist[i] += (p - r.hist[i]) / (r.hist_n[i] + 1)
r.hist_n[i] += 1
def TransitionsPerRoundtrip(self, stats=None):
"""Returns number of transitions necessary for a single roundtrip.
A single transition encompasses len(self.chains) *
self.nsteps_per_sweep base steps and self.nswaps_per_sweep swaps.
Args:
stats: statistics of a NetworkChain.
Returns:
score: average number of transitions per roundtrip.
"""
stats = stats if stats else self.statistics
if not stats.replica.roundtrips:
return float("inf")
return stats.transitions / stats.replica.roundtrips
def SwapTransition(self, chain1, chain2, level):
"""Proposes to swap states of two chains, accept according to MH rule."""
        swap_accepted = super(AdaptiveTemperedChain, self).SwapTransition(
            chain1, chain2, level)
        self.UpdateReplicaStatistics(swap_accepted, level)
        return swap_accepted
def AdaptTemperatures(self):
"""Computes and sets new temperatures based on replica statistics."""
new_temps = ComputeAdaptedTemperatures(
self.temps, self.statistics.replica.hist)
for chain, new_temp in zip(self.chains, new_temps):
chain.temp = new_temp
| [
"[email protected]"
] | |
13e837f8a0c0ff0d32762df56f54b0d1fdea8bb9 | 3539f98e7b54e58622cabb745117a861817c1e1c | /main.py | d7192d5b91d0bdbff5de41c5cfbfb06bf2599d85 | [] | no_license | youfeng243/mongo-dump | 285e03349f261c56e9755d103ea0e10f81c23676 | 966d5933eed0f78d60ecd95d103686df17c92e08 | refs/heads/master | 2020-12-02T19:16:46.047885 | 2018-02-07T08:25:14 | 2018-02-07T08:25:14 | 96,317,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,491 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: [email protected]
@license: Apache Licence
@file: main.py
@time: 2017/7/1 12:27
"""
import json
import os
import subprocess
import sys
import time
import tools
from config import app_data_config, sleep_time, check_period, data_sync_config, dump_table_flag, dump_path, \
dump_status_file_name, dump_tmp_path
from logger import Logger
from mongo import MongDb
log = Logger("mongo-dump.log").get_logger()
app_data = MongDb(app_data_config['host'], app_data_config['port'], app_data_config['db'],
app_data_config['username'],
app_data_config['password'], log=log)
data_sync = MongDb(data_sync_config['host'], data_sync_config['port'], data_sync_config['db'],
data_sync_config['username'],
data_sync_config['password'], log=log)
TABLE_CONFIG = "table.config"
# Get the names of all tables to dump
def get_all_table_name():
table_set = set()
with open(TABLE_CONFIG) as p_file:
for line in p_file:
table_name = line.strip().strip("\r").strip("\n")
table_set.add(table_name)
return list(table_set)
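# table.config is expected to contain one MongoDB collection name per line,
# for example (hypothetical names, for illustration only):
#   enterprise_base_info
#   annual_report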
# Run a shell command and stream its output to the log
def run_cmd(cmd):
log.info(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
line = p.stdout.readline()
log.info(line)
if line:
sys.stdout.flush()
else:
break
p.wait()
# Remove all dump task collections
def remove_all_task():
for table_name in data_sync.collection_names():
if dump_table_flag in table_name:
data_sync.drop(table_name)
log.info("删除所有表成功, 开始休眠10s")
exit()
# 记录导出文件状态
def record_status_file(date, dump_table_list):
full_path = dump_path + date + "/"
status_file_path = full_path + dump_status_file_name
    # Tables already listed in the status file
    exists_table_set = set()
    # Read the status file contents
if os.path.exists(status_file_path):
with open(status_file_path) as p_file:
for line in p_file:
file_name = line.strip().strip("\r").strip("\n")
table_name = file_name.split(".")[0]
exists_table_set.add(table_name)
    # If the file already exists, do not write it again
# if os.path.exists(status_file_path):
# return
if len(exists_table_set) <= 0:
        # Make sure the batch directory exists
run_cmd("mkdir -p {path}".format(path=dump_path + date))
is_all_in_file = True
for table_name in dump_table_list:
if table_name not in exists_table_set:
is_all_in_file = False
break
    # Start writing the status records
if not is_all_in_file:
with open(status_file_path, mode="w") as p_file:
for name in dump_table_list:
p_file.write(name + ".zip" + "\r\n")
# Split the work into per-day dump tasks
def split_dump_task():
log.info("分解任务开始...")
start_time = time.time()
table_list = get_all_table_name()
dump_table_list = list()
for app_data_table in table_list:
dump_table_list.append(app_data_table)
    # Generate task records
    for period in xrange(1, check_period + 1):
        # Get the date for this period
        _id = tools.get_one_day(period)
        # Write the file list for this date
record_status_file(_id, dump_table_list)
for app_data_table in table_list:
dump_table_name = dump_table_flag + app_data_table
search_item = data_sync.find_one(dump_table_name, {"_id": _id})
if search_item is None:
task_item = {
"_id": _id,
"finish": False,
"createTime": tools.get_now_time(),
"updateTime": tools.get_now_time(),
"startTime": tools.get_start_time(_id),
"endTime": tools.get_end_time(_id)
}
data_sync.insert(dump_table_name, task_item)
continue
log.info("分解任务执行完成..")
end_time = time.time()
log.info('分解任务消耗时间: {t}s'.format(t=end_time - start_time))
# 当前单进程执行导出任务
def execute_dump_task():
log.info("导出任务开始...")
start_exec_time = time.time()
    # Get all table names
    table_list = get_all_table_name()
    # Export the data date by date
    for period in xrange(check_period, 0, -1):
        # Get the date for this period
        _id = tools.get_one_day(period)
        # Iterate over the tables and export each one
for app_data_table in table_list:
dump_table_name = dump_table_flag + app_data_table
task_item = data_sync.find_one(dump_table_name, {'_id': _id})
if task_item is None:
continue
if task_item['finish']:
continue
            # Get the list of unfinished tasks
# for task_item in data_sync.traverse(dump_table_name, {'finish': False}):
start_time = task_item["startTime"]
end_time = task_item["endTime"]
date = task_item["_id"]
            # Temporary export path
dump_date_tmp_path = dump_tmp_path + date + "/"
run_cmd("mkdir -p {path}".format(path=dump_date_tmp_path))
            # Remove any existing exported data
run_cmd("rm -rf {source}{table}.json".format(
source=dump_date_tmp_path,
table=app_data_table))
            # Remove any existing archive
run_cmd("rm -rf {source}{table}.zip".format(
source=dump_date_tmp_path,
table=app_data_table))
cmd = "./mongoexport -h " + app_data_config["host"] + ":" + str(app_data_config["port"]) + " -d " + \
app_data_config[
"db"] + " -c " + app_data_table + " -u " + app_data_config["username"] + " -p " + app_data_config[
"password"] + " -o " + dump_date_tmp_path + app_data_table + ".json" + " -q "
cmd += "'" + json.dumps(
{"$and": [{"_utime": {"$gte": start_time}}, {"_utime": {"$lte": end_time}}]}) + "'"
            # Run the export command
            run_cmd(cmd)
            # Move the files
target_path = dump_path + date + "/"
run_cmd("mkdir -p {path}".format(path=target_path))
            # Compress the data
run_cmd("zip {source}{table}.zip {source}{table}.json".format(
source=dump_date_tmp_path,
table=app_data_table))
            # Move the archive
run_cmd("mv -f {source}{table}.zip {target_path}".format(
source=dump_date_tmp_path,
table=app_data_table,
target_path=target_path))
            # Delete the temporary file
run_cmd("rm -rf {source}{table}.json".format(
source=dump_date_tmp_path,
table=app_data_table))
            # Remove the leftover archive
run_cmd("rm -rf {source}{table}.zip".format(
source=dump_date_tmp_path,
table=app_data_table))
task_item["finish"] = True
task_item["updateTime"] = tools.get_now_time()
            # Record task status
data_sync.insert_batch_data(dump_table_name, [task_item])
log.info("导出任务执行完成..")
end_exec_time = time.time()
log.info('导出任务消耗时间: {t}s'.format(t=end_exec_time - start_exec_time))
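# For reference, the mongoexport command assembled above looks roughly like
# this (sketch; host, credentials and dates elided):
#   ./mongoexport -h HOST:PORT -d DB -c TABLE -u USER -p PASS \
#       -o <dump_tmp_path>/<date>/TABLE.json \
#       -q '{"$and": [{"_utime": {"$gte": START}}, {"_utime": {"$lte": END}}]}'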
# Create indexes
def ensure_index():
table_list = get_all_table_name()
for app_data_table in table_list:
dump_table_name = dump_table_flag + app_data_table
data_sync.create_index(dump_table_name, [('finish', MongDb.ASCENDING)])
def main():
# remove_all_task()
while True:
try:
start_time = time.time()
log.info("开始执行dump任务..")
# 分解任务
split_dump_task()
# 创建索引
ensure_index()
# 执行导出任务
execute_dump_task()
log.info("dump任务执行完成..")
end_time = time.time()
log.info('dump任务消耗时间: {t}s'.format(t=end_time - start_time))
# 休眠时间
time.sleep(sleep_time)
except Exception as e:
log.exception(e)
            # Sleep before retrying
time.sleep(sleep_time)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
01bc570da861c7c1e88b64e8ad0b6c96647329ec | 5d3c7539ba982e58d8cf249fb1f55cf4e248c730 | /Create Maximum Number.py | 6e3493a83a1bcf3c614abb3a73cdf337e80fe2d4 | [] | no_license | jinxin0924/LeetCode | b95bc4321d0a0589d66c18fa5bb17e5d38b2f436 | bdf5518fb387dfa599964425354115dfad49eb00 | refs/heads/master | 2020-04-05T14:04:59.839117 | 2016-09-19T09:44:27 | 2016-09-19T09:44:27 | 42,444,294 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | __author__ = 'Xing'
# Given two arrays of length m and n with digits 0-9 representing two numbers.
# Create the maximum number of length k <= m + n from digits of the two.
# The relative order of the digits from the same array must be preserved.
# Return an array of the k digits. You should try to optimize your time and space complexity.
# Example 1:
# nums1 = [3, 4, 6, 5]
# nums2 = [9, 1, 2, 5, 8, 3]
# k = 5
# return [9, 8, 6, 5, 3]
#
# Example 2:
# nums1 = [6, 7]
# nums2 = [6, 0, 4]
# k = 5
# return [6, 7, 6, 0, 4]
class Solution(object):
def maxNumber(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
def maxList(nums,k):
drop = len(nums) - k
out = []
for num in nums:
while drop and out and out[-1] < num:
out.pop()
drop -= 1
out.append(num)
return out[:k]
def merge(nums1,nums2):
return [max(nums1,nums2).pop(0) for _ in nums1+nums2]
# return [max(a, b).pop(0) for _ in a+b]
return max(merge(maxList(nums1, i), maxList(nums2, k-i))
for i in range(k+1)
if i <= len(nums1) and k-i <= len(nums2))
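# Worked sketch of the two helpers (traceable by hand, added for clarity):
#   maxList([9,1,2,5,8,3], 3) is allowed to drop len(nums)-k = 3 digits and
#   greedily keeps the lexicographically largest length-3 subsequence
#   -> [9, 8, 3].
#   merge([6,7], [6,0,4]) repeatedly pops the head of whichever remaining
#   list compares larger -> [6, 7, 6, 0, 4], matching Example 2.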
s=Solution()
print(s.maxNumber([3, 4, 6, 5],[9, 1, 2, 5, 8, 3],5))
# print(s.maxNumber([6,7],[6,0,4],5))
# print(s.maxNumber([3,9],[8,9],3))
# print(s.maxNumber([6,6,8],[5,0,9],3))
# print(s.maxNumber([3,4,6,5],[9,1,2,5,8,3],5))
# print(s.maxList([6,6,8],2))
# print(s.maxList([9,1,2,5,8,3],3))
def test():
import random
n=random.randint(0,5)
m=random.randint(0,5)
k=random.randint(0,n+m)
nums1=[random.randint(0,9) for i in range(n)]
nums2=[random.randint(0,9) for i in range(m)]
print(nums1,nums2,k,s.maxNumber(nums1,nums2,k))
| [
"[email protected]"
] | |
59f12fb2cc85e4f4266e067f7685062e16ceef87 | 8175f5bcd468236d4739bf4c5e3b6d8e26a65b2e | /Tarea1 - Ensamblador_Maquina/Expresiones regulares/venv/Scripts/pip3-script.py | 1bc6ae06628cedaf9ff542dfa649f74abc2a8450 | [] | no_license | FernandoFigueroa14/Multiprocesadores | 5678b7450f64822df2b513de0add553abc9463dc | 4210eca16aa86aee755d239aadf499e3e8e4c09d | refs/heads/main | 2023-07-10T01:51:17.223370 | 2021-08-15T17:41:29 | 2021-08-15T17:41:29 | 395,199,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | #!"D:\Lenguajes y traductores\Expresiones regulares\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
719032921ebe700cca37282e531011fdbb1c0f6b | c6dcecd43edf7ee1488be38779a59b41781b8371 | /06. Index Numbers/index.py | 36af556092a1b430f841ff225aa9542e4f5b6bcb | [
"MIT"
] | permissive | joshtsengsh/Learn-Coding-with-Python | c56ead768cc9fff38b33e992b6d6368c9ab821d0 | 9280c41f919f60c33a5d7d0a8bc454877692feec | refs/heads/master | 2022-09-11T03:20:46.364526 | 2020-06-04T14:25:47 | 2020-06-04T14:25:47 | 260,665,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | #created by Josh Tseng, 1 May 2020
#This file introduces the concept of index numbers, which are important when handling certain types of data in Python
#In this example, we will be looking at the index positions in strings
sample_string = "This is my string."
#Show the value in the string at index position 0
print(sample_string[0])
#Exercise:
#Change the number in the bracket to: -1, 2, 9, and 34.
#What changes?
| [
"[email protected]"
] | |
c409eb09079f579e2f14e64adf5125cb958596e3 | 4fc7805a55135224c068e9e2ff67983d3a626569 | /Week 6 [JSON,SOA]/json1.py | a826f30dc0bdf0c1e5468b1c91dc8aabfe8a589f | [] | no_license | Pryangan/Using-Python-to-Access-Web-Data | 0da06571fc067c9aaaf25db9d7fb3a190ee1f0d7 | 561ad34bf00207b41c71a475da55ba866ac12874 | refs/heads/master | 2020-04-15T05:22:36.136422 | 2019-01-07T10:59:41 | 2019-01-07T10:59:41 | 164,419,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | import json
data = '''{
"name" : "Pryangan",
"phone" : {
"type" : "intl",
"number" : "9876543210"
},
"email" : {
"hide" : "yes"
}
}'''
info = json.loads(data)
print('Name:',info["name"])
print('Hide:',info['email']['hide'])
| [
"[email protected]"
] | |
58a5ac993e18950c414a11b2bd8fdc06e517b292 | bbc3ba50910f57d00c54d9c16103853dedc9c78a | /src/CreateInsByHooks.py | 1e28875c8a049f5a9e06493c9af22b8021a78da2 | [
"Apache-2.0"
] | permissive | wjtlt3/pyloader | 4bcc98a028e17ba6a54b4033c915463e8374c9bf | 2be0919ab80dbb38b1fdd7969c31647be20e8fa9 | refs/heads/master | 2021-10-08T20:09:17.191412 | 2018-12-17T08:23:56 | 2018-12-17T08:23:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | '''
Created on 2018年10月30日
@author: yinyayun
'''
from loader.hook.DynamicLoadByHooks import DynamicLoadByHooks
if __name__ == '__main__':
base = '../package/demo'
dynamic = DynamicLoadByHooks(base)
print("-----create 001.ImportDemo")
ins1 = dynamic.create_ins('001', 'ImportDemo', 'ImportDemo')
print(ins1.zz())
print("-----create 002.ImportDemo")
ins2 = dynamic.create_ins('002', 'ImportDemo', 'ImportDemo')
print(ins2.zz())
print('run ins1:', ins1.zz())
print('run ins2:', ins2.zz())
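# What the demo is meant to show (note inferred from the loader's design,
# not stated in this file): both instances come from modules that share the
# same file and class name ('ImportDemo') but live under package/demo/001
# and package/demo/002; the import hooks presumably keep the two same-named
# modules separate instead of letting the second load shadow the first.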
| [
"[email protected]"
] | |
3e2a108c6f256bcf94bf27c819c5affb28ecfab2 | 1e09bc56bf2904b349df1873e11da3d527437880 | /lecture-08/stringfn.py | ef0277ea4f2fa1625465ed088e5402016831126e | [] | no_license | codeforcauseorg-archive/DSA-Live-Python-Jun-0621 | f3444f5671cb4985644c7432517477c3585c70fb | e4fe544178d7851c24755242390f39675b99fabe | refs/heads/main | 2023-08-09T08:31:41.449120 | 2021-09-14T16:44:39 | 2021-09-14T16:44:39 | 384,725,085 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | line = "Hello {}, Its {} to meet you."
# print(line.index("o"))
# print(line.format("Amit", "good"))
out = "we are so happy. and then again.".capitalize()
print(out)
# line.endswith("yu.")
| [
"[email protected]"
] | |
a1d938d10d362c370e273a1a5d6abc819fdfd4f6 | 0089c10e2fde9538415623ec2f6b6c213d4b81db | /ex_2_spyder.py | c4356d9992de1ff4f2e547d38f6da10cbf259389 | [] | no_license | isuru-m/RL_labs | c7546b4e5b430cc7899069b53c5670f064a56728 | b2cf1344b9d7388cfe644190c6e0655ef92cc2e0 | refs/heads/master | 2020-04-25T04:02:07.227518 | 2019-03-24T02:01:00 | 2019-03-24T02:01:00 | 172,497,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,968 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 06:02:53 2019
@author: Isuru
"""
import numpy as np
class Gridworld:
def __init__(self):
self.num_rows = 5
self.num_cols = 5
self.num_fields = self.num_cols * self.num_rows
self.gold_reward = 10
self.bomb_reward = -10
self.gold_positions = np.array([23])
self.bomb_positions = np.array([18])
self.terminal_states = np.concatenate((self.gold_positions, self.bomb_positions))
self.V = np.zeros(shape=self.num_fields)
self.rewards = np.ones(shape=self.num_fields) * (-1)
self.rewards[self.bomb_positions] = self.rewards[self.bomb_positions] + self.bomb_reward
self.rewards[self.gold_positions] = self.rewards[self.gold_positions] + self.gold_reward
def GetAvailableActions(self,s):
''' This method outputs the available actions for a given state'''
available_actions = np.full((1,4), False, dtype=bool)
self.s_n = s + self.num_cols
self.s_e = s + 1
self.s_s = s - self.num_cols
self.s_w = s - 1
if self.s_n < self.num_fields:
available_actions[0,0] = True
if self.s_e % self.num_cols > 0:
available_actions[0,1] = True
if self.s_s >= 0:
available_actions[0,2] = True
if self.s_w % self.num_cols < self.num_cols - 1:
available_actions[0,3] = True
return available_actions
def OneStepLookAhead(self, s, possible_actions, gamma=1, e=0.2):
''' This method calculates the value of neighbouring states for a given state
        putting e=0 (deterministic) yields the same result as exercise 1
'''
if not possible_actions[0,0]:
self.s_n = s
if not possible_actions[0,1]:
self.s_e = s
if not possible_actions[0,2]:
self.s_s = s
if not possible_actions[0,3]:
self.s_w = s
NESW_Cells = np.zeros(4)
all_actions = np.array([self.s_n, self.s_e, self.s_s, self.s_w])
for i in range(len(all_actions)):
NESW_Cells[i] = self.rewards[all_actions[i]] + gamma*((1-e)*self.V[all_actions[i]]+0.25*e*(self.V[self.s_n] + self.V[self.s_e] + self.V[self.s_s] + self.V[self.s_w]))
return NESW_Cells
def GetPolicy(self, value):
''' This method finds the optimal policy based on a converged value function'''
policy_init = np.zeros(25, dtype=str)
max_vals = np.zeros(25)
arg_vals = np.zeros(25)
for j in range(len(value)):
pos_actions = self.GetAvailableActions(j)
max_vals = self.OneStepLookAhead(j, pos_actions)
arg_vals[j] = np.argmax(max_vals)
if arg_vals[j] == 0:
policy_init[j]='n'
elif arg_vals[j] == 1:
policy_init[j] = 'e'
elif arg_vals[j] == 2:
policy_init[j] = 's'
elif arg_vals[j] == 3:
policy_init[j] = 'w'
return policy_init
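# The loop below is plain value iteration (sketch of the update it applies):
# for every non-terminal state s,
#   V(s) = max_a [ r(s_a) + gamma * ((1 - e) * V(s_a) + (e / 4) * sum_n V(n)) ]
# where s_a is the neighbour reached by action a (clipped at the walls) and
# n ranges over all four neighbours; iteration stops once the largest
# per-sweep change d drops below theta.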
grid = Gridworld()
theta = 1e-10
delta=np.zeros(grid.num_fields)
counter =0
while 1:
for state in range(grid.num_fields):
if state in grid.terminal_states:
continue
v = grid.V[state]
available_actions = grid.GetAvailableActions(state)
bellman_value_old = grid.OneStepLookAhead(state, available_actions)
grid.V[state] = np.max(bellman_value_old)
delta[state] = abs(v - grid.V[state])
d = np.amax(delta)
counter += 1
if d < theta:
break
v = grid.V
v_disp = grid.V.reshape(5,5)
print(np.flip(v_disp,0))
policy = grid.GetPolicy(v)
pol_disp = policy.reshape(5,5)
print(np.flip(pol_disp,0)) | [
"[email protected]"
] | |
51495f4dae656f1832ef0dbee351dfe3ff503ca3 | e23749ca0f78c3e2ecfca58352ad65f3c4d487bb | /Face-Recognition/picture_menu/local_main.py | da580ed14c821c7fc325ddbce72561bb4e578b15 | [] | no_license | davichiar/2018_KidsHaru-FaceRecognition | a923c8bcc1a83d549d55ddcaa8e4da37bc366d4a | 5c2d43202dae45ae49b302104533900c2fd48bec | refs/heads/master | 2020-03-23T19:13:28.142373 | 2019-01-22T05:56:29 | 2019-01-22T05:56:29 | 141,961,423 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | import json
import requests
import os
import sys
import shutil
import urllib.request
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from utility import path, download, pickle
from picture_util import detecting, clustering, saving
def local_play():
# =========================================
    # Download the images
url = path.getDirname("image")
data = download.getLinkDownload(url)
# =========================================
    # Save the images
url = path.getDirname("pickle_data") + "/picture_pickle.pickle"
data_temp, data = saving.picture_saving(url, data)
# print(len(data), len(data_temp))
# =========================================
    # Reload from pickle
data = pickle.ReadPickle(url)
    print('pickle load complete!')
index_url = path.getDirname("pickle_data") + "/index_pickle.pickle"
box_url = path.getDirname("pickle_data") + "/box_pickle.pickle"
encoding_url = path.getDirname("pickle_data") + "/encoding_pickle.pickle"
indexE_url = path.getDirname("pickle_data") + "/indexE_pickle.pickle"
encodings_url = path.getDirname("pickle_data") + "/encodings_pickle.pickle"
index = saving.util_saving("index", index_url)
box = saving.util_saving("box", box_url)
encoding = saving.util_saving("encoding", encoding_url)
indexE = saving.util_saving("indexE", indexE_url)
encodings = saving.util_saving("encodings", encodings_url)
# =========================================
    # Detect faces in the images
# data.loc[data.index == 1, 'encoding'] = 'complete'
data_temp = data.loc[data['encoding'] == "empty"]
result = data_temp.index
for x in range(len(data)):
if x in result:
box_t, encoding_t = detecting.faceDetect(data.ix[x])
if len(box_t) > 0 and len(encoding_t) > 0:
index.append(x)
box.append(box_t)
encoding.append(encoding_t)
temp_len = len(index) - 1
for y in range(len(box[temp_len])):
indexE.append(str(x) + "." + str(y))
encodings.append( encoding[temp_len][y] )
data.loc[data.index == x, 'box'] = len(box[temp_len])
data.loc[data.index == x, 'encoding'] = "complete"
clustering.cluster(data, indexE, encodings)
else:
data.loc[data.index == x, 'box'] = -1
data.loc[data.index == x, 'encoding'] = "Fail"
# =========================================
    # Save everything
pickle.WritePickle(url, data)
pickle.WritePickle(index_url, index)
pickle.WritePickle(box_url, box)
pickle.WritePickle(encoding_url, encoding)
pickle.WritePickle(indexE_url, indexE)
pickle.WritePickle(encodings_url, encodings)
print(data)
| [
"[email protected]"
] | |
7f41409c32f68944f9c19faceb81e9904a6b9f4d | 2ef5c8fe070db835f944250be648d32c9ffe208f | /backjoon/10818.py | fa6c8147a70a384755ee8763baa009391dcbc088 | [] | no_license | Hyoukjoo/study-algo | dfde817fdccfd7f6fe03a35d9b08e8b7de6f3ef0 | fd99ee7b03910226b03eba369319c7d2ba5824ed | refs/heads/main | 2023-04-20T05:16:55.867061 | 2021-05-09T12:24:03 | 2021-05-09T12:24:03 | 358,767,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | n = int(input())
nums = list(map(int, input().split()))
print(f'{min(nums)} {max(nums)}') | [
"[email protected]"
] | |
04ff18cb3a28a6777a34d20828fd38fb9815439b | 8aa352851122a6ba3cd4d11e0ad89a10058f5206 | /mysite/mysite/urls.py | 953baa1c0eadcffe5be08b693cc1c20fe35c3237 | [] | no_license | marchell93/django_news | 3f8d7134ee4d1df40897e9dc0a4bd5bac51ebb07 | 7d2db7b0de497206ed4357ca25de55460605b59f | refs/heads/main | 2023-01-02T07:29:40.159772 | 2020-10-27T08:39:33 | 2020-10-27T08:39:33 | 307,632,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('ckeditor/', include('ckeditor_uploader.urls')),
path('captcha/', include('captcha.urls')),
path('', include('news.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
421a98d91f2a1ef9d1ef0c88cb7e72d9f3a0a075 | 0bf6b37f856d3fb7e0970b58c6f4cf49fbd74b35 | /preprocess.py | 8c3b9b71639be580838a01fffb51e2006995fe95 | [] | no_license | Skinny-Joey/Retrieval-Automatic-Comment-System | a08a67679bfabcf021b8f253bb4ec392dc09afb4 | d925397152bf9a31b2a0a419ef1a0c94e543b565 | refs/heads/master | 2022-04-03T06:05:31.859650 | 2018-11-21T15:11:04 | 2018-11-21T15:11:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,390 | py | # -*- coding: utf-8 -*-
import os
import cPickle
import json
import jieba
# To load different data, only this function needs to change
def train_data(path):
print 'start process ', path
data = []
with open(path, 'r') as f:
for line in f.readlines():
line = line.strip().split('\t')
post = line[0].split()
comm_true = line[1].split()
comm_false = line[2].split()
if len(post) > 30 or len(comm_true) > 30 or len(comm_false) > 30:
print(len(post), len(comm_true), len(comm_false))
if len(post) == 0 or len(comm_true) == 0 or len(comm_false) == 0:
print(len(post), len(comm_true), len(comm_false))
data.append([post, comm_true, comm_false])
return data
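# Expected train.txt layout (sketch inferred from the parsing above): one
# example per line with three tab-separated fields -- post, true comment,
# mismatched comment -- each field already tokenized with spaces, e.g.
#   how are you\tfine thank you\ttotally unrelated reply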
def dev_data(path):
print 'start process ', path
data = []
with open(path, 'r') as f:
for line in f.readlines():
if line == "--------------------------------------------\n":
continue
if line[:4] == "post":
post = line.strip().split()[1:]
elif line[:4] == "comm":
comm = line.strip().split()[1:]
data.append([post, comm, "1"])
elif line[:8] == "mismatch":
mis_comm = line.strip().split()[1:]
data.append([post, mis_comm, "0"])
return data
def build_word_count(data):
wordCount = {}
def add_count(lst):
for word in lst:
if word not in wordCount:
wordCount[word] = 0
wordCount[word] += 1
for one in data:
[add_count(x) for x in one[0:3]]
print 'word type size ', len(wordCount)-2
return wordCount
def build_word2id(wordCount, threshold=10):
word2id = {'<PAD>': 0, '<UNK>': 1}
if os.path.exists("data/vocab_50000.txt"):
with open("data/vocab_50000.txt", 'r') as fr:
for word in fr.readlines():
word2id[word.strip()] = len(word2id)
else:
for word in wordCount:
if wordCount[word] >= threshold:
if word not in word2id:
word2id[word] = len(word2id)
            # Splitting words into characters does not apply to English
'''
else:
chars = list(word)
for char in chars:
if char not in word2id:
word2id[char] = len(word2id)
'''
print 'processed word size ', len(word2id)
return word2id
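# Note (sketch): when data/vocab_50000.txt exists it is read as one token
# per line; ids 0 and 1 stay reserved for <PAD> and <UNK>, so file tokens
# receive ids from 2 upward in reading order.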
def transform_train_to_id(raw_data, word2id):
data = []
def map_word_to_id(word):
output = []
if word in word2id:
output.append(word2id[word])
        # Splitting words into characters does not apply to English
else:
'''
chars = list(word)
for char in chars:
if char in word2id:
output.append(word2id[char])
else:
'''
# UNK token
output.append(1)
return output
def map_sent_to_id(sent):
output = []
for word in sent:
output.extend(map_word_to_id(word))
return output
for one in raw_data:
post = map_sent_to_id(one[0])
comm1 = map_sent_to_id(one[1])
comm2 = map_sent_to_id(one[2])
data.append([post, comm1, comm2])
return data
def transform_dev_to_id(raw_data, word2id):
data = []
def map_word_to_id(word):
output = []
if word in word2id:
output.append(word2id[word])
# 将词拆成字对英文不适用
else:
'''
chars = list(word)
for char in chars:
if char in word2id:
output.append(word2id[char])
else:
'''
# UNK token
output.append(1)
return output
def map_sent_to_id(sent):
output = []
for word in sent:
output.extend(map_word_to_id(word))
return output
for one in raw_data:
post = map_sent_to_id(one[0])
comm = map_sent_to_id(one[1])
flag = one[2]
data.append([post, comm, flag])
return data
def process_data(data_path, threshold):
train_file_path = data_path + 'train.txt'
dev_file_path = data_path + 'dev.txt'
test_file_path = data_path + 'test.txt'
path_lst = [train_file_path, dev_file_path, test_file_path]
output_path = [data_path + x for x in ['train.pickle', 'dev.pickle', 'test.pickle']]
return _process_data(path_lst, threshold, output_path)
def _process_data(path_lst, word_min_count=5, output_file_path=[]):
raw_data = []
train_file_path = path_lst[0]
raw_data.append(train_data(train_file_path))
dev_file_path = path_lst[1]
raw_data.append(dev_data(dev_file_path))
word_count = build_word_count([y for x in raw_data for y in x])
test_file_path = path_lst[2]
raw_data.append(dev_data(test_file_path))
with open('data/word-count.obj', 'wb') as f:
cPickle.dump(word_count, f)
word2id = build_word2id(word_count, word_min_count)
with open('data/word2id.obj', 'wb') as f:
cPickle.dump(word2id, f)
i = 0
for one_raw_data, one_output_file_path in zip(raw_data, output_file_path):
with open(one_output_file_path, 'wb') as f:
if i == 0:
one_data = transform_train_to_id(one_raw_data, word2id)
else:
one_data = transform_dev_to_id(one_raw_data, word2id)
i += 1
cPickle.dump(one_data, f)
return len(word2id)
def build_word_embedding():
with open('/home/sunyawei/glove.840B.300d.txt', 'r') as fr:
glove_word_emb = dict()
for line in fr.readlines():
word = line.strip().split()[0]
glove_word_emb[word] = line
with open('data/vocab_50002.txt', 'r') as fr:
word_embedding = str()
for line in fr.readlines():
word = line.strip()
if word in glove_word_emb:
word_embedding += glove_word_emb[word]
else:
emb = [word] + ['0']*300
word_embedding += ' '.join(emb) + '\n'
with open('data/word_emb_50002.txt', 'w') as fw:
fw.write(word_embedding)
if __name__ == '__main__':
process_data('data/', 5)
# build_word_embedding()
| [
"[email protected]"
] | |
a38bc5dad2aaa1bda7ad1e5a463b3066f2dd7422 | 290499258919c5ca039cc725bbc5d0a47511f1ef | /pbtotflite.py | cdcbf5a2a694a2c46335cb28fffdbf4ecc8886b4 | [] | no_license | Medicmind/grading_demo | d4f22eefa8131faa64768a830ad2e4ca80c8a044 | f53b1f3093966ee17be9e03b69ddd0a8a4a63ba3 | refs/heads/master | 2022-12-11T04:08:56.217525 | 2022-12-03T00:06:26 | 2022-12-03T00:06:26 | 140,697,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #Convert a Medicmind grading classifier frozen_model.pb file into a keras tflite model
import tensorflow as tf
from tensorflow_addons.optimizers import LAMB
import glob
import sys
tf.keras.optimizers.Lamb = LAMB
if True:
converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph('camera/data/frozen_model.pb',
input_arrays = ['Reshape'],output_arrays = ['inception_v3/logits/logits/xw_plus_b'])
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS,
tf.lite.OpsSet.SELECT_TF_OPS
]
print('converting')
tflite_model = converter.convert()
print('writing')
with open('pruned.lite', "wb") as f:
f.write(tflite_model)
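# Hedged usage sketch for the converted model (standard TFLite interpreter
# API, not part of the original script):
#   interpreter = tf.lite.Interpreter(model_path='pruned.lite')
#   interpreter.allocate_tensors()
#   # feed a preprocessed image tensor into the 'Reshape' input, invoke(),
#   # then read the 'inception_v3/logits/logits/xw_plus_b' output.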
| [
"[email protected]"
] | |
5789883cf8c75a130c8dc57f3ab2e517f8d42824 | eab9d48cfc6318ab3c94a6156662826209db27f4 | /server-src/NNModels/allPairs.py | 94f2904ae6d15729b3223743ebbdc3a98ebbc1bc | [] | no_license | schasins/unstructured-data-scraping-extension | 715d77d832bb5ffe185ebfddfe11848e24955c60 | ebdbb758f665b596d45b084662583665ac443e2f | refs/heads/master | 2021-01-21T21:47:26.042228 | 2016-03-14T16:09:42 | 2016-03-14T16:09:42 | 42,411,510 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 35,306 | py | #!/usr/bin/python
from operator import attrgetter
import libfann
import re
import copy
import sys
import array
import csv
import os
import itertools
import time
import random
import math
# **********************************************************************
# Data structure for documents, for dealing with single-node
# features that depend on other textboxes in the same document
# **********************************************************************
def values(list, attrName):
return map(attrgetter(attrName),list)
def highest(list, attrName):
attrVals = values(list, attrName)
return max(attrVals)
def lowest(list, attrName):
attrVals = values(list, attrName)
return min(attrVals)
def isNumber(x):
return (isinstance(x, (int, long, float, complex)) and not isinstance(x,bool))
class Document:
def __init__(self, boxList, name):
self.boxList = boxList
self.name = name
def addSingleNodeFeaturesOneDocument(self):
# for numerical features, compare each value to the range of values in the document
self.addSmallestLargestRanksForNumerical()
# for some features, compare to the docHeight, docWidth
self.addPercentagesForWidthAndHeightRelated()
def addSmallestLargestRanksForNumerical(self):
# for numerical features, compare each value to the range of values in the document
# first collect the range of values in the document
# TODO: the below relies on the assumption that all numeric values are shared by all boxes
# if that ever changes, update this
ranges = {}
firstBox = self.boxList[0]
firstBoxFeatures = firstBox.getFeatures()
for feature in firstBoxFeatures:
if (isNumber(firstBox.getFeature(feature)) and not feature.startswith("wordfreq")): # we don't want to add ranks and so on for wordfreqs
rangeSet = set()
for box in self.boxList:
rangeSet.add(box.getFeature(feature))
ranges[feature] = sorted(list(rangeSet))
# then add a feature that gives the ranking
for feature in firstBoxFeatures:
if (isNumber(firstBox.getFeature(feature)) and not feature.startswith("wordfreq")):
rangeLs = ranges[feature]
rangeLsLen = len(rangeLs)
for box in self.boxList:
index = rangeLs.index(box.getFeature(feature))
box.addFeature(feature+"-smallest-rank", index + 1)
box.addFeature(feature+"-largest-rank", rangeLsLen - index)
def addPercentagesForWidthAndHeightRelated(self):
# first figure out some whole-document stuff
docTop = lowest(self.boxList, "top")
docLeft = lowest(self.boxList, "left")
docHeight = highest(self.boxList, "bottom") - docTop
docWidth = highest(self.boxList, "right") - docLeft
# for some features, compare to the docHeight, docWidth
for box in self.boxList:
for feature in ["right", "left"]:
box.addFeature(feature+"-relative", float(box.getFeature(feature))-docLeft)
for feature in ["right-relative", "left-relative", "width"]:
box.addFeature(feature+"-percent", float(box.getFeature(feature))/docWidth)
for feature in ["top", "bottom"]:
box.addFeature(feature+"-relative", float(box.getFeature(feature))-docTop)
for feature in ["top-relative", "bottom-relative", "height"]:
box.addFeature(feature+"-percent", float(box.getFeature(feature))/docHeight)
def allBoxesFeatures(self):
return reduce(lambda acc, box : acc.union(box.getFeatures()), self.boxList, set())
# **********************************************************************
# Data structure for textboxes, tracking single-node features
# **********************************************************************
class Box:
def __init__(self, left, top, right, bottom, text, label, otherFeaturesDict, name="dontcare"):
self.left = left
self.top = top
self.right = right
self.bottom = bottom
self.text = text
self.label = label
self.otherFeaturesDict = otherFeaturesDict
self.features = {}
self.numFeatureVector = array.array('f')
self.name = name
self.addFeatures()
def addFeatures(self):
for coord in ["left","top","right","bottom"]:
self.addFeature(coord, attrgetter(coord)(self))
self.addFeature("width", self.right-self.left)
self.addFeature("height", self.bottom-self.top)
self.addWordFeatures()
for feature in self.otherFeaturesDict:
self.addFeature(feature, self.otherFeaturesDict[feature])
def __str__(self):
return self.name
def addFeature(self, featureName, value):
self.features[featureName] = value
def hasFeature(self, featureName):
return featureName in self.features
def getFeature(self, featureName):
return self.features[featureName]
def getFeatureSafe(self, featureName):
if self.hasFeature(featureName):
return self.getFeature(featureName)
else:
return 0
def getFeatures(self):
return self.features.keys()
def addWordFeatures(self):
wordsStr = self.text.strip().lower()
        # '+' so the split pattern can never match the empty string
        words = re.split("[\s\.,\-\/\#\!\$%\^&\*\;\:\{\}=\-\_\`\~\(\)]+", wordsStr)
numWords = len(words)
self.addFeature("numwords", numWords)
wordFreqs = {}
for word in words:
wordFreqs[word] = wordFreqs.get(word, 0) + 1
for word in wordFreqs:
self.addFeature("wordfreq-"+word, wordFreqs[word])
self.addFeature("numuniquewords", len(wordFreqs.keys()))
def setNumFeatureVector(self, numFeatureList):
a = array.array('f')
for feature in numFeatureList:
if not self.hasFeature(feature):
if feature.startswith("wordfreq"): #special case because for that we want to just set the count to 0
a.append(0)
else:
print "Freak out! One of our boxes doesn't have a numeric feature so we don't know what value to put in. Feature:", feature
exit(1)
else:
try:
a.append(self.getFeature(feature))
except:
print feature
print self.getFeature(feature)
exit()
self.numFeatureVector = a
def wholeSingleBoxFeatureVector(self):
vec = list(self.numFeatureVector)
return vec
# **********************************************************************
# CSV details
# **********************************************************************
class CSVHandling():
@staticmethod
def canInterpretAsFloat(s):
try:
float(s)
return True
except ValueError:
return False
@staticmethod
def csvToBoxlists(csvname):
csvfile = open(csvname, "rb")
reader = csv.reader(csvfile, delimiter=",", escapechar='\\', quotechar="\"")
documents = {}
boxIdCounter = 0
firstRow = True
columnTitles = []
numColumns = 0
specialElements = ["doc", "left", "top", "right", "bottom", "text", "label"]
for row in reader:
if firstRow:
firstRow = False
columnTitles = row
numColumns = len(columnTitles)
for specialElement in specialElements:
if specialElement not in columnTitles:
print "Freak out! One of the column titles we really need isn't present:", specialElement
else:
sVals = {}
oVals = {}
for i in range(numColumns):
valType = columnTitles[i]
if valType in ["font-family","font-style","font-weight","color","background-color","font_family", "column"]:
# for now we don't have a good way of turning these into booleans or numeric features
# todo: decide how to actually deal with categorical things like this
continue
targetDict = oVals
if valType in specialElements:
targetDict = sVals
val = row[i]
if (len(row)) != numColumns:
raise Exception("Malformed dataset file. Number of cells is not consistent across rows.")
if valType not in ["text", "doc", "label"] and CSVHandling.canInterpretAsFloat(val):
val = float(val)
elif valType not in ["text", "doc", "label"]:
# for now we need everything to be numbers, so...
if val == "TRUE" or val == "True":
val = 1
elif val == "FALSE" or val == "False":
val = -1
else:
val = 0
targetDict[valType] = val
if sVals["left"] < 0 or sVals["top"] < 0:
# for now, filter out boxes that appear offscreen here. might want to filter these earlier
continue
box = Box(sVals["left"], sVals["top"], sVals["right"], sVals["bottom"], sVals["text"], sVals["label"], oVals, str(boxIdCounter))
boxIdCounter += 1
boxList = documents.get(sVals["doc"], [])
boxList.append(box)
documents[sVals["doc"]] = boxList
documentList = []
for feature in documents:
newDocument = Document(documents[feature], feature)
newDocument.addSingleNodeFeaturesOneDocument() # there are some features that depend on the whole doc, so let's add those now
documentList.append(newDocument)
return documentList
# **********************************************************************
# Data structures for custom filter synthesis
# **********************************************************************
class FilterComponent():
def __init__(self, colIndex, lessEq, threshold, numFiltered):
self.colIndex = colIndex
self.lessEq = lessEq
self.threshold = threshold
self.numFiltered = numFiltered
def __str__(self):
op = ">="
if self.lessEq:
op = "<="
return "row["+str(self.colIndex)+"] "+op+" "+str(self.threshold)
def stringWithHeadings(self, headings):
op = ">="
if self.lessEq:
op = "<="
return "row["+headings[self.colIndex]+"] "+op+" "+str(self.threshold)
def accepts(self, row):
if self.lessEq:
return row[self.colIndex] <= self.threshold
else:
return row[self.colIndex] >= self.threshold
class Filter():
def __init__(self, filterComponentList):
self.filterComponentList = filterComponentList
def __str__(self):
return " or ".join(map(str, self.filterComponentList))
def stringWithHeadings(self, headings):
return " or ".join(map(lambda x: x.stringWithHeadings(headings), self.filterComponentList))
def numFiltered(self, dataset):
numFilteredCounter = 0
for row in dataset:
if self.accepts(row):
numFilteredCounter += 1
return numFilteredCounter
def test(self, dataset):
numDatasetRows = len(dataset)
numFilteredCounter = 0
numFilteredThatHaveLabel = 0
for row in dataset:
if self.accepts(row):
numFilteredCounter += 1
if row[0] != noLabelString:
numFilteredThatHaveLabel += 1
print "num rows in test set", numDatasetRows
print "num rows in test set that are filtered", numFilteredCounter
print "num rows in test set that are filtered but shouldn't be (have labels)", numFilteredThatHaveLabel
def accepts(self, row):
for filterComponent in self.filterComponentList:
if filterComponent.accepts(row):
return True # we're ORing, so return true if any accept
return False
# this is a weird filter, because the filtered things are the things we're throwing out -- remember, we made the filter for getting rid of "nolabel" items
def filterDataset(self, dataset):
outputDataset = []
for row in dataset:
if not self.accepts(row):
outputDataset.append(row)
return outputDataset
# **********************************************************************
# Custom filter synthesis
# **********************************************************************
def synthesizeFilter(dataset, numericalColIndexes):
# first let's decide how many "nolabel" items we want to filter
labelCount = 0
nolabelCount = 0
for row in dataset:
if row[0] != noLabelString:
labelCount += 1
else:
nolabelCount += 1
targetNumFiltered = nolabelCount - (2 * labelCount) # there should be at most 2 nolabels per label in the output dataset
print "number of rows in dataset:", len(dataset)
print "number of rows with labels:", labelCount
print "target number of rows to filter:", targetNumFiltered
possibleFilters = []
bestFilterSoFar = None
bestFilterScore = 0
for currColIndex in numericalColIndexes:
# loop for finding the lowest col val associated with a label, highest col val associated with label
lowestLabel = sys.maxint
highestLabel = - sys.maxint
for row in dataset:
label = row[0]
if label != noLabelString:
currVal = row[currColIndex]
if currVal < lowestLabel:
lowestLabel = currVal
elif currVal > highestLabel:
highestLabel = currVal
# loop for counting rows with col vals below lowestLabel, finding highest val below lowestLabel, counting rows with col vals above highestLabel, finding lowest val above highestLabel
startNum = 0 # the number of nolabel values at the start of the sorted col, before the first labeled value
endNum = 0 # the number of nolabel values at the end of the sorted col, after the last labeled value
startThreshold = - sys.maxint # don't actually want to use the labeled val as the threshold. better to be cautious, use the highest val associated with a nolabel
endThreshold = sys.maxint # don't actually want to use the labeled val as the threshold. better to be cautious, use the lowest val associated with a nolabel
for row in dataset:
currVal = row[currColIndex]
if currVal < lowestLabel:
startNum += 1
if currVal > startThreshold:
startThreshold = currVal
elif currVal > highestLabel:
endNum += 1
if currVal < endThreshold:
endThreshold = currVal
if startNum > 0:
newFilter = FilterComponent(currColIndex, True, startThreshold, startNum)
possibleFilters.append(newFilter)
if startNum > bestFilterScore:
bestFilterSoFar = newFilter
bestFilterScore = startNum
if endNum > 0:
newFilter = FilterComponent(currColIndex, False, endThreshold, endNum)
possibleFilters.append(newFilter)
if endNum > bestFilterScore:
bestFilterSoFar = newFilter
bestFilterScore = endNum
print "best single filter score:", bestFilterScore
# if a single filter is sufficient, let's go for that
if bestFilterSoFar.numFiltered > targetNumFiltered:
return Filter([bestFilterSoFar])
bestFilterSoFar = Filter([bestFilterSoFar])
# let's try using more than one
maxComponents = 3 # don't want to go beyond 3 for fear of overfitting
# we'll try better combinations sooner if we first sort the list of possible filters
# this is worth it since testing combinations on a large dataset is pretty expensive
possibleFilters.sort(key=lambda x: x.numFiltered, reverse=True)
for i in range(2, maxComponents + 1):
filterCombos = itertools.combinations(possibleFilters, i)
for filterCombo in filterCombos:
if filterCombo[0].numFiltered < bestFilterScore/i:
# recall that we sorted the list first, and combinations retains sorting: ABCD -> AB, AC, AD, BC, BD, CD
# so if we get a filter where the first component filters less than half of what we need, and only 2
# components are allowed, we know we can call off this search
break
# same idea here -- can't do better than the sum
sumFiltered = 0
for component in filterCombo:
sumFiltered += component.numFiltered
if sumFiltered < bestFilterScore:
continue
f = Filter(list(filterCombo))
numFiltered = f.numFiltered(dataset)
if numFiltered > bestFilterScore:
bestFilterScore = numFiltered
bestFilterSoFar = f
print "best filter with no more than", i, "components:", bestFilterScore
if bestFilterScore > targetNumFiltered:
return bestFilterSoFar
return bestFilterSoFar
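# Shape of a synthesized filter (illustrative sketch): a disjunction of up
# to three per-column threshold tests, e.g.
#   row[12] <= 0.4 or row[3] >= 87.0
# Each component's threshold comes from values strictly outside the range
# ever seen for a labeled box, so every row the filter accepts can be
# discarded as "nolabel" without losing labeled data.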
# **********************************************************************
# Helpers
# **********************************************************************
def splitDocumentsIntoTrainingAndTestingSets(docList, trainingPortion):
numDocuments = len(docList)
splitPoint = int(trainingPortion*numDocuments)
trainingDocuments = docList[:splitPoint]
testingDocuments = docList[splitPoint:]
return trainingDocuments, testingDocuments
# converts a set of documents to feature vectors
def datasetToRelation(docList, features):
data = []
firstRow = ["label", "docName"] + features
data.append(firstRow)
i = 0
for doc in docList:
i += 1
for box in doc.boxList:
box.setNumFeatureVector(features)
row = [box.label, doc.name]
featureVec = box.wholeSingleBoxFeatureVector()
row = row + featureVec
data.append(row)
return data
def popularSingleBoxFeatures(docList, targetPercentDocuments):
# first go through each document and figure out the single-node features for the document
featureLists = []
for doc in docList:
featureLists.append(doc.allBoxesFeatures())
# decide on the filtered set of single-node features that is interesting to us, based on how many
# different document use each single-node feature
featureScores = {}
for featureList in featureLists:
for feature in featureList:
featureScores[feature] = featureScores.get(feature, 0) + 1
numberOfDocumentsThreshold = int(len(docList)*targetPercentDocuments)
popularFeatures = [k for k, v in featureScores.items() if v >= numberOfDocumentsThreshold]
print "decided on a feature set with", len(popularFeatures), "features"
return popularFeatures
class LabelHandler():
    def __init__(self, labelLs):
        # use instance attributes: class-level mutable defaults would be
        # shared (and mutated) across all LabelHandler instances
        self.labelsToLabelIds = {}
        self.labelIdsToLabels = labelLs
        for i in range(len(labelLs)):
            self.labelsToLabelIds[labelLs[i]] = i
        self.numLabels = len(labelLs)
def getOneInNRepForLabel(self, label):
labelVec = [-1]*self.numLabels
labelVec[self.labelsToLabelIds[label]] = 1
return labelVec
def getXInNRepForLabels(self, labelLs):
labelVec = [-1]*self.numLabels
for label in labelLs:
labelVec[self.labelsToLabelIds[label]] = 1
return labelVec
def closestLabel(self, labelVec):
winningIndex = labelVec.index(max(labelVec))
return self.labelAtIndex(winningIndex)
def labelsFromNetAnswer(self, labelVec):
indices = [i for i, x in enumerate(labelVec) if x > 0]
labels = map(lambda x: self.labelAtIndex(x), indices)
return labels
def getLabelForOneInNRep(self, labelVec):
index = labelVec.index(1)
return self.labelAtIndex(index)
def getLabelsForXInNRep(self, labelVec):
indices = [i for i, x in enumerate(labelVec) if x == 1]
labels = map(lambda x: self.labelAtIndex(x), indices)
return labels
def labelAtIndex(self, index):
return self.labelIdsToLabels[index]
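# Label-encoding sketch (hypothetical labels; outputs checkable by hand):
# with lh = LabelHandler(["title", "price", "nolabel"]):
#   lh.getOneInNRepForLabel("price")            -> [-1, 1, -1]
#   lh.getXInNRepForLabels(["title", "price"])  -> [1, 1, -1]
#   lh.closestLabel([0.2, 0.9, -0.5])           -> "price"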
def getLabelsFromDataset(dataset):
labelSet = set()
for row in dataset:
labelStr = row[0]
labels = labelStr.split("|")
for label in labels:
labelSet.add(label)
return list(labelSet)
def relationToDocuments(datasetRaw, labelHandler):
documents = {}
for row in datasetRaw:
docName = row[1]
docRows = documents.get(docName, [])
try:
label = labelHandler.getXInNRepForLabels(row[0].split("|"))
except:
print "Hey! This is bad. You saw a label that labelHandler doesn't have.", row[0]
continue
docRows.append((row[2:], label))
documents[docName] = docRows
return documents
# convert the purely relational form to (input, output) pairs, convert output to vector form
def saveDatasetMemConscious(datasetRaw, trainingSetFilename, labelHandler):
# we want to make an input vector from each pair of textboxes in each document
documentsNums = relationToDocuments(datasetRaw, labelHandler)
# doing all the string conversions for each pair is silly. let's avoid that
documents = {}
for key in documentsNums:
strPairs = []
pairs = documentsNums[key]
for pair in pairs:
inputStr = " ".join(map(lambda x: str(x), pair[0]))
outputStr = " ".join(map(lambda x: str(x), pair[1]))
strPairs.append((inputStr,outputStr))
documents[key] = strPairs
# let's get the data we need for starting the output file
randomKey = documentsNums.keys()[0]
inputSize = len(documentsNums[randomKey][0][0]) * 2 # times 2 because we'll be sticking together 2 diff boxes for each row
outputSize = len(documentsNums[randomKey][0][1]) * 2
# let's just estimate how large this will actually be...
numDatapoints = 0
for document in documents:
numBoxes = len(documents[document])
numDatapoints += numBoxes*(numBoxes - 1)
print "expected number of datapoints in final dataset:", numDatapoints
fileHandle = NNWrapper.startDatasetFile(numDatapoints, inputSize, outputSize, trainingSetFilename)
batchSize = 10000
outputDataset = []
docsSoFar = 0
boxCounter = 0
counter = 0
for document in documents:
boxes = documents[document]
        boxPairs = itertools.permutations(boxes, 2) # is there a nice way to do this with combinations instead of permutations? or do we want the largest dataset we can get? probably yes.
for pair in boxPairs:
boxCounter += 1
outputDataset.append(([pair[0][0],pair[1][0]], [pair[0][1],pair[1][1]])) # (input, output)
if boxCounter == batchSize:
print "data points so far:", (counter + 1) * batchSize
NNWrapper.addToDatasetFile(fileHandle, outputDataset)
counter += 1
boxCounter = 0
outputDataset = [] # clear it out, because this can get huge even for one document
docsSoFar += 1
print "documents so far (out of", len(documents.keys()), "):", docsSoFar
# and let's flush out anything left in here
NNWrapper.addToDatasetFile(fileHandle, outputDataset)
print "last addition to dataset file. total data points:", counter * batchSize + len(outputDataset)
print "predicted num datapoints:", numDatapoints
fileHandle.close()
return inputSize, outputSize
def convertColumnToRange(dataset, colIndex, newMin, newMax):
rangeAllowed = newMax - newMin
values = map(lambda row: row[colIndex], dataset)
oldMax = max(values)
oldMin = min(values)
oldRange = (oldMax - oldMin)
if oldRange == 0:
# print "new constant col after filtering", colIndex
raise Exception("trying to convert a constant column to a range")
for j in range(len(dataset)):
dataset[j][colIndex] = (float((dataset[j][colIndex] - oldMin) * rangeAllowed) / oldRange) + newMin
return (oldMin, oldMax)
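# Worked example: a column whose observed range is [0, 10], rescaled to
# [-1, 1], maps v -> (v - 0) * 2 / 10 + (-1), so 0 -> -1.0, 5 -> 0.0 and
# 10 -> 1.0. The returned (oldMin, oldMax) lets test data be squashed into
# the same interval later via convertColumnToRangeCutoff.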
# the same as the normal convertColumnToRange, but uses fixed oldrange, so anything out of the target oldrange gets pushed into that range first
def convertColumnToRangeCutoff(dataset, colIndex, newMin, newMax, oldMin, oldMax):
rangeAllowed = newMax - newMin
oldRange = (oldMax - oldMin)
for i in range(len(dataset)):
if dataset[i][colIndex] > oldMax:
dataset[i][colIndex] = oldMax
elif dataset[i][colIndex] < oldMin:
dataset[i][colIndex] = oldMin
dataset[i][colIndex] = (float((dataset[i][colIndex] - oldMin) * rangeAllowed) / oldRange) + newMin
def scaleRelation(datasetRaw, ranges=None):
dataset = datasetRaw[1:]
if ranges == None:
ranges = []
for i in range(2, len(dataset[0])): # start at 2 because we don't do this for labels or document names
oldRange = convertColumnToRange(dataset, i, -1, 1)
ranges.append(oldRange)
else:
for i in range(2, len(dataset[0])): # start at 2 because we don't do this for labels or document names
currColRange = ranges[i-2]
convertColumnToRangeCutoff(dataset, i, -1, 1, currColRange[0], currColRange[1])
return dataset, ranges
def removeConstantColumns(dataset):
constantIndexes = []
for i in range(len(dataset[0])):
firstVal = dataset[1][i] # first row is headers
constantCol = True
for j in range(2,len(dataset)):
if firstVal != dataset[j][i]:
constantCol = False
break
if constantCol:
constantIndexes.append(i)
return removeColumns(dataset, constantIndexes), constantIndexes
def removeColumns(dataset, indexes):
sortedIndexes = sorted(indexes, reverse=True)
#print sortedIndexes
for row in dataset:
for index in sortedIndexes:
try:
del row[index]
except Exception:
print "row len", len(row)
print "index to remove", index
print "last row len", len(dataset[-1])
raise Exception("gah")
return dataset
# **********************************************************************
# NN and NN-related functionality
# **********************************************************************
class NNWrapper():
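# thin wrapper around libfann: dataset-file I/O plus training and evaluation helpers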
connection_rate = 1
learning_rate = 0.05
iterations_between_reports = 1
testingSummaryFilename = "testingSummary.csv"
totalTested = 0
totalCorrect = 0
numThatActuallyHaveLabel = None
numThatActuallyHaveLabelCorrectlyLabeled = None
@staticmethod
def saveDatasetToFile(datasetPairs, filename):
numPairs = len(datasetPairs)
inputSize = len(datasetPairs[0][0])
outputSize = len(datasetPairs[0][1])
fileStrs = [str(numPairs)+" "+str(inputSize)+" "+str(outputSize)]
for pair in datasetPairs:
fileStrs.append(" ".join(map(lambda x: str(x), pair[0])))
fileStrs.append(" ".join(map(lambda x: str(x), pair[1])))
f = open(filename, "w")
f.write("\n".join(fileStrs))
f.close()
@staticmethod
def saveDatasetToFileStringLists(datasetPairs, inputSize, outputSize, filename):
numPairs = len(datasetPairs)
fileStrs = [str(numPairs)+" "+str(inputSize)+" "+str(outputSize)]
for pair in datasetPairs:
fileStrs.append(" ".join(pair[0]))
fileStrs.append(" ".join(pair[1]))
f = open(filename, "w")
f.write("\n".join(fileStrs))
f.close()
@staticmethod
def startDatasetFile(numPairs, inputSize, outputSize, filename):
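# FANN training files start with a header line "<numPairs> <inputSize> <outputSize>",
# then alternate one input line and one output line per example, e.g. (illustrative values):
#   2 3 1
#   0.1 0.2 0.3
#   1
#   -0.5 0.0 0.5
#   0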
f = open(filename, "w")
f.write(str(numPairs)+" "+str(inputSize)+" "+str(outputSize)+"\n")
return f
@staticmethod
def addToDatasetFile(fileHandle, datasetPairs):
fileStrs = []
for pair in datasetPairs:
fileStrs.append(" ".join(pair[0]))
fileStrs.append(" ".join(pair[1]))
fileHandle.write("\n".join(fileStrs))
# writes a dataset that has already been serialized into a single string
@staticmethod
def saveDatasetToFileAlreadyString(numPairs, inputSize, outputSize, string, filename):
f = open(filename, "w")
f.write(str(numPairs)+" "+str(inputSize)+" "+str(outputSize)+"\n")
f.write(string)
f.close()
@staticmethod
def mergeSingleDocumentTrainingFiles(filenames, finalFilename):
totalNumExamples = 0
inputSize = 0
outputSize = 0
for filename in filenames:
with open(filename, 'r') as f:
firstLine = f.readline()
items = firstLine.split(" ")
numInputOutputPairs = int(items[0])
totalNumExamples += numInputOutputPairs
inputSize = int(items[1])
outputSize = int(items[2])
try:
os.remove(finalFilename)
except:
print "already no such file"
outputfile = open(finalFilename, "w")
outputfile.write(str(totalNumExamples)+" "+str(inputSize)+" "+str(outputSize)+"\n")
for filename in filenames:
print filename
with open(filename) as f:
fileStr = f.read()
endFirstLine = fileStr.index("\n")
remainingStr = fileStr[endFirstLine+1:]
outputfile.write(remainingStr+"\n")
os.remove(filename) # can take so much space!
outputfile.close()
@staticmethod
def trainNetwork(dataFilename, netFilename, layerSizes, max_iterations, desired_error):
# layerSizes should look something like this: (numInput, 200, 80, 40, 20, 10, numOutput)
ann = libfann.neural_net()
#ann.create_sparse_array(NNWrapper.connection_rate, (numInput, 6, 4, numOutput)) #TODO: is this what we want? # the one that works in 40 seconds 4, 10, 6, 1. the one that trained in 30 secs was 6,6
ann.create_standard_array(layerSizes)
ann.set_learning_rate(NNWrapper.learning_rate) # rprop doesn't use learning rate
ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)
#ann.set_training_algorithm(libfann.TRAIN_RPROP)
ann.set_training_algorithm(libfann.TRAIN_QUICKPROP)
ann.set_bit_fail_limit(.3)
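# bit fail limit: an output counts as wrong if it differs from its target by more than 0.3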
#ann.randomize_weights(0,0)
t0 = time.time()
ann.train_on_file(dataFilename, max_iterations, NNWrapper.iterations_between_reports, desired_error)
t1 = time.time()
seconds = t1-t0
print "Seconds: ", seconds
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
print "Time to train:"
print "%d:%02d:%02d" % (h, m, s)
ann.save(netFilename)
@staticmethod
def testNet(datasetRaw, netFilename, labelHandler):
if NNWrapper.numThatActuallyHaveLabel == None:
NNWrapper.numThatActuallyHaveLabel = {}
NNWrapper.numThatActuallyHaveLabelCorrectlyLabeled = {}
try:
os.remove(NNWrapper.testingSummaryFilename)
except:
print "already no such file"
testingSummaryFile = open(NNWrapper.testingSummaryFilename, "a")
ann = libfann.neural_net()
ann.create_from_file(netFilename)
#ann.print_connections()
labelLen = labelHandler.numLabels
stats = {}
documents = relationToDocuments(datasetRaw, labelHandler)
for document in documents:
print document
boxes = documents[document]
boxPairs = itertools.permutations(boxes, 2)
for pair in boxPairs:
featureVec = pair[0][0]+pair[1][0]
actualLabelsBox1 = labelHandler.getLabelsForXInNRep(pair[0][1])
actualLabelsBox2 = labelHandler.getLabelsForXInNRep(pair[1][1])
result = ann.run(featureVec)
testingSummaryFile.write(str(pair[0][1])+"\t;"+str(pair[1][1])+"\t;"+str(result))
guessedLabelsBox1 = labelHandler.labelsFromNetAnswer(result[:labelLen])
guessedLabelsBox2 = labelHandler.labelsFromNetAnswer(result[labelLen:])
for actualLabelBox1 in actualLabelsBox1:
box1Stats = stats.get(actualLabelBox1, {"left": {}, "right": {}})
for guessedLabelBox1 in guessedLabelsBox1:
box1Stats["left"][guessedLabelBox1] = box1Stats["left"].get(guessedLabelBox1, 0) + 1
stats[actualLabelBox1] = box1Stats
for actualLabelBox2 in actualLabelsBox2:
box2Stats = stats.get(actualLabelBox2, {"left": {}, "right": {}})
for guessedLabelBox2 in guessedLabelsBox2:
box2Stats["right"][guessedLabelBox2] = box2Stats["right"].get(guessedLabelBox2, 0) + 1
stats[actualLabelBox2] = box2Stats
for key in stats:
print key, "left"
print "*******************"
for label in labelHandler.labelIdsToLabels:
count = stats[key]["left"].get(label, 0)
print label, "\t\t\t", count
print key, "right"
print "*******************"
for label in labelHandler.labelIdsToLabels:
count = stats[key]["right"].get(label, 0)
print label, "\t\t\t", count
# **********************************************************************
# High level structure
# **********************************************************************
def makeSingleNodeNumericFeatureVectors(filename, trainingsetFilename, netFilename, testOnly):
docList = CSVHandling.csvToBoxlists(filename) # each boxList corresponds to a document
trainingDocuments, testingDocuments = splitDocumentsIntoTrainingAndTestingSets(docList, .8) # go back to .8 once done testing
# get everything we need to make feature vectors from both training and testing data
popularFeatures = popularSingleBoxFeatures(trainingDocuments, .1) # go back to .07 once done testing
trainingFeatureVectors = datasetToRelation(trainingDocuments, popularFeatures)
testingFeatureVectors = datasetToRelation(testingDocuments, popularFeatures)
# let's synthesize a filter
numericalColIndexes = range(2, 2 + len(popularFeatures)) # recall first two rows are label and doc name. todo: do this more cleanly in future
noLabelFilter = synthesizeFilter(trainingFeatureVectors[1:], numericalColIndexes) # cut off that first row, since that's just the headings
print noLabelFilter
print noLabelFilter.stringWithHeadings(trainingFeatureVectors[0])
noLabelFilter.test(testingFeatureVectors[1:])
# now that we have a filter, we're ready to filter both the training set and the test set
trainingFeatureVectors = [trainingFeatureVectors[0]] + noLabelFilter.filterDataset(trainingFeatureVectors[1:])
testingFeatureVectors = [testingFeatureVectors[0]] + noLabelFilter.filterDataset(testingFeatureVectors[1:])
print "len before", len(trainingFeatureVectors[0])
trainingFeatureVectors, columnsToRemove = removeConstantColumns(trainingFeatureVectors)
print "identified", len(columnsToRemove), "constant columns"
print "len after", len(trainingFeatureVectors[0])
print "len before", len(testingFeatureVectors[0])
testingFeatureVectors = removeColumns(testingFeatureVectors, columnsToRemove)
print "removed constant columns from test set"
print "len after", len(testingFeatureVectors[0])
# now we need to process the data for the NN -- scale everything to the [-1,1] range
trainingFeatureVectors, ranges = scaleRelation(trainingFeatureVectors)
testingFeatureVectors, ranges = scaleRelation(testingFeatureVectors, ranges)
print "scaled"
labelHandler = LabelHandler(getLabelsFromDataset(trainingFeatureVectors))
if not testOnly:
# now let's actually save the training set to file
numInput, numOutput = saveDatasetMemConscious(trainingFeatureVectors, trainingsetFilename, labelHandler)
print "saved data"
# now that we've saved the datasets we need, let's actually run the NN on them
desired_error = 0.015
max_iterations = 500
layerStructure = makeLayerStructure(numInput, numOutput, 20)
NNWrapper.trainNetwork(trainingsetFilename, netFilename, layerStructure, max_iterations, desired_error)
NNWrapper.testNet(testingFeatureVectors, netFilename, labelHandler)
#noLabelString = "nolabel"
noLabelString = "null"
def makeLayerStructure(numInput, numOutput, numHiddenLayers):
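# hidden layer sizes taper geometrically from numInput/3 down toward numOutput*2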
start = numInput/3 # otherwise it can just get so big...
end = numOutput*2
denominator = ((float(start) / end) ** (1.0 / (numHiddenLayers))) # float ratio so Python 2 integer division doesn't flatten the taper
currLayerSize = start
layerSizes = [numInput, start]
for i in range(numHiddenLayers - 1):
currLayerSize = currLayerSize / denominator
layerSizes.append(int(math.ceil(currLayerSize)))
layerSizes.append(numOutput)
print layerSizes
return layerSizes
def main():
testOnly = False
makeSingleNodeNumericFeatureVectors("cvDataset.csv", "trainingSetCV.data", "netCV.net", testOnly)
#makeSingleNodeNumericFeatureVectors("webDatasetFullCleaned.csv", "trainingSet.data", "net.net")
main()
| [
"[email protected]"
] | |
d15f7f874707565cc02a4efbcddec8e03367b55d | 3cd2de2d088afd4908cf2bb9b87953d86b6e76b8 | /intcode/test.py | a18884550793019c34ea7ac9529e908a2af3c14c | [] | no_license | pedrokiefer/adventofcode2019 | 055c4cb84afe598ec064d8a48b2384fa14d02de2 | 56a2bc4102a69f7511c00ea3f23dca258b100bc5 | refs/heads/master | 2020-09-30T02:08:30.163519 | 2019-12-28T02:08:21 | 2019-12-28T02:08:21 | 227,174,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | from unittest.mock import Mock
from .cpu import IntComputer
def test_simple_program():
program = [1, 0, 0, 0, 99]
c = IntComputer(program)
c.run_program()
assert c.p == [2, 0, 0, 0, 99]
def test_multiply_program():
program = [2, 3, 0, 3, 99]
c = IntComputer(program)
c.run_program()
assert c.p == [2, 3, 0, 6, 99]
def test_complex_1():
program = [2, 4, 4, 5, 99, 0]
c = IntComputer(program)
c.run_program()
assert c.p == [2, 4, 4, 5, 99, 9801]
def test_complex_2():
program = [1, 1, 1, 4, 99, 5, 6, 0, 99]
c = IntComputer(program)
c.run_program()
assert c.p == [30, 1, 1, 4, 2, 5, 6, 0, 99]
def test_complex_3():
program = [1101, 100, -1, 4, 0]
c = IntComputer(program)
c.run_program()
assert c.p == [1101, 100, -1, 4, 99]
def test_position_mode_input_equals_8():
program = [3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8]
c = IntComputer(program)
def output(a):
assert a == 1
def input():
return 8
c.output = output
c.input = input
c.run_program()
assert c.p == [3, 9, 8, 9, 10, 9, 4, 9, 99, 1, 8]
def test_position_mode_input_less_than_8():
program = [3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8]
c = IntComputer(program)
def output(a):
assert a == 0
def input():
return 8
c.output = output
c.input = input
c.run_program()
assert c.p == [3, 9, 7, 9, 10, 9, 4, 9, 99, 0, 8]
def test_immediate_mode_input_equals_8():
# immediate-mode comparison program (opcode 1108) from AoC 2019 day 5
program = [3, 3, 1108, -1, 8, 3, 4, 3, 99]
c = IntComputer(program)
def output(a):
assert a == 1
def input():
return 8
c.output = output
c.input = input
c.run_program()
assert c.p == [3, 3, 1108, 1, 8, 3, 4, 3, 99]
def test_immediate_mode_input_less_than_8():
# immediate-mode less-than program (opcode 1107) from AoC 2019 day 5
program = [3, 3, 1107, -1, 8, 3, 4, 3, 99]
c = IntComputer(program)
def output(a):
assert a == 0
def input():
return 8
c.output = output
c.input = input
c.run_program()
assert c.p == [3, 3, 1107, 0, 8, 3, 4, 3, 99]
def test_offset_1():
program = [109, 19, 99]
c = IntComputer(program)
c.offset = 2000
c.output = Mock()
c.run_program()
assert c.offset == 2019
def test_offset_2():
program = [109, 19, 204, -34, 99]
c = IntComputer(program)
c.offset = 2000
c.output = Mock()
c.run_program()
assert c.offset == 2019
assert c.output.call_count == 1
def test_quine():
program = [109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99]
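# AoC 2019 day 9 example: the program outputs an exact copy of itself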
c = IntComputer(program)
c.output = Mock()
c.run_program()
assert c.output.call_count == 16
call_values = [call[0][0] for call in c.output.call_args_list]
assert call_values == [
109,
1,
204,
-1,
1001,
100,
1,
100,
1008,
100,
16,
101,
1006,
101,
0,
99,
]
def test_16bit_number():
program = [1102, 34915192, 34915192, 7, 4, 7, 99, 0]
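# day 9 example: 34915192 * 34915192 should yield a 16-digit number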
c = IntComputer(program)
def output(a):
assert a == 1219070632396864
c.output = output
c.run_program()
def test_print_large_number():
program = [104, 1125899906842624, 99]
c = IntComputer(program)
def output(a):
assert a == 1125899906842624
def input():
return 8
c.output = output
c.input = input
c.run_program()
| [
"[email protected]"
] | |
d0335d56677c85543a5999292c7d92942cb7f4ad | c0204acff3fd9d276ab315dd4134eb228bbadf2d | /116_version1.py | c1f8fa3a3245d3c8286a0952069cf20d5a7846d1 | [] | no_license | haruto0519/haruto_hirai | bee95366adde7702e276679d7d5832ddf2cce54f | 8668a7bee65d109f5a0afa6357f5a45337cb7bc4 | refs/heads/main | 2023-08-23T21:21:40.078199 | 2021-10-27T20:55:12 | 2021-10-27T20:55:12 | 421,956,481 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # a116_buggy_image.py
import turtle as trtl
# the turtle gets a descriptive name, spider, that matches what it draws
spider = trtl.Turtle()
spider.pensize(40)
spider.circle(20)
n_legs = 6
length = 70
angle = 360 / n_legs
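# draw each leg out from the body center, rotating 360/n_legs degrees between legs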
spider.pensize(5)
n = 0
while (n < n_legs ):
spider.goto(0,0)
spider.setheading(angle*n)
spider.forward(length)
n = n + 1
spider.hideturtle()
wn = trtl.Screen()
wn.mainloop() | [
"[email protected]"
] | |
8618008c2deaeda1af5852467c436db706c5400b | ff31d31493bc75de2bc1a206ebf13ca91d937f9b | /Master.py | 262c03f98f9ca9c3cf97d0d9d5e37e28159287a1 | [
"MIT"
] | permissive | Researchnix/HackThe6ix | c96bcc8d892747e94a483c23d09862b02d23f962 | 85057a0eede9ef4e30f3246221071e028946cbc0 | refs/heads/master | 2020-09-21T12:56:45.390518 | 2016-10-22T22:48:55 | 2016-10-22T22:48:55 | 66,153,079 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,983 | py | #
# Master.py
# HackThe6ix
#
# Created by Jeffrey, Maliha and Lennart on 2016-08-20.
# Copyright 2016 Researchnix. All rights reserved.
#
import sys
from random import *
import Map
import Car
import RoutePlanner
# the main traffic coordinator
class Master:
''' constants'''
maxRunTime = 10000
verbose = True
''' time stepping '''
m = Map.Map() # Map...
cars = [] # List of cars on the map
navi = RoutePlanner.RoutePlanner() # Navigation device
# Positions from which a car can't progress to the next one
blocked = []
# trafficLights maps each intersection to a dictionary with keys 'Red' and 'Green', each holding the list of incoming streets currently shown that light
''' traffic lights'''
trafficLights = {}
# Give each intersection the state of either
# 1X always green
# 2X always green, maybe later randomly red due to pedestrians crossing
# 3X TODO
# 4X horizontal, vertical, hleft, vleft
# or RED, meaning everything is red
# all GREEN is an unusual state...
interState = {}
# Evaluation helpers
evaluation = {}
numberOfCars = 2
travelLength = {}
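# evaluation: car name -> time step at which it reached its destination
# travelLength: car name -> number of fine-route steps it was planned to take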
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### INITIALIZATION ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
def __init__(self):
self.initialize()
def initialize(self):
print "Initializing the Master with a map from text files and one car"
# Load the intersections first
self.loadIntersections()
# Load the streets
self.loadStreets()
# Initialize the navi
self.navi.setMap(self.m)
# Initialize incomingStreets in the map
self.m.calcIncomingStreets()
# Initialize all traffic lights to red :D
self.initializeTrafficLights()
self.updateBlocked()
self.initializeInterState()
# Initialize so far only one car
# start, destination, name
#car1 = Car.Car(5, 30, "Porsche ")
#car2 = Car.Car(19, 18, "Volkswagen")
#car3 = Car.Car(27, 11, "A")
#car4 = Car.Car(11, 5, "B")
#car5 = Car.Car(30, 12, "C")
#car6 = Car.Car(8, 26, "D")
#car7 = Car.Car(11, 27, "E")
#self.cars.append(car1)
#self.cars.append(car2)
#self.cars.append(car3)
#self.cars.append(car4)
#self.cars.append(car5)
#self.cars.append(car6)
#self.cars.append(car7)
self.initializeRandomCars(20)
# Calculate all the routes of the cars
self.calculateRoutes()
# Prepare the evaluation of the efficiency
self.numberOfCars = len(self.cars)
for c in self.cars:
self.travelLength[c.name] = len(c.fineRoute)
# Initialize blocked positions
# A) by cars
for c in self.cars:
self.blocked.append(c.curPos)
print '... done!'
def initializeRandomCars(self, n):
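# spawn up to n cars at distinct random start intersections with random destinations
# (a car is skipped when its start and destination coincide)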
everywhere = self.m.intersections.keys()
available = self.m.intersections.keys()
for x in range(n):
start = available[randint(0,len(available)-1)]
finish = everywhere[randint(0,len(everywhere)-1)]
name = "car" + str(x)
if not start == finish:
self.cars.append( Car.Car(start, finish, name) )
available.remove(start)
def initializeInterState(self):
# make everything TL that has 2 roards or less GREEN
for i in self.m.intersections:
if self.m.character[i] <= 2:
self.interState[i] = "GREEN"
else:
self.interState[i] = "RED"
self.updateInterState()
def initializeTrafficLights(self):
for i in self.m.intersections:
state = {}
state['Green'] = []
state['Red'] = self.m.incoming[i]
self.trafficLights[i] = state
def loadIntersections(self):
f = open('inter.txt', 'r')
for line in f:
line = line.split()
self.m.addIntersection(int(line[0]), float(line[1]), float(line[2]))
f.close()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### MAKING DATA AVAILABLE ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
#def getInitializionData(self):
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### LOADING DATA ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
def loadStreets(self):
# First add streets in right orientation
# Format of ori.txt is
# to from1 from2 from3 ...
f = open('ori.txt', 'r')
for line in f:
line = line.split()
to = int(line.pop(0))
for remaining in line:
self.m.addStreet(int(remaining), to, 0)
f.close()
# Now load the lengths from dist.txt
#f = open('dist.txt', 'r')
#for line in f:
# line = line.split()
#self.m.setDist(int(line[0]), int(line[1]), int(line[2]))
#f.close()
''' Trick: every dist = 10 '''
for x in self.m.streets:
x[-1] = 10
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### OUTPUT METHODS ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
def printCars(self):
for c in self.cars:
print c.name + " on " + str(c.curPos) + " with route " + str(c.fineRoute[:5])
def printState(self):
print '\nThe current state of the program is'
print '\n### intersections: '
print self.m.intersections
print '### streets: '
print self.m.streets
print '### incoming: '
print self.m.incoming
print '### character: '
print self.m.character
print '### traffic lights: '
print self.trafficLights
print '### Intersection states: '
print self.interState
print '### cars: '
self.printCars()
print '\n'
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ROUTE CALCULATION ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
def calculateRoutes(self):
# Give every car a coarse route
for c in self.cars:
c.coarseRoute = self.navi.calcCoarseRoute(c.start, c.destination)
#for c in self.cars:
# print c.name + ' coarse ' + str(c.coarseRoute)
# Give every car a fine route based on its coarse route
# move this function to Routeplanner later
for c in self.cars:
c.fineRoute = self.navi.calcFineRoute(c.coarseRoute)
#for c in self.cars:
# print c.name + ' fine ' + str(c.fineRoute)
# Update general car information
# That is, find initial direction of travel from coarse route,
# update nextIntersection and calculate therewith the street
# and curPos the car should be on.
for c in self.cars:
c.nextIntersection = c.coarseRoute[1]
street = self.m.findStreet(c.start, c.nextIntersection)
c.curPos = (street, 0)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### STEPPING FUNCTIONS ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
# Do as many as maxRunTime steps to try to get every car to its destination
def run(self, total):
#print "\n\n"
#print "######################################################################"
#print "TIME STEPPING STARTS HERE"
#print "######################################################################"
#print "\n\n"
#########################
# Insert the model here #
#########################
#self.useModel1(8, total)
self.useModel2(11, 11, total)
if len(self.cars) == 0:
sys.exit("All cars reached their destination")
self.timeStep()
if self.verbose:
self.printCars()
print self.interState
# Check if any car reached its destination
for c in self.cars[::-1]:
if c.destinationReached:
self.evaluation[c.name] = total
self.blocked.remove(c.curPos)
self.cars.remove(c)
#print "\n\n"
#print "######################################################################"
#print "All cars are done ... or the time is up "
#print "Evaluation of the time it took each car " + str(self.evaluation)
#print "Total time a car was using fuel = " + str(sum(self.evaluation.values()))
#print "Relative inefficiency = " + str(sum(self.evaluation.values()) / self.numberOfCars)
#print "\n\n"
#diff = [self.evaluation.values()[c] -self.travelLength.values()[c] for c in range(len(self.evaluation))]
#print "Time a car was standing still =" + str(diff)
#print "Overall time a car was waiting for a traffic light = " + str(sum(diff))
def canProgress(self, car):
return not (car.nextPos() in self.blocked)
# One time step in which every car that can potentially progress progresses.
def timeStep(self):
for c in self.cars:
if not c.destinationReached:
if self.canProgress(c):
# We have a ghosting problem, due to time, ignore the error...
# This error appears exactly when the traffic lights free an
# intersection that is still occupied by a car. Hence when the car
# moves on, it can't free its last position that's already free
if c.curPos in self.blocked:
# clear the spot that the car was on
self.blocked.remove(c.curPos)
c.oldPos = c.curPos
c.curPos = c.fineRoute.pop(0) # move car forward by one unit
self.blocked.append(c.curPos) # call dips on the current position
c.needsUpdate = True
if len(c.fineRoute) == 0:
c.destinationReached = True
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### TRAFFIC LIGHT FUNCTIONs### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
def updateBlocked(self):
for i in self.m.intersections.keys():
# We need to remove all blocks from this intersections that are in Green
# and add all the ones that are in Red
for g in self.trafficLights[i]['Green']:
street = self.m.lastPos(g)
if street in self.blocked:
# Remove (g, last elem of g)
self.blocked.remove(street)
for r in self.trafficLights[i]['Red']:
street = self.m.lastPos(r)
if street not in self.blocked:
# Add (r, last elem of r)
self.blocked.append(street)
# To change the sign of a traffic light use this format
# (inter = intersection_index, street = street_index of incoming street)
def turnGreen(self, inter, street):
self.trafficLights[inter]['Red'].remove(street)
self.trafficLights[inter]['Green'].append(street)
self.updateBlocked()
def turnRed(self, inter, street):
self.trafficLights[inter]['Green'].remove(street)
self.trafficLights[inter]['Red'].append(street)
self.updateBlocked()
def updateInterState(self):
for i in self.interState.keys():
if self.interState[i] == 'RED':
self.trafficLights[i]['Green'] = []
self.trafficLights[i]['Red'] = self.m.incoming[i]
if self.interState[i] == 'GREEN':
self.trafficLights[i]['Red'] = []
self.trafficLights[i]['Green'] = self.m.incoming[i]
if self.interState[i] == 'horizontal': # len(incoming[i]) = 4 required
reds = []
greens = []
nghs = self.m.incoming[i]
reds.append(nghs[0])
reds.append(nghs[2])
greens.append(nghs[1])
greens.append(nghs[3])
self.trafficLights[i]['Red'] = reds
self.trafficLights[i]['Green'] = greens
if self.interState[i] == 'vertical': # len(incoming[i]) = 4 required
reds = []
greens = []
nghs = self.m.incoming[i]
reds.append(nghs[1])
reds.append(nghs[3])
greens.append(nghs[0])
greens.append(nghs[2])
self.trafficLights[i]['Red'] = reds
self.trafficLights[i]['Green'] = greens
if self.interState[i] == 'hleft': # len(incoming[i]) = 4 required
pass
if self.interState[i] == 'vleft': # len(incoming[i]) = 4 required
pass
self.updateBlocked()
def changeInterState(self, inter, mode):
self.interState[inter] = mode
self.updateInterState()
#Flips state from horizontal to vertical and vice versa
def flipInterState(self, inter):
if self.interState[inter] == 'horizontal':
self.interState[inter] = 'vertical'
else: # default for RED or GREEN
self.interState[inter] = 'horizontal'
self.updateInterState()
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### TRAFFIC MODELS ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##### #
# Model 1 switches every four-way intersection to horizontal for par time steps, then to vertical for par time steps, alternating
def useModel1(self, par, curTime):
# One line solution, haha !!
signal = [int(float(i)/par) % 2 for i in range(self.maxRunTime)]
if signal[curTime] == 0:
for i in self.m.fourFoldInter:
self.changeInterState(i, 'horizontal')
else:
for i in self.m.fourFoldInter:
self.changeInterState(i, 'vertical')
# Model 2: every par1 time steps, randomly flip the state of par2 of the four-way intersections
def useModel2(self, par1, par2, curTime):
signal = [0 for y in range(self.maxRunTime)]
for i in range(self.maxRunTime):
if i%par1 == 0:
signal[i] = 1
if signal[curTime] == 1:
for x in range(par2):
i = randint(0,len(self.m.fourFoldInter)-1)
self.flipInterState(self.m.fourFoldInter[i])
| [
"[email protected]"
] | |
dcc9d8b7a1b36caead18840a6d9efa1039e3f5ce | 09e8a0935bb6c96bb42616b6931d2ea37b4288a7 | /MovuinoUSB_2015/MovementRecognition_LibSVM_Python/PLAY_AppFitNS_01.py | e74f515d1a47962f322b6f88538c97e1dce7e7e1 | [] | no_license | hssnadr/Movuino | 817a36058b93c8a4f13e142a0faac4a69991c5fd | aa4b9fb9435aadd0d45661bbb0663061f0047287 | refs/heads/master | 2020-06-09T09:09:47.424345 | 2017-12-15T13:05:09 | 2017-12-15T13:05:09 | 76,047,127 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,725 | py | import math
import time
import numpy as np
from svmutil import *
#Not necessary
import serial #to get our sensor data
import pygame #to close session by closing pygame window
#Local script
import getFeatures_Detection
#Initialize graphic window
pygame.display.init()
fig = pygame.display.set_mode((640, 480)) #Open main window
#Initialize serial communication with the sensor
# ser = serial.Serial('COM7',38400, timeout=1) # for Windows
ser = serial.Serial('/dev/cu.usbmodem1421',38400, timeout=1) # for Mac
#Check sensor configuration
ser.write("i")
dataSensor = ser.readline().split()
print dataSensor
##
# INITIALIZE CAPTURE
# Get logistic regression model parameters FOR DETECTION
mDetection = svm_load_model('WM_SVMmodel_Detection.model') # based on libsvm library
npzfile = np.load('SVMVariable_Detection/SVMparameters.npz')
rangeInput_Detection = npzfile['rangeInput']
meanInput_Detection = npzfile['meanInput']
Ureduce_Detection = npzfile['Ureduce']
# Real time parameters
delay = 0.01 # delay between each row from the sensor
sizeDataWindow_Detection = 40 # number of data store at each moment for movement detection
timeStart = time.time() #time from 1st of January 1970 (second)
# Moving average Data matrix parameters
N = 5; # range for moving average
dataCollect = np.empty((N,7)) # matrix containing sensors datas to compute moving average
dataCollect[:] = np.NaN
dataCollect_MMean = np.empty((sizeDataWindow_Detection,7)) # matrix containing sensors moving average datas on the observation window
dataCollect_MMean[:] = np.NaN
cRegulator = 0 #Regulator counter
cRegulatorMax = 10 #Regulate number of time data get analysed (cRegulator go back to 0 if cRegulator > cRegulatorMax)
##----------------------------------------------------------------------------
# START PLAY MODE
# Send command to sensor (l = Live mode)
ser.write("l")
time.sleep(0.1)
print("start!")
isFig = True #check if main window is still open
while(isFig):
# time.sleep(delay) # allow time delay between measures
# Scan serial port
data = ser.readline().split()
curTime = time.time()-timeStart # get current time
# Data format verification before manipulation
if (len(data)==7 and (data[0]=="l" or data[0]=="r")):
# Extract values from serial data
accX = float(data[1])
accY = float(data[2])
accZ = float(data[3])
gyrX = float(data[4])
gyrY = float(data[5])
gyrZ = float(data[6])
# Store into data window matrix
dataCollect = np.concatenate((dataCollect[1:,:] , [[accX, accY, accZ, gyrX, gyrY, gyrZ, curTime]]),axis=0);
# Moving average (store into dataCollect_MMean)
if (np.where(np.isnan(dataCollect))[0].size == 0) : # mean average if enough data
meanDat_ = (1/float(N))*np.sum(dataCollect[:,:6],axis=0) # mean average on data (not on time)
meanDat_ = np.concatenate((meanDat_ , [dataCollect[-1,6]]),axis=0) # add time (1-D arrays concatenate along axis 0)
dataCollect_MMean = np.concatenate((dataCollect_MMean[1:,:], [meanDat_]),axis=0) # matrix containing filtered sensors datas on the observation window
cRegulator += 1
# print cRegulator, curTime
# Analyze data only if dataCollect_Mean is full of data
if (np.where(np.isnan(dataCollect_MMean))[0].size == 0 and cRegulator >= cRegulatorMax) : # if there's no more NaN (dataCollect is full) the window is evaluated
# Movement DETECTION (on dataCollect_MMean)
cRegulator = 0
# Get features
X = getFeatures_Detection.getFeatures_Detection(dataCollect_MMean)
# Features normalization based on parameters of the model
Xnorm = np.empty(X.shape)
Xnorm[:] = np.NAN
for i in range(0,len(X)):
if (rangeInput_Detection[i] != 0) : # avoid infinite case (when rangeInput=0)
Xnorm[i] = (X[i] - meanInput_Detection[i])/rangeInput_Detection[i]
else:
Xnorm[i]=0
#Features reduction
Z = np.dot(Ureduce_Detection.T,Xnorm)
Z = np.concatenate(([1],Z),axis=0).tolist() #add bias units
dicZ = [{x: Z[x] for x in range(0,len(Z))}]
# Get output estimation
p_label, p_acc, p_val = svm_predict([1], dicZ, mDetection,'-q')
p_label = p_label[0]
print p_label, curTime
# Check output estimation
if p_label>0.5 :
print 'DETECTION : ', p_val
# Resize dataCollect_MMean on Detection window
dataCollect_MMean = np.empty((sizeDataWindow_Detection,7))
dataCollect_MMean[:] = np.NaN
ser.flushInput() #empty serial
# Manage serial issues
elif not data :
print "serial is empty"
elif data[0]=="L": # manage specific case (cf. sensor)
print "sensor sending bytes: session terminated"
break
elif data[0]=="M": # manage specific case (cf. sensor)
print "M error"
# Close window to terminate session
for event in pygame.event.get(): #check for event
if(event.type==pygame.QUIT or (event.type==pygame.KEYDOWN and event.key==pygame.K_ESCAPE)): #press escape to leave the while loop
isFig=False
pygame.quit()
##----------------------------------------------------------------------------
#close serial communication with sensor
print("stop!")
ser.write("q") # stop sending data from movuino
ser.close()
print "\nYOU MUST DEFEAT SHENG LONG TO STAND A CHANCE" | [
"[email protected]"
] | |
b947e26aedcbe4d02c260f1b1f68502685c232fb | a2f753285b0c270168e260d72a08cf0fcfc77b22 | /firstApp/app_V1.py | 50bdc011ecf1fd511ce0aeb71e1a5df39ff6406b | [] | no_license | IzumiHoshi/My-Python-Code | cc7df2d1ac5e4f5481f7cf9647b95f648a722aa3 | 73ba606f1c9be8f9651971da9be4c83903f6f903 | refs/heads/master | 2021-01-16T23:03:32.111385 | 2020-06-22T06:29:40 | 2020-06-22T06:29:40 | 71,847,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
def index(request):
return web.Response(body=b'<h1>Awesome</h1>', content_type='text/html', charset='utf-8')
@asyncio.coroutine
def init(loop):
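# old-style coroutine (pre async/await), matching the aiohttp API of this era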
app = web.Application(loop=loop)
app.router.add_route('GET', '/', index)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9000)
logging.info('server started at http://127.0.0.1:9000...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever() | [
"[email protected]"
] | |
abd6aba250f267573bacd3daf43a21b2c1952ce2 | 22fe6ed51715486ebbc09e404504ed4d7a28c37d | /python-katas/PN2RangeSum.py | efe3460738df4c950e68422a3ee4ffe022739815 | [] | no_license | Jethet/Practice-more | 1dd3ff19dcb3342a543ea1553a1a6fb0264b9c38 | 8488a679730e3406329ef30b4f438d41dd3167d6 | refs/heads/master | 2023-01-28T14:51:39.283741 | 2023-01-06T10:14:41 | 2023-01-06T10:14:41 | 160,946,017 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | # Given a range of numbers, iterate from 0-th to end number and
# print the sum of the current number and previous numbers
def range_sum(nums):
sum_old = 0
for i in range(nums):
sum = sum_old + i
print(sum)
sum_old = i
range_sum(10)
| [
"[email protected]"
] | |
b66326ad63b412657ddd4448b5320a22614e8070 | 143d912d73d8f75a0a235b027c5212475ac54981 | /detect_model/yolo_net.py | 7aaabb389168f214e6b576b3cc9a610ed154b1dd | [] | no_license | SolarSword/Camera-Object-Detection | fc3d60f7df89ef582907d8dc5f4dd578eca05001 | ca9aeaecab6516ffbd81c33b48319f988e6ed617 | refs/heads/master | 2020-05-15T15:17:05.640295 | 2019-04-20T06:30:31 | 2019-04-20T06:30:31 | 182,368,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,031 | py | import copy
import numpy as np
import tensorflow as tf
import cv2
#import config as cfg
from . import config as cfg
class YOLONet():
def __init__(self):
# initialize all parameters
self.weights_file = cfg.WEIGHTS_FILE
self.classes = cfg.CLASSES
self.num_classes = len(self.classes)
self.image_size = cfg.IMAGE_SIZE
self.cell = cfg.CELL
self.boxes_per_cell = cfg.BOXES_PER_CELL
self.alpha = cfg.ALPHA
self.threshold = cfg.THRESHOLD
self.iou_threshold = cfg.IOU_THRESHOLD
# final_objects is a list to store the detected objects in an image
# self.final_objects = []
self.build_network()
#self.detect_from_image(image)
def set_image(self,image):
self.image = image
# final_objects is a list to store the detected objects in an image
# if we reset a new image, we need to reset this list, too
self.final_objects = []
self.final_classes_names = []
# network part
def build_network(self):
with tf.Graph().as_default() as yolo_net_graph:
self.x = tf.placeholder('float32',[None,self.image_size,self.image_size,3])
self.conv_1 = self.conv_layer(1,self.x,64,7,2)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
#skip dropout_31
self.fc_32 = self.fc_layer(32,self.fc_30,1470,flat=False,linear=True)
self.sess = tf.Session(graph = yolo_net_graph)
#tf.initialize_all_variables()
#THIS FUNCTION IS DEPRECATED. It will be removed after 2017-03-02. Instructions for updating: Use tf.global_variables_initializer instead.
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
self.saver.restore(self.sess,self.weights_file)
def conv_layer(self,idx,inputs,filters,size,stride):
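# pad the input by size//2 on each side manually, then convolve with 'VALID' padding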
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')#activation
def pooling_layer(self,idx,inputs,size,stride):
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
# detection part
def detect_from_cvmat(self,img):
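# resize to 448x448, convert BGR->RGB, scale pixels to [-1,1], run the net, then decode boxes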
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (self.image_size, self.image_size))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.generate_bounding_box(img)
def detect_from_image(self):
img = self.image
self.detect_from_cvmat(img)
def interpret_output(self,output):
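# the 1470 network outputs decode as: 980 class probabilities (7x7x20),
# then 98 box confidences (7x7x2), then 392 box coordinates (7x7x2x4)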
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= self.w_img
boxes[:,:,:,1] *= self.h_img
boxes[:,:,:,2] *= self.w_img
boxes[:,:,:,3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
return result
def iou(self,box1,box2):
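# boxes are in (center_x, center_y, width, height) form; returns intersection over union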
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
def generate_bounding_box(self,image):
self.final_with_bounding_box = copy.deepcopy(image)
# the number of objects detected
self.num_objects = len(self.result)
for i in range(self.num_objects):
left_up_x = int(self.result[i][1] - self.result[i][3]/2)
left_up_y = int(self.result[i][2] - self.result[i][4]/2)
right_down_x = int(self.result[i][1] + self.result[i][3]/2)
right_down_y = int(self.result[i][2] + self.result[i][4]/2)
self.final_objects.append(image[left_up_y:right_down_y,left_up_x:right_down_x])
self.final_classes_names.append(self.result[i][0])
cv2.rectangle(self.final_with_bounding_box,(left_up_x,left_up_y),(right_down_x,right_down_y),(0,0,255),2)
# to add some note above the objects, the second parameter is the text
# adding the idx 'i' is to see whether the order of objects is relatively unchanged
# but still hard to say
cv2.putText(self.final_with_bounding_box,self.result[i][0] + " " + str(i), (left_up_x,left_up_y),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,0,255),1)
| [
"[email protected]"
] | |
fd5a18ddc398096b45e83773bf591eedc129469d | 5269e071cca7d3ed6c12bf3d4672ed80c99208b3 | /testa.py | 604f1f6347f6f8531145c723da5ec7a44a4a3f13 | [] | no_license | VictorJaque/SukobanPython | 7856071d059e65b27fd59b5b059dc2e63a0ca135 | 98f39a72165b106e294f2b3b6b55d1eda8e74635 | refs/heads/main | 2023-03-25T18:12:53.276222 | 2021-03-12T12:30:00 | 2021-03-12T12:30:00 | 347,057,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | def create_level_1(level):
x = 0
y = 0
teck = []
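# scan the level file character by character, recording each non-space symbol with its (row, column) position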
with open("first_level.sokoban") as f:
for rad in f:
for tecken in rad:
x = x + 1
if tecken == '\n':
y = y + 1
x = 0
if tecken != " ":
teck.append({'symb': tecken, 'posy': y, 'posx': x})
return(teck)
def display_level(teck):
# group symbols by row, keyed by their column position
rows = {}
for t in teck:
rows.setdefault(t['posy'], {})[t['posx']] = t['symb']
# render each row as one string, padding the gaps with spaces
lines = []
for y in sorted(rows):
width = max(rows[y]) + 1
lines.append("".join(rows[y].get(x, " ") for x in range(width)))
return("\n".join(lines))
level = 1
teck = create_level_1(level)
teck = display_level(teck)
print(teck)
| [
"[email protected]"
] | |
94986732f3b504318a4e2610f8c4c6c1ce8cc693 | 087d0c6d58dca56148033662a0f6841482615b2a | /crawler/spiders/focusedscrape/disprot.py | d299d8ac5591ad235f95d5e343c09639337ab54d | [
"Apache-2.0"
] | permissive | biothings/biothings.crawler | 52c1aa7ab13b9cebcf6b4366ec8f96c54101656e | 5d5c7d089b1e2e4344dfff34c077cd70ff46e5cc | refs/heads/master | 2022-02-13T14:10:10.354966 | 2021-08-31T23:07:36 | 2021-08-31T23:07:36 | 209,951,853 | 0 | 5 | Apache-2.0 | 2022-02-03T17:36:06 | 2019-09-21T08:39:50 | Python | UTF-8 | Python | false | false | 1,377 | py | import requests
from scrapy.spiders import Spider
from scrapy_selenium.http import SeleniumRequest
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.common.by import By
from ..helper import JsonLdMixin
class DisProtSpider(Spider, JsonLdMixin):
name = 'disprot'
# DisProt has robots.txt and sitemap.xml
# but they also have a publicly documented API
# FIXME: use API and the sitemap, but sitemap is preferred when only one can be
# implemented. currently because SitemapSpider has to yield Request, we are
# forced to use the API only and rely on the page url pattern not changing
# on the other hand, they use client side rendering, which is very annoying
custom_settings = {
'DOWNLOADER_MIDDLEWARES': {
'scrapy_selenium.SeleniumMiddleware': 800,
}
}
def start_requests(self):
ids = requests.get('https://disprot.org/api/list_ids').json()['disprot_ids']
for disprot_id in ids:
yield SeleniumRequest(
url=f'http://disprot.org/{disprot_id}',
callback=self.extract_jsonld,
wait_time=10,
wait_until=presence_of_element_located(
(By.XPATH,
'//script[@type="application/ld+json"]')
)
)
| [
"[email protected]"
] | |
d4e8e220a67ba05abe839b534b6f318d04f9c75a | 08a1e46bab043055d56ae0e8914a3045562b2ac4 | /Edmodo/searchEngine/scraping.py | 4e0cb2b86e3f46b1f5be3df1910d58775c54c4b1 | [
"BSD-3-Clause"
] | permissive | calchulus/Product-Search-Engine | 67eea389076dd2199f7d3842b9c87cd6b43e9c55 | ebf9c7bbf4e7083e25698a563f35a3d5781f794e | refs/heads/master | 2021-01-10T00:46:53.560734 | 2016-04-06T18:25:10 | 2016-04-06T18:25:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | import json
import urllib2
import MySQLdb
from searchEngine.models import product_info
count=0
def getJSONUtil(url):
"""
This method pulls data from the Edmodo API and, using the Django model,
inserts the records into the 'searchEngine_product_info' table.
"""
global count
data = json.load(urllib2.urlopen(url))
products=data["products"]
## get fields, _index and _id from each product
for i in range(len(products)):
_index=products[i]["_index"]
_id=products[i]["_id"]
fields=products[i]["fields"]
print("\n Id : {0}").format(_id)
print("\n Index : {0}").format(_index)
### get nested data from fields
long_desc_html=fields["long_desc_html"][0]
seller_thumb_url=fields["seller_thumb_url"][0]
resource_types=fields["resource_types"][0]
content_type=fields["content_type"][0]
long_desc=fields["long_desc"][0]
title=fields["title"][0]
greads_review_url=fields["greads_review_url"][0]
url=fields["url"][0]
edm_score=fields["edm_score"][0]
avg_rating=fields["avg_rating"][0]
creation_date=fields["creation_date"][0]
#print("\n long_desc_html : {0} \n seller_thumb_url : {1} \n resource_types : {2}").format(long_desc_html[0],seller_thumb_url[0],resource_types[0])
#print("\n content_type : {0} \n long_desc : {1} \n title : {2} \n greads_review_url : {3}").format(content_type[0],long_desc[0],title[0],greads_review_url[0])
#print("\n url : {0} \n edm_score : {1} \n avg_rating : {2} \n creation_date : {3}").format(url[0],edm_score[0],avg_rating[0],creation_date[0])
try:
p1=product_info(index=_index,product_id=_id,long_desc_html=long_desc_html,seller_thumb_url=seller_thumb_url,resource_types=resource_types,\
content_type=content_type,long_desc=long_desc,title=title,url=url,edm_score=edm_score,avg_rating=avg_rating,creation_date=creation_date)
p1.save()
except Exception,e:
print("\n ERROR : Insert unsuccessful... ")
print str(e)
print("\n Insert Success ...")
count+=1
def getJSONdata():
"""
This will build url using search term and call getJSONUtil
"""
base_url="https://spotlight.edmodo.com/api/search/?q="
query_term_list=["math","mitosis", "fractions","history","arts","holidays","cooking","dance","drama","music","graphical arts","programming","web design","game design","phonics",
"reading","poetry","writting","chinese","german","english","latin","french","arabic","spanish","italian","calculus","decimals","statistics","geometry","religion","coaching","arithmetic"]
for query_term in query_term_list:
try:
url=base_url+query_term
getJSONUtil(url)
except Exception,e:
print("ERROR at : {0}").format(query_term)
print str(e)
print("\n **** Total products *** : {0}").format(count)
# if __name__=="__main__":
# getJSONdata()
# #MySQLConn() | [
"[email protected]"
] |